* Add one element to a (possibly empty) eary struct.
* SQL statement.
+ * Dump all databases. There are no system objects to worry about.
* Dump all tables, indexes and sequences in the current database.
char *addfields = ",c.oid AS \"Oid\", nspname AS \"Schema\", spcname as \"Tablespace\" ";
- opts->systables ? "" : "n.nspname NOT IN ('pg_catalog', 'pg_toast', 'information_schema') AND");
+ opts->systables ? "" : "n.nspname NOT IN ('pg_catalog', 'pg_toast', 'information_schema') AND");
* given objects in the current database.
char *addfields = ",c.oid AS \"Oid\", nspname AS \"Schema\", spcname as \"Tablespace\" ";
" LEFT JOIN pg_catalog.pg_database d ON d.datname = current_database(),\n"
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.26 2005/09/24 17:53:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.27 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (def)
{
/*
- * make sure there is no existing default
- * encoding> pair in this name space
+ * make sure there is no existing default encoding>
+ * pair in this name space
*/
if (FindDefaultConversion(connamespace,
conforencoding,
contoencoding))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("default conversion for %s to %s already exists",
- pg_encoding_to_char(conforencoding),
- pg_encoding_to_char(contoencoding))));
+ errmsg("default conversion for %s to %s already exists",
+ pg_encoding_to_char(conforencoding),
+ pg_encoding_to_char(contoencoding))));
}
/* open pg_conversion */
if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tuple))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
- NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
+ NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
ReleaseSysCache(tuple);
ReleaseSysCache(tuple);
/*
- * build text result structure. we cannot use textin() here, since
- * textin assumes that input string encoding is same as database
- * encoding.
+ * build text result structure. we cannot use textin() here, since textin
+ * assumes that input string encoding is same as database encoding.
*/
len = strlen(result) + VARHDRSZ;
retval = palloc(len);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.14 2005/08/01 04:03:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.15 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return; /* nothing to do */
/*
- * During bootstrap, do nothing since pg_depend may not exist yet.
- * initdb will fill in appropriate pg_depend entries after bootstrap.
+ * During bootstrap, do nothing since pg_depend may not exist yet. initdb
+ * will fill in appropriate pg_depend entries after bootstrap.
*/
if (IsBootstrapProcessingMode())
return;
for (i = 0; i < nreferenced; i++, referenced++)
{
/*
- * If the referenced object is pinned by the system, there's no
- * real need to record dependencies on it. This saves lots of
- * space in pg_depend, so it's worth the time taken to check.
+ * If the referenced object is pinned by the system, there's no real
+ * need to record dependencies on it. This saves lots of space in
+ * pg_depend, so it's worth the time taken to check.
*/
if (!isObjectPinned(referenced, dependDesc))
{
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * If oldRefObjectId is pinned, there won't be any dependency entries
- * on it --- we can't cope in that case. (This isn't really worth
- * expending code to fix, in current usage; it just means you can't
- * rename stuff out of pg_catalog, which would likely be a bad move
- * anyway.)
+ * If oldRefObjectId is pinned, there won't be any dependency entries on
+ * it --- we can't cope in that case. (This isn't really worth expending
+ * code to fix, in current usage; it just means you can't rename stuff out
+ * of pg_catalog, which would likely be a bad move anyway.)
*/
objAddr.classId = refClassId;
objAddr.objectId = oldRefObjectId;
if (isObjectPinned(&objAddr, depRel))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot remove dependency on %s because it is a system object",
- getObjectDescription(&objAddr))));
+ errmsg("cannot remove dependency on %s because it is a system object",
+ getObjectDescription(&objAddr))));
/*
- * We can handle adding a dependency on something pinned, though,
- * since that just means deleting the dependency entry.
+ * We can handle adding a dependency on something pinned, though, since
+ * that just means deleting the dependency entry.
*/
objAddr.objectId = newRefObjectId;
/*
* Since we won't generate additional pg_depend entries for pinned
- * objects, there can be at most one entry referencing a pinned
- * object. Hence, it's sufficient to look at the first returned
- * tuple; we don't need to loop.
+ * objects, there can be at most one entry referencing a pinned object.
+ * Hence, it's sufficient to look at the first returned tuple; we don't
+ * need to loop.
*/
tup = systable_getnext(scan);
if (HeapTupleIsValid(tup))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.93 2005/07/07 20:39:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.94 2005/10/15 02:49:14 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
/*
* For SQL92 compatibility, '+' and '-' cannot be the last char of a
- * multi-char operator unless the operator contains chars that are not
- * in SQL92 operators. The idea is to lex '=-' as two operators, but
- * not to forbid operator names like '?-' that could not be sequences
- * of SQL92 operators.
+ * multi-char operator unless the operator contains chars that are not in
+ * SQL92 operators. The idea is to lex '=-' as two operators, but not to
+ * forbid operator names like '?-' that could not be sequences of SQL92
+ * operators.
*/
if (len > 1 &&
(name[len - 1] == '+' ||
}
/*
- * initialize values[] with the operator name and input data types.
- * Note that oprcode is set to InvalidOid, indicating it's a shell.
+ * initialize values[] with the operator name and input data types. Note
+ * that oprcode is set to InvalidOid, indicating it's a shell.
*/
i = 0;
namestrcpy(&oname, operatorName);
values[i++] = NameGetDatum(&oname); /* oprname */
values[i++] = ObjectIdGetDatum(operatorNamespace); /* oprnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */
values[i++] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); /* oprkind */
values[i++] = BoolGetDatum(false); /* oprcanhash */
values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */
if (!OidIsValid(leftTypeId) && !OidIsValid(rightTypeId))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("at least one of leftarg or rightarg must be specified")));
+ errmsg("at least one of leftarg or rightarg must be specified")));
if (!(OidIsValid(leftTypeId) && OidIsValid(rightTypeId)))
{
if (commutatorName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have commutators")));
+ errmsg("only binary operators can have commutators")));
if (joinName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have join selectivity")));
+ errmsg("only binary operators can have join selectivity")));
if (canHash)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
*/
/*
- * Look up registered procedures -- find the return type of
- * procedureName to place in "result" field. Do this before shells are
- * created so we don't have to worry about deleting them later.
+ * Look up registered procedures -- find the return type of procedureName
+ * to place in "result" field. Do this before shells are created so we
+ * don't have to worry about deleting them later.
*/
if (!OidIsValid(leftTypeId))
{
namestrcpy(&oname, operatorName);
values[i++] = NameGetDatum(&oname); /* oprname */
values[i++] = ObjectIdGetDatum(operatorNamespace); /* oprnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* oprowner */
values[i++] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); /* oprkind */
values[i++] = BoolGetDatum(canHash); /* oprcanhash */
values[i++] = ObjectIdGetDatum(leftTypeId); /* oprleft */
/*
* If a commutator and/or negator link is provided, update the other
- * operator(s) to point at this one, if they don't already have a
- * link. This supports an alternate style of operator definition
- * wherein the user first defines one operator without giving negator
- * or commutator, then defines the other operator of the pair with the
- * proper commutator or negator attribute. That style doesn't require
- * creation of a shell, and it's the only style that worked right
- * before Postgres version 6.5. This code also takes care of the
- * situation where the new operator is its own commutator.
+ * operator(s) to point at this one, if they don't already have a link.
+ * This supports an alternate style of operator definition wherein the
+ * user first defines one operator without giving negator or commutator,
+ * then defines the other operator of the pair with the proper commutator
+ * or negator attribute. That style doesn't require creation of a shell,
+ * and it's the only style that worked right before Postgres version 6.5.
+ * This code also takes care of the situation where the new operator is
+ * its own commutator.
*/
if (selfCommutator)
commutatorId = operatorObjectId;
if (!isCommutator)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("operator cannot be its own negator or sort operator")));
+ errmsg("operator cannot be its own negator or sort operator")));
return InvalidOid;
}
0, 0, 0);
/*
- * if the commutator and negator are the same operator, do one update.
- * XXX this is probably useless code --- I doubt it ever makes sense
- * for commutator and negator to be the same thing...
+ * if the commutator and negator are the same operator, do one update. XXX
+ * this is probably useless code --- I doubt it ever makes sense for
+ * commutator and negator to be the same thing...
*/
if (commId == negId)
{
* NOTE: we do not consider the operator to depend on the associated
* operators oprcom, oprnegate, oprlsortop, oprrsortop, oprltcmpop,
* oprgtcmpop. We would not want to delete this operator if those go
- * away, but only reset the link fields; which is not a function that
- * the dependency code can presently handle. (Something could perhaps
- * be done with objectSubId though.) For now, it's okay to let those
- * links dangle if a referenced operator is removed.
+ * away, but only reset the link fields; which is not a function that the
+ * dependency code can presently handle. (Something could perhaps be done
+ * with objectSubId though.) For now, it's okay to let those links dangle
+ * if a referenced operator is removed.
*/
/* Dependency on implementation function */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.133 2005/09/24 22:54:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.134 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (allParameterTypes != PointerGetDatum(NULL))
{
/*
- * We expect the array to be a 1-D OID array; verify that. We
- * don't need to use deconstruct_array() since the array data is
- * just going to look like a C array of OID values.
+ * We expect the array to be a 1-D OID array; verify that. We don't
+ * need to use deconstruct_array() since the array data is just going
+ * to look like a C array of OID values.
*/
allParamCount = ARR_DIMS(DatumGetPointer(allParameterTypes))[0];
if (ARR_NDIM(DatumGetPointer(allParameterTypes)) != 1 ||
/*
* Do not allow return type ANYARRAY or ANYELEMENT unless at least one
- * input argument is ANYARRAY or ANYELEMENT. Also, do not allow
- * return type INTERNAL unless at least one input argument is INTERNAL.
+ * input argument is ANYARRAY or ANYELEMENT. Also, do not allow return
+ * type INTERNAL unless at least one input argument is INTERNAL.
*/
for (i = 0; i < parameterCount; i++)
{
for (i = 0; i < allParamCount; i++)
{
/*
- * We don't bother to distinguish input and output params here,
- * so if there is, say, just an input INTERNAL param then we will
- * still set internalOutParam. This is OK since we don't really
+ * We don't bother to distinguish input and output params here, so
+ * if there is, say, just an input INTERNAL param then we will
+ * still set internalOutParam. This is OK since we don't really
* care.
*/
switch (allParams[i])
else
nulls[Anum_pg_proc_proargnames - 1] = 'n';
values[Anum_pg_proc_prosrc - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(prosrc));
+ CStringGetDatum(prosrc));
values[Anum_pg_proc_probin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(probin));
+ CStringGetDatum(probin));
/* start out with empty permissions */
nulls[Anum_pg_proc_proacl - 1] = 'n';
if (!replace)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
- errmsg("function \"%s\" already exists with same argument types",
- procedureName)));
+ errmsg("function \"%s\" already exists with same argument types",
+ procedureName)));
if (!pg_proc_ownercheck(HeapTupleGetOid(oldtup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
procedureName);
parameterModes,
parameterNames);
if (olddesc == NULL && newdesc == NULL)
- /* ok, both are runtime-defined RECORDs */ ;
+ /* ok, both are runtime-defined RECORDs */ ;
else if (olddesc == NULL || newdesc == NULL ||
!equalTupleDescs(olddesc, newdesc))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("cannot change return type of existing function"),
- errdetail("Row type defined by OUT parameters is different."),
- errhint("Use DROP FUNCTION first.")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("cannot change return type of existing function"),
+ errdetail("Row type defined by OUT parameters is different."),
+ errhint("Use DROP FUNCTION first.")));
}
/* Can't change aggregate status, either */
char *prosrc;
/*
- * We do not honor check_function_bodies since it's unlikely the
- * function name will be found later if it isn't there now.
+ * We do not honor check_function_bodies since it's unlikely the function
+ * name will be found later if it isn't there now.
*/
tuple = SearchSysCache(PROCOID,
char *probin;
/*
- * It'd be most consistent to skip the check if
- * !check_function_bodies, but the purpose of that switch is to be
- * helpful for pg_dump loading, and for pg_dump loading it's much
- * better if we *do* check.
+ * It'd be most consistent to skip the check if !check_function_bodies,
+ * but the purpose of that switch is to be helpful for pg_dump loading,
+ * and for pg_dump loading it's much better if we *do* check.
*/
tuple = SearchSysCache(PROCOID,
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL functions cannot have arguments of type %s",
- format_type_be(proc->proargtypes.values[i]))));
+ errmsg("SQL functions cannot have arguments of type %s",
+ format_type_be(proc->proargtypes.values[i]))));
}
}
error_context_stack = &sqlerrcontext;
/*
- * We can't do full prechecking of the function definition if
- * there are any polymorphic input types, because actual datatypes
- * of expression results will be unresolvable. The check will be
- * done at runtime instead.
+ * We can't do full prechecking of the function definition if there
+ * are any polymorphic input types, because actual datatypes of
+ * expression results will be unresolvable. The check will be done at
+ * runtime instead.
*
- * We can run the text through the raw parser though; this will at
- * least catch silly syntactic errors.
+ * We can run the text through the raw parser though; this will at least
+ * catch silly syntactic errors.
*/
if (!haspolyarg)
{
* Nothing to do unless we are dealing with a syntax error that has a
* cursor position.
*
- * Some PLs may prefer to report the error position as an internal error
- * to begin with, so check that too.
+ * Some PLs may prefer to report the error position as an internal error to
+ * begin with, so check that too.
*/
origerrposition = geterrposition();
if (origerrposition <= 0)
int cursorpos)
{
/*
- * Rather than fully parsing the CREATE FUNCTION command, we just scan
- * the command looking for $prosrc$ or 'prosrc'. This could be fooled
- * (though not in any very probable scenarios), so fail if we find
- * more than one match.
+ * Rather than fully parsing the CREATE FUNCTION command, we just scan the
+ * command looking for $prosrc$ or 'prosrc'. This could be fooled (though
+ * not in any very probable scenarios), so fail if we find more than one
+ * match.
*/
int prosrclen = strlen(prosrc);
int querylen = strlen(queryText);
{
/*
* Found a $foo$ match. Since there are no embedded quoting
- * characters in a dollar-quoted literal, we don't have to do
- * any fancy arithmetic; just offset by the starting position.
+ * characters in a dollar-quoted literal, we don't have to do any
+ * fancy arithmetic; just offset by the starting position.
*/
if (matchpos)
return 0; /* multiple matches, fail */
cursorpos, &newcursorpos))
{
/*
- * Found a 'foo' match. match_prosrc_to_literal() has
- * adjusted for any quotes or backslashes embedded in the
- * literal.
+ * Found a 'foo' match. match_prosrc_to_literal() has adjusted
+ * for any quotes or backslashes embedded in the literal.
*/
if (matchpos)
return 0; /* multiple matches, fail */
* string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
- * We do the comparison a character at a time, not a byte at a time, so
- * that we can do the correct cursorpos math.
+ * We do the comparison a character at a time, not a byte at a time, so that
+ * we can do the correct cursorpos math.
*/
while (*prosrc)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.2 2005/08/30 01:07:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.3 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} objectType;
static int getOidListDiff(Oid *list1, int nlist1, Oid *list2, int nlist2,
- Oid **diff);
-static Oid classIdGetDbId(Oid classId);
+ Oid **diff);
+static Oid classIdGetDbId(Oid classId);
static void shdepLockAndCheckObject(Oid classId, Oid objectId);
static void shdepChangeDep(Relation sdepRel, Oid classid, Oid objid,
- Oid refclassid, Oid refobjid,
- SharedDependencyType deptype);
+ Oid refclassid, Oid refobjid,
+ SharedDependencyType deptype);
static void shdepAddDependency(Relation sdepRel, Oid classId, Oid objectId,
- Oid refclassId, Oid refobjId,
- SharedDependencyType deptype);
+ Oid refclassId, Oid refobjId,
+ SharedDependencyType deptype);
static void shdepDropDependency(Relation sdepRel, Oid classId, Oid objectId,
- Oid refclassId, Oid refobjId,
- SharedDependencyType deptype);
+ Oid refclassId, Oid refobjId,
+ SharedDependencyType deptype);
static void storeObjectDescription(StringInfo descs, objectType type,
- ObjectAddress *object,
- SharedDependencyType deptype,
- int count);
+ ObjectAddress *object,
+ SharedDependencyType deptype,
+ int count);
static bool isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel);
*/
void
recordSharedDependencyOn(ObjectAddress *depender,
- ObjectAddress *referenced,
+ ObjectAddress *referenced,
SharedDependencyType deptype)
{
Relation sdepRel;
sdepRel))
{
shdepAddDependency(sdepRel, depender->classId, depender->objectId,
- referenced->classId, referenced->objectId,
+ referenced->classId, referenced->objectId,
deptype);
}
* shdepChangeDep
*
* Update shared dependency records to account for an updated referenced
- * object. This is an internal workhorse for operations such as changing
+ * object. This is an internal workhorse for operations such as changing
* an object's owner.
*
* There must be no more than one existing entry for the given dependent
- * object and dependency type! So in practice this can only be used for
+ * object and dependency type! So in practice this can only be used for
* updating SHARED_DEPENDENCY_OWNER entries, which should have that property.
*
* If there is no previous entry, we assume it was referencing a PINned
Oid dbid = classIdGetDbId(classid);
HeapTuple oldtup = NULL;
HeapTuple scantup;
- ScanKeyData key[3];
- SysScanDesc scan;
+ ScanKeyData key[3];
+ SysScanDesc scan;
/*
- * Make sure the new referenced object doesn't go away while we record
- * the dependency.
+ * Make sure the new referenced object doesn't go away while we record the
+ * dependency.
*/
shdepLockAndCheckObject(refclassid, refobjid);
* Look for a previous entry
*/
ScanKeyInit(&key[0],
- Anum_pg_shdepend_dbid,
+ Anum_pg_shdepend_dbid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(dbid));
ScanKeyInit(&key[1],
- Anum_pg_shdepend_classid,
+ Anum_pg_shdepend_classid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classid));
ScanKeyInit(&key[2],
scan = systable_beginscan(sdepRel, SharedDependDependerIndexId, true,
SnapshotNow, 3, key);
-
+
while ((scantup = systable_getnext(scan)) != NULL)
{
/* Ignore if not of the target dependency type */
else
{
/* Need to insert new entry */
- Datum values[Natts_pg_shdepend];
- bool nulls[Natts_pg_shdepend];
+ Datum values[Natts_pg_shdepend];
+ bool nulls[Natts_pg_shdepend];
memset(nulls, 0, sizeof(nulls));
values[Anum_pg_shdepend_deptype - 1] = CharGetDatum(deptype);
/*
- * we are reusing oldtup just to avoid declaring a new variable,
- * but it's certainly a new tuple
+ * we are reusing oldtup just to avoid declaring a new variable, but
+ * it's certainly a new tuple
*/
oldtup = heap_form_tuple(RelationGetDescr(sdepRel), values, nulls);
simple_heap_insert(sdepRel, oldtup);
* was previously granted some rights to the object.
*
* This step is analogous to aclnewowner's removal of duplicate entries
- * in the ACL. We have to do it to handle this scenario:
+ * in the ACL. We have to do it to handle this scenario:
* A grants some rights on an object to B
* ALTER OWNER changes the object's owner to B
* ALTER OWNER changes the object's owner to C
* Helper for updateAclDependencies.
*
* Takes two Oid arrays and returns elements from the first not found in the
- * second. We assume both arrays are sorted and de-duped, and that the
+ * second. We assume both arrays are sorted and de-duped, and that the
* second array does not contain any values not found in the first.
*
* NOTE: Both input arrays are pfreed.
static int
getOidListDiff(Oid *list1, int nlist1, Oid *list2, int nlist2, Oid **diff)
{
- Oid *result;
- int i,
- j,
- k = 0;
+ Oid *result;
+ int i,
+ j,
+ k = 0;
AssertArg(nlist1 >= nlist2 && nlist2 >= 0);
result = palloc(sizeof(Oid) * (nlist1 - nlist2));
*diff = result;
- for (i = 0, j = 0; i < nlist1 && j < nlist2; )
+ for (i = 0, j = 0; i < nlist1 && j < nlist2;)
{
if (list1[i] == list2[j])
{
/*
* updateAclDependencies
- * Update the pg_shdepend info for an object's ACL during GRANT/REVOKE.
+ * Update the pg_shdepend info for an object's ACL during GRANT/REVOKE.
*
* classId, objectId: identify the object whose ACL this is
* ownerId: role owning the object
/* Add or drop the respective dependency */
for (i = 0; i < ndiff; i++)
{
- Oid roleid = diff[i];
+ Oid roleid = diff[i];
/*
- * Skip the owner: he has an OWNER shdep entry instead.
- * (This is not just a space optimization; it makes ALTER OWNER
- * easier. See notes in changeDependencyOnOwner.)
+ * Skip the owner: he has an OWNER shdep entry instead. (This is
+ * not just a space optimization; it makes ALTER OWNER easier.
+ * See notes in changeDependencyOnOwner.)
*/
if (roleid == ownerId)
continue;
shdepAddDependency(sdepRel, classId, objectId,
AuthIdRelationId, roleid,
SHARED_DEPENDENCY_ACL);
- else
+ else
shdepDropDependency(sdepRel, classId, objectId,
AuthIdRelationId, roleid,
SHARED_DEPENDENCY_ACL);
*/
typedef struct
{
- Oid dbOid;
- int count;
+ Oid dbOid;
+ int count;
} remoteDep;
/*
* checkSharedDependencies
*
* Check whether there are shared dependency entries for a given shared
- * object. Returns a string containing a newline-separated list of object
+ * object. Returns a string containing a newline-separated list of object
* descriptions that depend on the shared object, or NULL if none is found.
*
* We can find three different kinds of dependencies: dependencies on objects
checkSharedDependencies(Oid classId, Oid objectId)
{
Relation sdepRel;
- ScanKeyData key[2];
- SysScanDesc scan;
+ ScanKeyData key[2];
+ SysScanDesc scan;
HeapTuple tup;
int totalDeps = 0;
int numLocalDeps = 0;
int numSharedDeps = 0;
List *remDeps = NIL;
ListCell *cell;
- ObjectAddress object;
+ ObjectAddress object;
StringInfoData descs;
/*
- * We try to limit the number of reported dependencies to something
- * sane, both for the user's sake and to avoid blowing out memory.
+ * We try to limit the number of reported dependencies to something sane,
+ * both for the user's sake and to avoid blowing out memory.
*/
#define MAX_REPORTED_DEPS 100
sdepRel = heap_open(SharedDependRelationId, AccessShareLock);
ScanKeyInit(&key[0],
- Anum_pg_shdepend_refclassid,
+ Anum_pg_shdepend_refclassid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classId));
ScanKeyInit(&key[1],
- Anum_pg_shdepend_refobjid,
+ Anum_pg_shdepend_refobjid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(objectId));
-
+
scan = systable_beginscan(sdepRel, SharedDependReferenceIndexId, true,
SnapshotNow, 2, key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- Form_pg_shdepend sdepForm = (Form_pg_shdepend) GETSTRUCT(tup);
+ Form_pg_shdepend sdepForm = (Form_pg_shdepend) GETSTRUCT(tup);
/* This case can be dispatched quickly */
if (sdepForm->deptype == SHARED_DEPENDENCY_PIN)
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot drop %s because it is required by the database system",
- getObjectDescription(&object))));
+ getObjectDescription(&object))));
}
object.classId = sdepForm->classid;
* If it's a dependency local to this database or it's a shared
* object, describe it.
*
- * If it's a remote dependency, keep track of it so we can report
- * the number of them later.
+ * If it's a remote dependency, keep track of it so we can report the
+ * number of them later.
*/
if (sdepForm->dbid == MyDatabaseId)
{
bool stored = false;
/*
- * XXX this info is kept on a simple List. Maybe it's not good
+ * XXX this info is kept on a simple List. Maybe it's not good
* for performance, but using a hash table seems needlessly
- * complex. The expected number of databases is not high
- * anyway, I suppose.
+ * complex. The expected number of databases is not high anyway,
+ * I suppose.
*/
foreach(cell, remDeps)
{
/*
* Report seems unreasonably long, so reduce it to per-database info
*
- * Note: we don't ever suppress per-database totals, which should
- * be OK as long as there aren't too many databases ...
+ * Note: we don't ever suppress per-database totals, which should be OK
+ * as long as there aren't too many databases ...
*/
descs.len = 0; /* reset to empty */
descs.data[0] = '\0';
foreach(cell, remDeps)
{
- remoteDep *dep = lfirst(cell);
+ remoteDep *dep = lfirst(cell);
object.classId = DatabaseRelationId;
object.objectId = dep->dbOid;
{
Relation sdepRel;
TupleDesc sdepDesc;
- ScanKeyData key[1];
- SysScanDesc scan;
+ ScanKeyData key[1];
+ SysScanDesc scan;
HeapTuple tup;
CatalogIndexState indstate;
Datum values[Natts_pg_shdepend];
values[Anum_pg_shdepend_dbid - 1] = ObjectIdGetDatum(newDbId);
/*
- * Copy the entries of the original database, changing the database Id
- * to that of the new database. Note that because we are not copying
- * rows with dbId == 0 (ie, rows describing dependent shared objects)
- * we won't copy the ownership dependency of the template database
- * itself; this is what we want.
+ * Copy the entries of the original database, changing the database Id to
+ * that of the new database. Note that because we are not copying rows
+ * with dbId == 0 (ie, rows describing dependent shared objects) we won't
+ * copy the ownership dependency of the template database itself; this is
+ * what we want.
*/
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
dropDatabaseDependencies(Oid databaseId)
{
Relation sdepRel;
- ScanKeyData key[1];
- SysScanDesc scan;
+ ScanKeyData key[1];
+ SysScanDesc scan;
HeapTuple tup;
sdepRel = heap_open(SharedDependRelationId, RowExclusiveLock);
/*
- * First, delete all the entries that have the database Oid in the
- * dbid field.
+ * First, delete all the entries that have the database Oid in the dbid
+ * field.
*/
ScanKeyInit(&key[0],
Anum_pg_shdepend_dbid,
/*
* shdepAddDependency
- * Internal workhorse for inserting into pg_shdepend
+ * Internal workhorse for inserting into pg_shdepend
*
* sdepRel must be the pg_shdepend relation, already opened and suitably
* locked.
bool nulls[Natts_pg_shdepend];
/*
- * Make sure the object doesn't go away while we record the dependency
- * on it. DROP routines should lock the object exclusively before they
- * check shared dependencies.
+ * Make sure the object doesn't go away while we record the dependency on
+ * it. DROP routines should lock the object exclusively before they check
+ * shared dependencies.
*/
shdepLockAndCheckObject(refclassId, refobjId);
/*
* shdepDropDependency
- * Internal workhorse for deleting entries from pg_shdepend.
+ * Internal workhorse for deleting entries from pg_shdepend.
*
* We drop entries having the following properties:
* dependent object is the one identified by classId/objectId
Oid refclassId, Oid refobjId,
SharedDependencyType deptype)
{
- ScanKeyData key[3];
- SysScanDesc scan;
+ ScanKeyData key[3];
+ SysScanDesc scan;
HeapTuple tup;
/* Scan for entries matching the dependent object */
ScanKeyInit(&key[0],
- Anum_pg_shdepend_dbid,
+ Anum_pg_shdepend_dbid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classIdGetDbId(classId)));
ScanKeyInit(&key[1],
- Anum_pg_shdepend_classid,
+ Anum_pg_shdepend_classid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classId));
ScanKeyInit(&key[2],
LockSharedObject(classId, objectId, 0, AccessShareLock);
/*
- * We have to recognize sinval updates here, else our local syscache
- * may still contain the object even if it was just dropped.
+ * We have to recognize sinval updates here, else our local syscache may
+ * still contain the object even if it was just dropped.
*/
AcceptInvalidationMessages();
objectId)));
break;
- /*
- * Currently, this routine need not support any other shared object
- * types besides roles. If we wanted to record explicit dependencies
- * on databases or tablespaces, we'd need code along these lines:
- */
+ /*
+ * Currently, this routine need not support any other shared
+ * object types besides roles. If we wanted to record explicit
+ * dependencies on databases or tablespaces, we'd need code along
+ * these lines:
+ */
#ifdef NOT_USED
case TableSpaceRelationId:
- {
- /* For lack of a syscache on pg_tablespace, do this: */
- char *tablespace = get_tablespace_name(objectId);
-
- if (tablespace == NULL)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace %u was concurrently dropped",
- objectId)));
- pfree(tablespace);
- break;
- }
+ {
+ /* For lack of a syscache on pg_tablespace, do this: */
+ char *tablespace = get_tablespace_name(objectId);
+
+ if (tablespace == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("tablespace %u was concurrently dropped",
+ objectId)));
+ pfree(tablespace);
+ break;
+ }
#endif
default:
SharedDependencyType deptype,
int count)
{
- char *objdesc = getObjectDescription(object);
+ char *objdesc = getObjectDescription(object);
/* separate entries with a newline */
if (descs->len != 0)
appendStringInfoChar(descs, '\n');
- switch (type)
+ switch (type)
{
case LOCAL_OBJECT:
case SHARED_OBJECT:
isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel)
{
bool result = false;
- ScanKeyData key[2];
- SysScanDesc scan;
+ ScanKeyData key[2];
+ SysScanDesc scan;
HeapTuple tup;
ScanKeyInit(&key[0],
- Anum_pg_shdepend_refclassid,
+ Anum_pg_shdepend_refclassid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(classId));
ScanKeyInit(&key[1],
- Anum_pg_shdepend_refobjid,
+ Anum_pg_shdepend_refobjid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(objectId));
/*
* Since we won't generate additional pg_shdepend entries for pinned
- * objects, there can be at most one entry referencing a pinned
- * object. Hence, it's sufficient to look at the first returned
- * tuple; we don't need to loop.
+ * objects, there can be at most one entry referencing a pinned object.
+ * Hence, it's sufficient to look at the first returned tuple; we don't
+ * need to loop.
*/
tup = systable_getnext(scan);
if (HeapTupleIsValid(tup))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.103 2005/08/12 01:35:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.104 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
namestrcpy(&name, typeName);
values[i++] = NameGetDatum(&name); /* typname */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
values[i++] = Int16GetDatum(0); /* typlen */
values[i++] = BoolGetDatum(false); /* typbyval */
values[i++] = CharGetDatum(0); /* typtype */
int i;
/*
- * We assume that the caller validated the arguments individually, but
- * did not check for bad combinations.
+ * We assume that the caller validated the arguments individually, but did
+ * not check for bad combinations.
*
* Validate size specifications: either positive (fixed-length) or -1
* (varlena) or -2 (cstring). Pass-by-value types must have a fixed
(internalSize <= 0 || internalSize > (int16) sizeof(Datum)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("internal size %d is invalid for passed-by-value type",
- internalSize)));
+ errmsg("internal size %d is invalid for passed-by-value type",
+ internalSize)));
/* Only varlena types can be toasted */
if (storage != 'p' && internalSize != -1)
namestrcpy(&name, typeName);
values[i++] = NameGetDatum(&name); /* typname */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
values[i++] = Int16GetDatum(internalSize); /* typlen */
values[i++] = BoolGetDatum(passedByValue); /* typbyval */
values[i++] = CharGetDatum(typeType); /* typtype */
values[i++] = Int32GetDatum(typNDims); /* typndims */
/*
- * initialize the default binary value for this type. Check for nulls
- * of course.
+ * initialize the default binary value for this type. Check for nulls of
+ * course.
*/
if (defaultTypeBin)
values[i] = DirectFunctionCall1(textin,
*/
if (defaultTypeValue)
values[i] = DirectFunctionCall1(textin,
- CStringGetDatum(defaultTypeValue));
+ CStringGetDatum(defaultTypeValue));
else
nulls[i] = 'n';
i++; /* typdefault */
void
GenerateTypeDependencies(Oid typeNamespace,
Oid typeObjectId,
- Oid relationOid, /* only for 'c'atalog
- * types */
+ Oid relationOid, /* only for 'c'atalog types */
char relationKind, /* ditto */
Oid owner,
Oid inputProcedure,
/*
* If the type is a rowtype for a relation, mark it as internally
- * dependent on the relation, *unless* it is a stand-alone composite
- * type relation. For the latter case, we have to reverse the
- * dependency.
+ * dependent on the relation, *unless* it is a stand-alone composite type
+ * relation. For the latter case, we have to reverse the dependency.
*
* In the former case, this allows the type to be auto-dropped when the
- * relation is, and not otherwise. And in the latter, of course we get
- * the opposite effect.
+ * relation is, and not otherwise. And in the latter, of course we get the
+ * opposite effect.
*/
if (OidIsValid(relationOid))
{
}
/*
- * If the type is an array type, mark it auto-dependent on the base
- * type. (This is a compromise between the typical case where the
- * array type is automatically generated and the case where it is
- * manually created: we'd prefer INTERNAL for the former case and
- * NORMAL for the latter.)
+ * If the type is an array type, mark it auto-dependent on the base type.
+ * (This is a compromise between the typical case where the array type is
+ * automatically generated and the case where it is manually created: we'd
+ * prefer INTERNAL for the former case and NORMAL for the latter.)
*/
if (OidIsValid(elementType))
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.29 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.30 2005/10/15 02:49:14 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
DefElem *defel = (DefElem *) lfirst(pl);
/*
- * sfunc1, stype1, and initcond1 are accepted as obsolete
- * spellings for sfunc, stype, initcond.
+ * sfunc1, stype1, and initcond1 are accepted as obsolete spellings
+ * for sfunc, stype, initcond.
*/
if (pg_strcasecmp(defel->defname, "sfunc") == 0)
transfuncName = defGetQualifiedName(defel);
/*
* look up the aggregate's base type (input datatype) and transtype.
*
- * We have historically allowed the command to look like basetype = 'ANY'
- * so we must do a case-insensitive comparison for the name ANY. Ugh.
+ * We have historically allowed the command to look like basetype = 'ANY' so
+ * we must do a case-insensitive comparison for the name ANY. Ugh.
*
- * basetype can be a pseudo-type, but transtype can't, since we need to
- * be able to store values of the transtype. However, we can allow
+ * basetype can be a pseudo-type, but transtype can't, since we need to be
+ * able to store values of the transtype. However, we can allow
* polymorphic transtype in some cases (AggregateCreate will check).
*/
if (pg_strcasecmp(TypeNameToString(baseType), "ANY") == 0)
ObjectAddress object;
/*
- * if a basetype is passed in, then attempt to find an aggregate for
- * that specific type.
+ * if a basetype is passed in, then attempt to find an aggregate for that
+ * specific type.
*
- * else attempt to find an aggregate with a basetype of ANYOID. This
- * means that the aggregate is to apply to all basetypes (eg, COUNT).
+ * else attempt to find an aggregate with a basetype of ANYOID. This means
+ * that the aggregate is to apply to all basetypes (eg, COUNT).
*/
if (aggType)
basetypeID = typenameTypeId(aggType);
/* Permission check: must own agg or its namespace */
if (!pg_proc_ownercheck(procOid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
- GetUserId()))
+ !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
+ GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
NameListToString(aggName));
AclResult aclresult;
/*
- * if a basetype is passed in, then attempt to find an aggregate for
- * that specific type; else attempt to find an aggregate with a
- * basetype of ANYOID. This means that the aggregate applies to all
- * basetypes (eg, COUNT).
+ * if a basetype is passed in, then attempt to find an aggregate for that
+ * specific type; else attempt to find an aggregate with a basetype of
+ * ANYOID. This means that the aggregate applies to all basetypes (eg,
+ * COUNT).
*/
if (basetype)
basetypeOid = typenameTypeId(basetype);
if (basetypeOid == ANYOID)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
- errmsg("function %s(*) already exists in schema \"%s\"",
- newname,
- get_namespace_name(namespaceOid))));
+ errmsg("function %s(*) already exists in schema \"%s\"",
+ newname,
+ get_namespace_name(namespaceOid))));
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes.values),
+ procForm->proargtypes.values),
get_namespace_name(namespaceOid))));
}
AclResult aclresult;
/*
- * if a basetype is passed in, then attempt to find an aggregate for
- * that specific type; else attempt to find an aggregate with a
- * basetype of ANYOID. This means that the aggregate applies to all
- * basetypes (eg, COUNT).
+ * if a basetype is passed in, then attempt to find an aggregate for that
+ * specific type; else attempt to find an aggregate with a basetype of
+ * ANYOID. This means that the aggregate applies to all basetypes (eg,
+ * COUNT).
*/
if (basetype)
basetypeOid = typenameTypeId(basetype);
}
/*
- * Modify the owner --- okay to scribble on tup because it's a
- * copy
+ * Modify the owner --- okay to scribble on tup because it's a copy
*/
procForm->proowner = newOwnerId;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.14 2005/08/01 04:03:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.15 2005/10/15 02:49:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/*
* RENAME TABLE requires that we (still) hold
- * CREATE rights on the containing namespace,
- * as well as ownership of the table.
+ * CREATE rights on the containing namespace, as
+ * well as ownership of the table.
*/
Oid namespaceId = get_rel_namespace(relid);
AclResult aclresult;
ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
- get_namespace_name(namespaceId));
+ get_namespace_name(namespaceId));
renamerel(relid, stmt->newname);
break;
renameatt(relid,
stmt->subname, /* old att name */
stmt->newname, /* new att name */
- interpretInhOption(stmt->relation->inhOpt), /* recursive? */
+ interpretInhOption(stmt->relation->inhOpt), /* recursive? */
false); /* recursing already? */
break;
case OBJECT_TRIGGER:
AlterFunctionNamespace(stmt->object, stmt->objarg,
stmt->newschema);
break;
-
+
case OBJECT_SEQUENCE:
case OBJECT_TABLE:
CheckRelationOwnership(stmt->relation, true);
AlterTableNamespace(stmt->relation, stmt->newschema);
break;
-
+
case OBJECT_TYPE:
case OBJECT_DOMAIN:
AlterTypeNamespace(stmt->object, stmt->newschema);
break;
-
+
default:
elog(ERROR, "unrecognized AlterObjectSchemaStmt type: %d",
(int) stmt->objectType);
void
ExecAlterOwnerStmt(AlterOwnerStmt *stmt)
{
- Oid newowner = get_roleid_checked(stmt->newowner);
+ Oid newowner = get_roleid_checked(stmt->newowner);
switch (stmt->objectType)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.88 2005/07/29 19:30:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.89 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
elevel = DEBUG2;
/*
- * Use the current context for storing analysis info. vacuum.c
- * ensures that this context will be cleared when I return, thus
- * releasing the memory allocated here.
+ * Use the current context for storing analysis info. vacuum.c ensures
+ * that this context will be cleared when I return, thus releasing the
+ * memory allocated here.
*/
anl_context = CurrentMemoryContext;
CHECK_FOR_INTERRUPTS();
/*
- * Race condition -- if the pg_class tuple has gone away since the
- * last time we saw it, we don't need to process it.
+ * Race condition -- if the pg_class tuple has gone away since the last
+ * time we saw it, we don't need to process it.
*/
if (!SearchSysCacheExists(RELOID,
ObjectIdGetDatum(relid),
return;
/*
- * Open the class, getting only a read lock on it, and check
- * permissions. Permissions check should match vacuum's check!
+ * Open the class, getting only a read lock on it, and check permissions.
+ * Permissions check should match vacuum's check!
*/
onerel = relation_open(relid, AccessShareLock);
}
/*
- * Check that it's a plain table; we used to do this in get_rel_oids()
- * but seems safer to check after we've locked the relation.
+ * Check that it's a plain table; we used to do this in get_rel_oids() but
+ * seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != RELKIND_RELATION)
{
/*
* Silently ignore tables that are temp tables of other backends ---
- * trying to analyze these is rather pointless, since their contents
- * are probably not up-to-date on disk. (We don't throw a warning
- * here; it would just lead to chatter during a database-wide
- * ANALYZE.)
+ * trying to analyze these is rather pointless, since their contents are
+ * probably not up-to-date on disk. (We don't throw a warning here; it
+ * would just lead to chatter during a database-wide ANALYZE.)
*/
if (isOtherTempNamespace(RelationGetNamespace(onerel)))
{
}
/*
- * Open all indexes of the relation, and see if there are any
- * analyzable columns in the indexes. We do not analyze index columns
- * if there was an explicit column list in the ANALYZE command,
- * however.
+ * Open all indexes of the relation, and see if there are any analyzable
+ * columns in the indexes. We do not analyze index columns if there was
+ * an explicit column list in the ANALYZE command, however.
*/
vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);
hasindex = (nindexes > 0);
indexpr_item = lnext(indexpr_item);
/*
- * Can't analyze if the opclass uses a storage
- * type different from the expression result type.
- * We'd get confused because the type shown in
- * pg_attribute for the index column doesn't match
- * what we are getting from the expression.
- * Perhaps this can be fixed someday, but for now,
- * punt.
+ * Can't analyze if the opclass uses a storage type
+ * different from the expression result type. We'd get
+ * confused because the type shown in pg_attribute for
+ * the index column doesn't match what we are getting
+ * from the expression. Perhaps this can be fixed
+ * someday, but for now, punt.
*/
if (exprType(indexkey) !=
Irel[ind]->rd_att->attrs[i]->atttypid)
{
/*
* We report that the table is empty; this is just so that the
- * autovacuum code doesn't go nuts trying to get stats about
- * a zero-column table.
+ * autovacuum code doesn't go nuts trying to get stats about a
+ * zero-column table.
*/
if (!vacstmt->vacuum)
pgstat_report_analyze(RelationGetRelid(onerel),
onerel->rd_rel->relisshared,
- 0, 0);
+ 0, 0);
vac_close_indexes(nindexes, Irel, AccessShareLock);
relation_close(onerel, AccessShareLock);
}
/*
- * Determine how many rows we need to sample, using the worst case
- * from all analyzable columns. We use a lower bound of 100 rows to
- * avoid possible overflow in Vitter's algorithm.
+ * Determine how many rows we need to sample, using the worst case from
+ * all analyzable columns. We use a lower bound of 100 rows to avoid
+ * possible overflow in Vitter's algorithm.
*/
targrows = 100;
for (i = 0; i < attr_cnt; i++)
&totalrows, &totaldeadrows);
/*
- * Compute the statistics. Temporary results during the calculations
- * for each column are stored in a child context. The calc routines
- * are responsible to make sure that whatever they store into the
- * VacAttrStats structure is allocated in anl_context.
+ * Compute the statistics. Temporary results during the calculations for
+ * each column are stored in a child context. The calc routines are
+ * responsible to make sure that whatever they store into the VacAttrStats
+ * structure is allocated in anl_context.
*/
if (numrows > 0)
{
/*
* Emit the completed stats rows into pg_statistic, replacing any
- * previous statistics for the target columns. (If there are
- * stats in pg_statistic for columns we didn't process, we leave
- * them alone.)
+ * previous statistics for the target columns. (If there are stats in
+ * pg_statistic for columns we didn't process, we leave them alone.)
*/
update_attstats(relid, attr_cnt, vacattrstats);
}
/*
- * If we are running a standalone ANALYZE, update pages/tuples stats
- * in pg_class. We know the accurate page count from the smgr, but
- * only an approximate number of tuples; therefore, if we are part of
- * VACUUM ANALYZE do *not* overwrite the accurate count already
- * inserted by VACUUM. The same consideration applies to indexes.
+ * If we are running a standalone ANALYZE, update pages/tuples stats in
+ * pg_class. We know the accurate page count from the smgr, but only an
+ * approximate number of tuples; therefore, if we are part of VACUUM
+ * ANALYZE do *not* overwrite the accurate count already inserted by
+ * VACUUM. The same consideration applies to indexes.
*/
if (!vacstmt->vacuum)
{
/* report results to the stats collector, too */
pgstat_report_analyze(RelationGetRelid(onerel),
onerel->rd_rel->relisshared,
- totalrows, totaldeadrows);
+ totalrows, totaldeadrows);
}
/* Done with indexes */
/*
* Close source relation now, but keep lock so that no one deletes it
- * before we commit. (If someone did, they'd fail to clean up the
- * entries we made in pg_statistic.)
+ * before we commit. (If someone did, they'd fail to clean up the entries
+ * we made in pg_statistic.)
*/
relation_close(onerel, NoLock);
}
/*
* Need an EState for evaluation of index expressions and
- * partial-index predicates. Create it in the per-index context
- * to be sure it gets cleaned up at the bottom of the loop.
+ * partial-index predicates. Create it in the per-index context to be
+ * sure it gets cleaned up at the bottom of the loop.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
{
/*
* Evaluate the index row to compute expression values. We
- * could do this by hand, but FormIndexDatum is
- * convenient.
+ * could do this by hand, but FormIndexDatum is convenient.
*/
FormIndexDatum(indexInfo,
slot,
}
/*
- * Having counted the number of rows that pass the predicate in
- * the sample, we can estimate the total number of rows in the
- * index.
+ * Having counted the number of rows that pass the predicate in the
+ * sample, we can estimate the total number of rows in the index.
*/
thisdata->tupleFract = (double) numindexrows / (double) numrows;
totalindexrows = ceil(thisdata->tupleFract * totalrows);
stats->tupattnum = attnum;
/*
- * Call the type-specific typanalyze function. If none is specified,
- * use std_typanalyze().
+ * Call the type-specific typanalyze function. If none is specified, use
+ * std_typanalyze().
*/
if (OidIsValid(stats->attrtype->typanalyze))
ok = DatumGetBool(OidFunctionCall1(stats->attrtype->typanalyze,
bs->N = nblocks; /* measured table size */
/*
- * If we decide to reduce samplesize for tables that have less or not
- * much more than samplesize blocks, here is the place to do it.
+ * If we decide to reduce samplesize for tables that have less or not much
+ * more than samplesize blocks, here is the place to do it.
*/
bs->n = samplesize;
bs->t = 0; /* blocks scanned so far */
vacuum_delay_point();
/*
- * We must maintain a pin on the target page's buffer to ensure
- * that the maxoffset value stays good (else concurrent VACUUM
- * might delete tuples out from under us). Hence, pin the page
- * until we are done looking at it. We don't maintain a lock on
- * the page, so tuples could get added to it, but we ignore such
- * tuples.
+ * We must maintain a pin on the target page's buffer to ensure that
+ * the maxoffset value stays good (else concurrent VACUUM might delete
+ * tuples out from under us). Hence, pin the page until we are done
+ * looking at it. We don't maintain a lock on the page, so tuples
+ * could get added to it, but we ignore such tuples.
*/
targbuffer = ReadBuffer(onerel, targblock);
LockBuffer(targbuffer, BUFFER_LOCK_SHARE);
/*
* The first targrows live rows are simply copied into the
* reservoir. Then we start replacing tuples in the sample
- * until we reach the end of the relation. This algorithm
- * is from Jeff Vitter's paper (see full citation below).
- * It works by repeatedly computing the number of tuples
- * to skip before selecting a tuple, which replaces a
- * randomly chosen element of the reservoir (current set
- * of tuples). At all times the reservoir is a true
- * random sample of the tuples we've passed over so far,
- * so when we fall off the end of the relation we're done.
+ * until we reach the end of the relation. This algorithm is
+ * from Jeff Vitter's paper (see full citation below). It
+ * works by repeatedly computing the number of tuples to skip
+ * before selecting a tuple, which replaces a randomly chosen
+ * element of the reservoir (current set of tuples). At all
+ * times the reservoir is a true random sample of the tuples
+ * we've passed over so far, so when we fall off the end of
+ * the relation we're done.
*/
if (numrows < targrows)
rows[numrows++] = heap_copytuple(&targtuple);
else
{
/*
- * t in Vitter's paper is the number of records
- * already processed. If we need to compute a new S
- * value, we must use the not-yet-incremented value of
- * liverows as t.
+ * t in Vitter's paper is the number of records already
+ * processed. If we need to compute a new S value, we
+ * must use the not-yet-incremented value of liverows as
+ * t.
*/
if (rowstoskip < 0)
rowstoskip = get_next_S(liverows, targrows, &rstate);
if (rowstoskip <= 0)
{
/*
- * Found a suitable tuple, so save it, replacing
- * one old tuple at random
+ * Found a suitable tuple, so save it, replacing one
+ * old tuple at random
*/
int k = (int) (targrows * random_fract());
}
/*
- * If we didn't find as many tuples as we wanted then we're done. No
- * sort is needed, since they're already in order.
+ * If we didn't find as many tuples as we wanted then we're done. No sort
+ * is needed, since they're already in order.
*
- * Otherwise we need to sort the collected tuples by position
- * (itempointer). It's not worth worrying about corner cases where
- * the tuples are already sorted.
+ * Otherwise we need to sort the collected tuples by position (itempointer).
+ * It's not worth worrying about corner cases where the tuples are already
+ * sorted.
*/
if (numrows == targrows)
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;
/*
- * We track up to 2*n values for an n-element MCV list; but at least
- * 10
+ * We track up to 2*n values for an n-element MCV list; but at least 10
*/
track_max = 2 * num_mcv;
if (track_max < 10)
/*
* If it's a variable-width field, add up widths for average width
- * calculation. Note that if the value is toasted, we use the
- * toasted width. We don't bother with this calculation if it's a
- * fixed-width type.
+ * calculation. Note that if the value is toasted, we use the toasted
+ * width. We don't bother with this calculation if it's a fixed-width
+ * type.
*/
if (is_varlena)
{
/*
* If the value is toasted, we want to detoast it just once to
- * avoid repeated detoastings and resultant excess memory
- * usage during the comparisons. Also, check to see if the
- * value is excessively wide, and if so don't detoast at all
- * --- just ignore the value.
+ * avoid repeated detoastings and resultant excess memory usage
+ * during the comparisons. Also, check to see if the value is
+ * excessively wide, and if so don't detoast at all --- just
+ * ignore the value.
*/
if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
{
nmultiple == track_cnt)
{
/*
- * Our track list includes every value in the sample, and
- * every value appeared more than once. Assume the column has
- * just these values.
+ * Our track list includes every value in the sample, and every
+ * value appeared more than once. Assume the column has just
+ * these values.
*/
stats->stadistinct = track_cnt;
}
}
/*
- * If we estimated the number of distinct values at more than 10%
- * of the total row count (a very arbitrary limit), then assume
- * that stadistinct should scale with the row count rather than be
- * a fixed value.
+ * If we estimated the number of distinct values at more than 10% of
+ * the total row count (a very arbitrary limit), then assume that
+ * stadistinct should scale with the row count rather than be a fixed
+ * value.
*/
if (stats->stadistinct > 0.1 * totalrows)
stats->stadistinct = -(stats->stadistinct / totalrows);
/*
- * Decide how many values are worth storing as most-common values.
- * If we are able to generate a complete MCV list (all the values
- * in the sample will fit, and we think these are all the ones in
- * the table), then do so. Otherwise, store only those values
- * that are significantly more common than the (estimated)
- * average. We set the threshold rather arbitrarily at 25% more
- * than average, with at least 2 instances in the sample.
+ * Decide how many values are worth storing as most-common values. If
+ * we are able to generate a complete MCV list (all the values in the
+ * sample will fit, and we think these are all the ones in the table),
+ * then do so. Otherwise, store only those values that are
+ * significantly more common than the (estimated) average. We set the
+ * threshold rather arbitrarily at 25% more than average, with at
+ * least 2 instances in the sample.
*/
if (track_cnt < track_max && toowide_cnt == 0 &&
stats->stadistinct > 0 &&
stats->stats_valid = true;
stats->stanullfrac = 1.0;
if (is_varwidth)
- stats->stawidth = 0; /* "unknown" */
+ stats->stawidth = 0; /* "unknown" */
else
stats->stawidth = stats->attrtype->typlen;
- stats->stadistinct = 0.0; /* "unknown" */
+ stats->stadistinct = 0.0; /* "unknown" */
}
/* We don't need to bother cleaning up any of our temporary palloc's */
/*
* If it's a variable-width field, add up widths for average width
- * calculation. Note that if the value is toasted, we use the
- * toasted width. We don't bother with this calculation if it's a
- * fixed-width type.
+ * calculation. Note that if the value is toasted, we use the toasted
+ * width. We don't bother with this calculation if it's a fixed-width
+ * type.
*/
if (is_varlena)
{
/*
* If the value is toasted, we want to detoast it just once to
- * avoid repeated detoastings and resultant excess memory
- * usage during the comparisons. Also, check to see if the
- * value is excessively wide, and if so don't detoast at all
- * --- just ignore the value.
+ * avoid repeated detoastings and resultant excess memory usage
+ * during the comparisons. Also, check to see if the value is
+ * excessively wide, and if so don't detoast at all --- just
+ * ignore the value.
*/
if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
{
sizeof(ScalarItem), compare_scalars);
/*
- * Now scan the values in order, find the most common ones, and
- * also accumulate ordering-correlation statistics.
+ * Now scan the values in order, find the most common ones, and also
+ * accumulate ordering-correlation statistics.
*
- * To determine which are most common, we first have to count the
- * number of duplicates of each value. The duplicates are
- * adjacent in the sorted list, so a brute-force approach is to
- * compare successive datum values until we find two that are not
- * equal. However, that requires N-1 invocations of the datum
- * comparison routine, which are completely redundant with work
- * that was done during the sort. (The sort algorithm must at
- * some point have compared each pair of items that are adjacent
- * in the sorted order; otherwise it could not know that it's
- * ordered the pair correctly.) We exploit this by having
+ * To determine which are most common, we first have to count the number
+ * of duplicates of each value. The duplicates are adjacent in the
+ * sorted list, so a brute-force approach is to compare successive
+ * datum values until we find two that are not equal. However, that
+ * requires N-1 invocations of the datum comparison routine, which are
+ * completely redundant with work that was done during the sort. (The
+ * sort algorithm must at some point have compared each pair of items
+ * that are adjacent in the sorted order; otherwise it could not know
+ * that it's ordered the pair correctly.) We exploit this by having
* compare_scalars remember the highest tupno index that each
* ScalarItem has been found equal to. At the end of the sort, a
- * ScalarItem's tupnoLink will still point to itself if and only
- * if it is the last item of its group of duplicates (since the
- * group will be ordered by tupno).
+ * ScalarItem's tupnoLink will still point to itself if and only if it
+ * is the last item of its group of duplicates (since the group will
+ * be ordered by tupno).
*/
corr_xysum = 0;
ndistinct = 0;
{
/*
* Found a new item for the mcv list; find its
- * position, bubbling down old items if needed.
- * Loop invariant is that j points at an empty/
- * replaceable slot.
+ * position, bubbling down old items if needed. Loop
+ * invariant is that j points at an empty/ replaceable
+ * slot.
*/
int j;
else if (toowide_cnt == 0 && nmultiple == ndistinct)
{
/*
- * Every value in the sample appeared more than once. Assume
- * the column has just these values.
+ * Every value in the sample appeared more than once. Assume the
+ * column has just these values.
*/
stats->stadistinct = ndistinct;
}
}
/*
- * If we estimated the number of distinct values at more than 10%
- * of the total row count (a very arbitrary limit), then assume
- * that stadistinct should scale with the row count rather than be
- * a fixed value.
+ * If we estimated the number of distinct values at more than 10% of
+ * the total row count (a very arbitrary limit), then assume that
+ * stadistinct should scale with the row count rather than be a fixed
+ * value.
*/
if (stats->stadistinct > 0.1 * totalrows)
stats->stadistinct = -(stats->stadistinct / totalrows);
/*
- * Decide how many values are worth storing as most-common values.
- * If we are able to generate a complete MCV list (all the values
- * in the sample will fit, and we think these are all the ones in
- * the table), then do so. Otherwise, store only those values
- * that are significantly more common than the (estimated)
- * average. We set the threshold rather arbitrarily at 25% more
- * than average, with at least 2 instances in the sample. Also,
- * we won't suppress values that have a frequency of at least 1/K
- * where K is the intended number of histogram bins; such values
- * might otherwise cause us to emit duplicate histogram bin
- * boundaries.
+ * Decide how many values are worth storing as most-common values. If
+ * we are able to generate a complete MCV list (all the values in the
+ * sample will fit, and we think these are all the ones in the table),
+ * then do so. Otherwise, store only those values that are
+ * significantly more common than the (estimated) average. We set the
+ * threshold rather arbitrarily at 25% more than average, with at
+ * least 2 instances in the sample. Also, we won't suppress values
+ * that have a frequency of at least 1/K where K is the intended
+ * number of histogram bins; such values might otherwise cause us to
+ * emit duplicate histogram bin boundaries.
*/
if (track_cnt == ndistinct && toowide_cnt == 0 &&
stats->stadistinct > 0 &&
}
/*
- * Generate a histogram slot entry if there are at least two
- * distinct values not accounted for in the MCV list. (This
- * ensures the histogram won't collapse to empty or a singleton.)
+ * Generate a histogram slot entry if there are at least two distinct
+ * values not accounted for in the MCV list. (This ensures the
+ * histogram won't collapse to empty or a singleton.)
*/
num_hist = ndistinct - num_mcv;
if (num_hist > num_bins)
/*
* Collapse out the MCV items from the values[] array.
*
- * Note we destroy the values[] array here... but we don't need
- * it for anything more. We do, however, still need
- * values_cnt. nvals will be the number of remaining entries
- * in values[].
+ * Note we destroy the values[] array here... but we don't need it
+ * for anything more. We do, however, still need values_cnt.
+ * nvals will be the number of remaining entries in values[].
*/
if (num_mcv > 0)
{
stats->stats_valid = true;
stats->stanullfrac = 1.0;
if (is_varwidth)
- stats->stawidth = 0; /* "unknown" */
+ stats->stawidth = 0; /* "unknown" */
else
stats->stawidth = stats->attrtype->typlen;
- stats->stadistinct = 0.0; /* "unknown" */
+ stats->stadistinct = 0.0; /* "unknown" */
}
/* We don't need to bother cleaning up any of our temporary palloc's */
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.125 2005/10/06 21:30:32 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.126 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
static List *pendingNotifies = NIL;
-static List *upperPendingNotifies = NIL; /* list of upper-xact
- * lists */
+static List *upperPendingNotifies = NIL; /* list of upper-xact lists */
/*
* State for inbound notifies consists of two flags: one saying whether
if (!AsyncExistsPendingNotify(relname))
{
/*
- * The name list needs to live until end of transaction, so store
- * it in the transaction context.
+ * The name list needs to live until end of transaction, so store it
+ * in the transaction context.
*/
MemoryContext oldcontext;
Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(tuple);
if (listener->listenerpid == MyProcPid &&
- strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
+ strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
{
alreadyListener = true;
/* No need to scan the rest of the table */
Form_pg_listener listener = (Form_pg_listener) GETSTRUCT(tuple);
if (listener->listenerpid == MyProcPid &&
- strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
+ strncmp(NameStr(listener->relname), relname, NAMEDATALEN) == 0)
{
/* Found the matching tuple, delete it */
simple_heap_delete(lRel, &tuple->t_self);
/*
- * We assume there can be only one match, so no need to scan
- * the rest of the table
+ * We assume there can be only one match, so no need to scan the
+ * rest of the table
*/
break;
}
Async_UnlistenOnExit(int code, Datum arg)
{
/*
- * We need to start/commit a transaction for the unlisten, but if
- * there is already an active transaction we had better abort that one
- * first. Otherwise we'd end up committing changes that probably
- * ought to be discarded.
+ * We need to start/commit a transaction for the unlisten, but if there is
+ * already an active transaction we had better abort that one first.
+ * Otherwise we'd end up committing changes that probably ought to be
+ * discarded.
*/
AbortOutOfAnyTransaction();
/* Now we can do the unlisten */
*--------------------------------------------------------------
* AtPrepare_Notify
*
- * This is called at the prepare phase of a two-phase
+ * This is called at the prepare phase of a two-phase
* transaction. Save the state for possible commit later.
*--------------------------------------------------------------
*/
void
AtPrepare_Notify(void)
{
- ListCell *p;
+ ListCell *p;
foreach(p, pendingNotifies)
{
/*
* We can clear the state immediately, rather than needing a separate
- * PostPrepare call, because if the transaction fails we'd just
- * discard the state anyway.
+ * PostPrepare call, because if the transaction fails we'd just discard
+ * the state anyway.
*/
ClearPendingNotifies();
}
nulls[Natts_pg_listener];
if (pendingNotifies == NIL)
- return; /* no NOTIFY statements in this
- * transaction */
+ return; /* no NOTIFY statements in this transaction */
/*
- * NOTIFY is disabled if not normal processing mode. This test used to
- * be in xact.c, but it seems cleaner to do it here.
+ * NOTIFY is disabled if not normal processing mode. This test used to be
+ * in xact.c, but it seems cleaner to do it here.
*/
if (!IsNormalProcessingMode())
{
if (listenerPID == MyProcPid)
{
/*
- * Self-notify: no need to bother with table update. Indeed,
- * we *must not* clear the notification field in this path, or
- * we could lose an outside notify, which'd be bad for
- * applications that ignore self-notify messages.
+ * Self-notify: no need to bother with table update. Indeed, we
+ * *must not* clear the notification field in this path, or we
+ * could lose an outside notify, which'd be bad for applications
+ * that ignore self-notify messages.
*/
if (Trace_notify)
listenerPID);
/*
- * If someone has already notified this listener, we don't
- * bother modifying the table, but we do still send a SIGUSR2
- * signal, just in case that backend missed the earlier signal
- * for some reason. It's OK to send the signal first, because
- * the other guy can't read pg_listener until we unlock it.
+ * If someone has already notified this listener, we don't bother
+ * modifying the table, but we do still send a SIGUSR2 signal,
+ * just in case that backend missed the earlier signal for some
+ * reason. It's OK to send the signal first, because the other
+ * guy can't read pg_listener until we unlock it.
*/
if (kill(listenerPID, SIGUSR2) < 0)
{
/*
- * Get rid of pg_listener entry if it refers to a PID that
- * no longer exists. Presumably, that backend crashed
- * without deleting its pg_listener entries. This code
- * used to only delete the entry if errno==ESRCH, but as
- * far as I can see we should just do it for any failure
- * (certainly at least for EPERM too...)
+ * Get rid of pg_listener entry if it refers to a PID that no
+ * longer exists. Presumably, that backend crashed without
+ * deleting its pg_listener entries. This code used to only
+ * delete the entry if errno==ESRCH, but as far as I can see
+ * we should just do it for any failure (certainly at least
+ * for EPERM too...)
*/
simple_heap_delete(lRel, &lTuple->t_self);
}
else if (listener->notification == 0)
{
- HTSU_Result result;
+ HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
/*
* We cannot use simple_heap_update here because the tuple
* could have been modified by an uncommitted transaction;
- * specifically, since UNLISTEN releases exclusive lock on
- * the table before commit, the other guy could already
- * have tried to unlisten. There are no other cases where
- * we should be able to see an uncommitted update or
- * delete. Therefore, our response to a
- * HeapTupleBeingUpdated result is just to ignore it. We
- * do *not* wait for the other guy to commit --- that
- * would risk deadlock, and we don't want to block while
- * holding the table lock anyway for performance reasons.
- * We also ignore HeapTupleUpdated, which could occur if
- * the other guy commits between our heap_getnext and
+ * specifically, since UNLISTEN releases exclusive lock on the
+ * table before commit, the other guy could already have tried
+ * to unlisten. There are no other cases where we should be
+ * able to see an uncommitted update or delete. Therefore, our
+ * response to a HeapTupleBeingUpdated result is just to
+ * ignore it. We do *not* wait for the other guy to commit
+ * --- that would risk deadlock, and we don't want to block
+ * while holding the table lock anyway for performance
+ * reasons. We also ignore HeapTupleUpdated, which could occur
+ * if the other guy commits between our heap_getnext and
* heap_update calls.
*/
result = heap_update(lRel, &lTuple->t_self, rTuple,
/*
* We do NOT release the lock on pg_listener here; we need to hold it
- * until end of transaction (which is about to happen, anyway) to
- * ensure that notified backends see our tuple updates when they look.
- * Else they might disregard the signal, which would make the
- * application programmer very unhappy.
+ * until end of transaction (which is about to happen, anyway) to ensure
+ * that notified backends see our tuple updates when they look. Else they
+ * might disregard the signal, which would make the application programmer
+ * very unhappy.
*/
heap_close(lRel, NoLock);
GetCurrentTransactionNestLevel() - 2);
/*
- * We could try to eliminate duplicates here, but it seems not
- * worthwhile.
+ * We could try to eliminate duplicates here, but it seems not worthwhile.
*/
pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies);
}
* subxact are no longer interesting, and the space will be freed when
* CurTransactionContext is recycled.
*
- * This routine could be called more than once at a given nesting level
- * if there is trouble during subxact abort. Avoid dumping core by
- * using GetCurrentTransactionNestLevel as the indicator of how far
- * we need to prune the list.
+ * This routine could be called more than once at a given nesting level if
+ * there is trouble during subxact abort. Avoid dumping core by using
+ * GetCurrentTransactionNestLevel as the indicator of how far we need to
+ * prune the list.
*/
while (list_length(upperPendingNotifies) > my_level - 2)
{
/*
* Note: this is a SIGNAL HANDLER. You must be very wary what you do
- * here. Some helpful soul had this routine sprinkled with TPRINTFs,
- * which would likely lead to corruption of stdio buffers if they were
- * ever turned on.
+ * here. Some helpful soul had this routine sprinkled with TPRINTFs, which
+ * would likely lead to corruption of stdio buffers if they were ever
+ * turned on.
*/
/* Don't joggle the elbow of proc_exit */
bool save_ImmediateInterruptOK = ImmediateInterruptOK;
/*
- * We may be called while ImmediateInterruptOK is true; turn it
- * off while messing with the NOTIFY state. (We would have to
- * save and restore it anyway, because PGSemaphore operations
- * inside ProcessIncomingNotify() might reset it.)
+ * We may be called while ImmediateInterruptOK is true; turn it off
+ * while messing with the NOTIFY state. (We would have to save and
+ * restore it anyway, because PGSemaphore operations inside
+ * ProcessIncomingNotify() might reset it.)
*/
ImmediateInterruptOK = false;
/*
* I'm not sure whether some flavors of Unix might allow another
- * SIGUSR2 occurrence to recursively interrupt this routine. To
- * cope with the possibility, we do the same sort of dance that
- * EnableNotifyInterrupt must do --- see that routine for
- * comments.
+ * SIGUSR2 occurrence to recursively interrupt this routine. To cope
+ * with the possibility, we do the same sort of dance that
+ * EnableNotifyInterrupt must do --- see that routine for comments.
*/
notifyInterruptEnabled = 0; /* disable any recursive signal */
notifyInterruptOccurred = 1; /* do at least one iteration */
}
/*
- * Restore ImmediateInterruptOK, and check for interrupts if
- * needed.
+ * Restore ImmediateInterruptOK, and check for interrupts if needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
else
{
/*
- * In this path it is NOT SAFE to do much of anything, except
- * this:
+ * In this path it is NOT SAFE to do much of anything, except this:
*/
notifyInterruptOccurred = 1;
}
return; /* not really idle */
/*
- * This code is tricky because we are communicating with a signal
- * handler that could interrupt us at any point. If we just checked
- * notifyInterruptOccurred and then set notifyInterruptEnabled, we
- * could fail to respond promptly to a signal that happens in between
- * those two steps. (A very small time window, perhaps, but Murphy's
- * Law says you can hit it...) Instead, we first set the enable flag,
- * then test the occurred flag. If we see an unserviced interrupt has
- * occurred, we re-clear the enable flag before going off to do the
- * service work. (That prevents re-entrant invocation of
- * ProcessIncomingNotify() if another interrupt occurs.) If an
- * interrupt comes in between the setting and clearing of
- * notifyInterruptEnabled, then it will have done the service work and
- * left notifyInterruptOccurred zero, so we have to check again after
- * clearing enable. The whole thing has to be in a loop in case
- * another interrupt occurs while we're servicing the first. Once we
- * get out of the loop, enable is set and we know there is no
- * unserviced interrupt.
+ * This code is tricky because we are communicating with a signal handler
+ * that could interrupt us at any point. If we just checked
+ * notifyInterruptOccurred and then set notifyInterruptEnabled, we could
+ * fail to respond promptly to a signal that happens in between those two
+ * steps. (A very small time window, perhaps, but Murphy's Law says you
+ * can hit it...) Instead, we first set the enable flag, then test the
+ * occurred flag. If we see an unserviced interrupt has occurred, we
+ * re-clear the enable flag before going off to do the service work.
+ * (That prevents re-entrant invocation of ProcessIncomingNotify() if
+ * another interrupt occurs.) If an interrupt comes in between the setting
+ * and clearing of notifyInterruptEnabled, then it will have done the
+ * service work and left notifyInterruptOccurred zero, so we have to check
+ * again after clearing enable. The whole thing has to be in a loop in
+ * case another interrupt occurs while we're servicing the first. Once we
+ * get out of the loop, enable is set and we know there is no unserviced
+ * interrupt.
*
- * NB: an overenthusiastic optimizing compiler could easily break this
- * code. Hopefully, they all understand what "volatile" means these
- * days.
+ * NB: an overenthusiastic optimizing compiler could easily break this code.
+ * Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{
* Rewrite the tuple with 0 in notification column.
*
* simple_heap_update is safe here because no one else would have
- * tried to UNLISTEN us, so there can be no uncommitted
- * changes.
+ * tried to UNLISTEN us, so there can be no uncommitted changes.
*/
rTuple = heap_modifytuple(lTuple, tdesc, value, nulls, repl);
simple_heap_update(lRel, &lTuple->t_self, rTuple);
/*
* We do NOT release the lock on pg_listener here; we need to hold it
- * until end of transaction (which is about to happen, anyway) to
- * ensure that other backends see our tuple updates when they look.
- * Otherwise, a transaction started after this one might mistakenly
- * think it doesn't need to send this backend a new NOTIFY.
+ * until end of transaction (which is about to happen, anyway) to ensure
+ * that other backends see our tuple updates when they look. Otherwise, a
+ * transaction started after this one might mistakenly think it doesn't
+ * need to send this backend a new NOTIFY.
*/
heap_close(lRel, NoLock);
CommitTransactionCommand();
/*
- * Must flush the notify messages to ensure frontend gets them
- * promptly.
+ * Must flush the notify messages to ensure frontend gets them promptly.
*/
pq_flush();
/*
* NOTE: we do not do pq_flush() here. For a self-notify, it will
* happen at the end of the transaction, and for incoming notifies
- * ProcessIncomingNotify will do it after finding all the
- * notifies.
+ * ProcessIncomingNotify will do it after finding all the notifies.
*/
}
else
ClearPendingNotifies(void)
{
/*
- * We used to have to explicitly deallocate the list members and
- * nodes, because they were malloc'd. Now, since we know they are
- * palloc'd in CurTransactionContext, we need not do that --- they'll
- * go away automatically at transaction exit. We need only reset the
- * list head pointer.
+ * We used to have to explicitly deallocate the list members and nodes,
+ * because they were malloc'd. Now, since we know they are palloc'd in
+ * CurTransactionContext, we need not do that --- they'll go away
+ * automatically at transaction exit. We need only reset the list head
+ * pointer.
*/
pendingNotifies = NIL;
}
void *recdata, uint32 len)
{
/*
- * Set up to issue the NOTIFY at the end of my own
- * current transaction. (XXX this has some issues if my own
- * transaction later rolls back, or if there is any significant
- * delay before I commit. OK for now because we disallow
- * COMMIT PREPARED inside a transaction block.)
+ * Set up to issue the NOTIFY at the end of my own current transaction.
+ * (XXX this has some issues if my own transaction later rolls back, or if
+ * there is any significant delay before I commit. OK for now because we
+ * disallow COMMIT PREPARED inside a transaction block.)
*/
Async_Notify((char *) recdata);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.139 2005/08/26 03:07:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.140 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("index \"%s\" for table \"%s\" does not exist",
- stmt->indexname, stmt->relation->relname)));
+ errmsg("index \"%s\" for table \"%s\" does not exist",
+ stmt->indexname, stmt->relation->relname)));
}
/* All other checks are done in cluster_rel() */
else
{
/*
- * This is the "multi relation" case. We need to cluster all
- * tables that have some index with indisclustered set.
+ * This is the "multi relation" case. We need to cluster all tables
+ * that have some index with indisclustered set.
*/
MemoryContext cluster_context;
List *rvs;
ListCell *rv;
/*
- * We cannot run this form of CLUSTER inside a user transaction
- * block; we'd be holding locks way too long.
+ * We cannot run this form of CLUSTER inside a user transaction block;
+ * we'd be holding locks way too long.
*/
PreventTransactionChain((void *) stmt, "CLUSTER");
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of PortalContext, it will go away even in case
- * of error.
+ * Since it is a child of PortalContext, it will go away even in case of
+ * error.
*/
cluster_context = AllocSetContextCreate(PortalContext,
"Cluster",
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives
- * in cluster_context.
+ * Build the list of relations to cluster. Note that this lives in
+ * cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
CHECK_FOR_INTERRUPTS();
/*
- * Since we may open a new transaction for each relation, we have to
- * check that the relation still is what we think it is.
+ * Since we may open a new transaction for each relation, we have to check
+ * that the relation still is what we think it is.
*
- * If this is a single-transaction CLUSTER, we can skip these tests. We
- * *must* skip the one on indisclustered since it would reject an
- * attempt to cluster a not-previously-clustered index.
+ * If this is a single-transaction CLUSTER, we can skip these tests. We *must*
+ * skip the one on indisclustered since it would reject an attempt to
+ * cluster a not-previously-clustered index.
*/
if (recheck)
{
}
/*
- * We grab exclusive access to the target rel and index for the
- * duration of the transaction. (This is redundant for the single-
- * transaction case, since cluster() already did it.) The index lock
- * is taken inside check_index_is_clusterable.
+ * We grab exclusive access to the target rel and index for the duration
+ * of the transaction. (This is redundant for the single- transaction
+ * case, since cluster() already did it.) The index lock is taken inside
+ * check_index_is_clusterable.
*/
OldHeap = heap_open(rvtc->tableOid, AccessExclusiveLock);
RelationGetRelationName(OldHeap))));
/*
- * Disallow clustering on incomplete indexes (those that might not
- * index every row of the relation). We could relax this by making a
- * separate seqscan pass over the table to copy the missing rows, but
- * that seems expensive and tedious.
+ * Disallow clustering on incomplete indexes (those that might not index
+ * every row of the relation). We could relax this by making a separate
+ * seqscan pass over the table to copy the missing rows, but that seems
+ * expensive and tedious.
*/
if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster on partial index \"%s\"",
RelationGetRelationName(OldIndex))));
-
+
if (!OldIndex->rd_am->amindexnulls)
{
AttrNumber colno;
/*
- * If the AM doesn't index nulls, then it's a partial index unless
- * we can prove all the rows are non-null. Note we only need look
- * at the first column; multicolumn-capable AMs are *required* to
- * index nulls in columns after the first.
+ * If the AM doesn't index nulls, then it's a partial index unless we
+ * can prove all the rows are non-null. Note we only need look at the
+ * first column; multicolumn-capable AMs are *required* to index nulls
+ * in columns after the first.
*/
colno = OldIndex->rd_index->indkey.values[0];
if (colno > 0)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster on index \"%s\" because access method\n"
"does not handle null values",
- RelationGetRelationName(OldIndex)),
+ RelationGetRelationName(OldIndex)),
errhint("You may be able to work around this by marking column \"%s\" NOT NULL%s",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname),
- recheck ? ",\nor use ALTER TABLE ... SET WITHOUT CLUSTER to remove the cluster\n"
- "specification from the table." : ".")));
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname),
+ recheck ? ",\nor use ALTER TABLE ... SET WITHOUT CLUSTER to remove the cluster\n"
+ "specification from the table." : ".")));
}
else if (colno < 0)
{
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster on expressional index \"%s\" because its index access\n"
"method does not handle null values",
- RelationGetRelationName(OldIndex))));
+ RelationGetRelationName(OldIndex))));
}
/*
- * Disallow clustering system relations. This will definitely NOT
- * work for shared relations (we have no way to update pg_class rows
- * in other databases), nor for nailed-in-cache relations (the
- * relfilenode values for those are hardwired, see relcache.c). It
- * might work for other system relations, but I ain't gonna risk it.
+ * Disallow clustering system relations. This will definitely NOT work
+ * for shared relations (we have no way to update pg_class rows in other
+ * databases), nor for nailed-in-cache relations (the relfilenode values
+ * for those are hardwired, see relcache.c). It might work for other
+ * system relations, but I ain't gonna risk it.
*/
if (IsSystemRelation(OldHeap))
ereport(ERROR,
RelationGetRelationName(OldHeap))));
/*
- * Don't allow cluster on temp tables of other backends ... their
- * local buffer manager is not going to cope.
+ * Don't allow cluster on temp tables of other backends ... their local
+ * buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temporary tables of other sessions")));
+ errmsg("cannot cluster temporary tables of other sessions")));
/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
/*
- * Unset the bit if set. We know it's wrong because we checked
- * this earlier.
+ * Unset the bit if set. We know it's wrong because we checked this
+ * earlier.
*/
if (indexForm->indisclustered)
{
heap_close(OldHeap, NoLock);
/*
- * Create the new heap, using a temporary name in the same namespace
- * as the existing table. NOTE: there is some risk of collision with
- * user relnames. Working around this seems more trouble than it's
- * worth; in particular, we can't create the new heap in a different
- * namespace from the old, or we will have problems with the TEMP
- * status of temp tables.
+ * Create the new heap, using a temporary name in the same namespace as
+ * the existing table. NOTE: there is some risk of collision with user
+ * relnames. Working around this seems more trouble than it's worth; in
+ * particular, we can't create the new heap in a different namespace from
+ * the old, or we will have problems with the TEMP status of temp tables.
*/
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tableOid);
OIDNewHeap = make_new_heap(tableOid, NewHeapName, tableSpace);
/*
- * We don't need CommandCounterIncrement() because make_new_heap did
- * it.
+ * We don't need CommandCounterIncrement() because make_new_heap did it.
*/
/*
/* performDeletion does CommandCounterIncrement at end */
/*
- * Rebuild each index on the relation (but not the toast table, which
- * is all-new at this point). We do not need
- * CommandCounterIncrement() because reindex_relation does it.
+ * Rebuild each index on the relation (but not the toast table, which is
+ * all-new at this point). We do not need CommandCounterIncrement()
+ * because reindex_relation does it.
*/
reindex_relation(tableOid, false);
}
allowSystemTableMods);
/*
- * Advance command counter so that the newly-created relation's
- * catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's catalog
+ * tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
* If necessary, create a TOAST table for the new relation. Note that
- * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
- * that the TOAST table will be visible for insertion.
+ * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
+ * the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(OIDNewHeap, true);
OldIndex = index_open(OIDOldIndex);
/*
- * Their tuple descriptors should be exactly alike, but here we only
- * need assume that they have the same number of columns.
+ * Their tuple descriptors should be exactly alike, but here we only need
+ * assume that they have the same number of columns.
*/
oldTupDesc = RelationGetDescr(OldHeap);
newTupDesc = RelationGetDescr(NewHeap);
* We cannot simply pass the tuple to heap_insert(), for several
* reasons:
*
- * 1. heap_insert() will overwrite the commit-status fields of the
- * tuple it's handed. This would trash the source relation, which is
- * bad news if we abort later on. (This was a bug in releases thru
- * 7.0)
+ * 1. heap_insert() will overwrite the commit-status fields of the tuple
+ * it's handed. This would trash the source relation, which is bad
+ * news if we abort later on. (This was a bug in releases thru 7.0)
*
- * 2. We'd like to squeeze out the values of any dropped columns,
- * both to save space and to ensure we have no corner-case failures.
- * (It's possible for example that the new table hasn't got a TOAST
- * table and so is unable to store any large values of dropped cols.)
+ * 2. We'd like to squeeze out the values of any dropped columns, both to
+ * save space and to ensure we have no corner-case failures. (It's
+ * possible for example that the new table hasn't got a TOAST table
+ * and so is unable to store any large values of dropped cols.)
*
* 3. The tuple might not even be legal for the new table; this is
* currently only known to happen as an after-effect of ALTER TABLE
CatalogCloseIndexes(indstate);
/*
- * If we have toast tables associated with the relations being
- * swapped, change their dependency links to re-associate them with
- * their new owning relations. Otherwise the wrong one will get
- * dropped ...
+ * If we have toast tables associated with the relations being swapped,
+ * change their dependency links to re-associate them with their new
+ * owning relations. Otherwise the wrong one will get dropped ...
*
* NOTE: it is possible that only one table has a toast table; this can
- * happen in CLUSTER if there were dropped columns in the old table,
- * and in ALTER TABLE when adding or changing type of columns.
+ * happen in CLUSTER if there were dropped columns in the old table, and
+ * in ALTER TABLE when adding or changing type of columns.
*
- * NOTE: at present, a TOAST table's only dependency is the one on its
- * owning table. If more are ever created, we'd need to use something
- * more selective than deleteDependencyRecordsFor() to get rid of only
- * the link we want.
+ * NOTE: at present, a TOAST table's only dependency is the one on its owning
+ * table. If more are ever created, we'd need to use something more
+ * selective than deleteDependencyRecordsFor() to get rid of only the link
+ * we want.
*/
if (relform1->reltoastrelid || relform2->reltoastrelid)
{
/*
* Blow away the old relcache entries now. We need this kluge because
- * relcache.c keeps a link to the smgr relation for the physical file,
- * and that will be out of date as soon as we do
- * CommandCounterIncrement. Whichever of the rels is the second to be
- * cleared during cache invalidation will have a dangling reference to
- * an already-deleted smgr relation. Rather than trying to avoid this
- * by ordering operations just so, it's easiest to not have the
- * relcache entries there at all. (Fortunately, since one of the
- * entries is local in our transaction, it's sufficient to clear out
- * our own relcache this way; the problem cannot arise for other
- * backends when they see our update on the non-local relation.)
+ * relcache.c keeps a link to the smgr relation for the physical file, and
+ * that will be out of date as soon as we do CommandCounterIncrement.
+ * Whichever of the rels is the second to be cleared during cache
+ * invalidation will have a dangling reference to an already-deleted smgr
+ * relation. Rather than trying to avoid this by ordering operations just
+ * so, it's easiest to not have the relcache entries there at all.
+ * (Fortunately, since one of the entries is local in our transaction,
+ * it's sufficient to clear out our own relcache this way; the problem
+ * cannot arise for other backends when they see our update on the
+ * non-local relation.)
*/
RelationForgetRelation(r1);
RelationForgetRelation(r2);
/*
* Get all indexes that have indisclustered set and are owned by
- * appropriate user. System relations or nailed-in relations cannot
- * ever have indisclustered set, because CLUSTER will refuse to set it
- * when called with one of them as argument.
+ * appropriate user. System relations or nailed-in relations cannot ever
+ * have indisclustered set, because CLUSTER will refuse to set it when
+ * called with one of them as argument.
*/
indRelation = heap_open(IndexRelationId, AccessShareLock);
ScanKeyInit(&entry,
continue;
/*
- * We have to build the list in a different memory context so it
- * will survive the cross-transaction processing
+ * We have to build the list in a different memory context so it will
+ * survive the cross-transaction processing
*/
old_context = MemoryContextSwitchTo(cluster_context);
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.83 2005/04/14 20:03:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.84 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
tgtrel = makeRangeVarFromNameList(relname);
/*
- * Open the relation. We do this mainly to acquire a lock that
- * ensures no one else drops the relation before we commit. (If they
- * did, they'd fail to remove the entry we are about to make in
- * pg_description.)
+ * Open the relation. We do this mainly to acquire a lock that ensures no
+ * one else drops the relation before we commit. (If they did, they'd
+ * fail to remove the entry we are about to make in pg_description.)
*/
relation = relation_openrv(tgtrel, AccessShareLock);
database = strVal(linitial(qualname));
/*
- * We cannot currently support cross-database comments (since other
- * DBs cannot see pg_description of this database). So, we reject
- * attempts to comment on a database other than the current one.
- * Someday this might be improved, but it would take a redesigned
- * infrastructure.
+ * We cannot currently support cross-database comments (since other DBs
+ * cannot see pg_description of this database). So, we reject attempts to
+ * comment on a database other than the current one. Someday this might be
+ * improved, but it would take a redesigned infrastructure.
*
- * When loading a dump, we may see a COMMENT ON DATABASE for the old name
- * of the database. Erroring out would prevent pg_restore from
- * completing (which is really pg_restore's fault, but for now we will
- * work around the problem here). Consensus is that the best fix is
- * to treat wrong database name as a WARNING not an ERROR.
+ * When loading a dump, we may see a COMMENT ON DATABASE for the old name of
+ * the database. Erroring out would prevent pg_restore from completing
+ * (which is really pg_restore's fault, but for now we will work around
+ * the problem here). Consensus is that the best fix is to treat wrong
+ * database name as a WARNING not an ERROR.
*/
/* First get the database OID */
/* Only allow comments on the current database */
if (oid != MyDatabaseId)
{
- ereport(WARNING, /* throw just a warning so pg_restore
- * doesn't fail */
+ ereport(WARNING, /* throw just a warning so pg_restore doesn't
+ * fail */
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("database comments may only be applied to the current database")));
return;
ForwardScanDirection)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple rules named \"%s\"", rulename),
- errhint("Specify a relation name as well as a rule name.")));
+ errmsg("there are multiple rules named \"%s\"", rulename),
+ errhint("Specify a relation name as well as a rule name.")));
heap_endscan(scanDesc);
heap_close(RewriteRelation, AccessShareLock);
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("rule \"%s\" for relation \"%s\" does not exist",
- rulename, RelationGetRelationName(relation))));
+ errmsg("rule \"%s\" for relation \"%s\" does not exist",
+ rulename, RelationGetRelationName(relation))));
Assert(reloid == ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class);
ruleoid = HeapTupleGetOid(tuple);
ReleaseSysCache(tuple);
RelationGetRelationName(relation));
/*
- * Fetch the trigger tuple from pg_trigger. There can be only one
- * because of the unique index.
+ * Fetch the trigger tuple from pg_trigger. There can be only one because
+ * of the unique index.
*/
pg_trigger = heap_open(TriggerRelationId, AccessShareLock);
ScanKeyInit(&entry[0],
RelationGetRelationName(relation));
/*
- * Fetch the constraint tuple from pg_constraint. There may be more
- * than one match, because constraints are not required to have unique
- * names; if so, error out.
+ * Fetch the constraint tuple from pg_constraint. There may be more than
+ * one match, because constraints are not required to have unique names;
+ * if so, error out.
*/
pg_constraint = heap_open(ConstraintRelationId, AccessShareLock);
if (OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("table \"%s\" has multiple constraints named \"%s\"",
- RelationGetRelationName(relation), conName)));
+ errmsg("table \"%s\" has multiple constraints named \"%s\"",
+ RelationGetRelationName(relation), conName)));
conOid = HeapTupleGetOid(tuple);
}
}
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" for table \"%s\" does not exist",
- conName, RelationGetRelationName(relation))));
+ errmsg("constraint \"%s\" for table \"%s\" does not exist",
+ conName, RelationGetRelationName(relation))));
/* Call CreateComments() to create/drop the comments */
CreateComments(conOid, ConstraintRelationId, 0, comment);
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to comment on procedural language")));
+ errmsg("must be superuser to comment on procedural language")));
/* Call CreateComments() to create/drop the comments */
CreateComments(oid, LanguageRelationId, 0, comment);
* strings.
*/
loid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(strVal(node))));
+ CStringGetDatum(strVal(node))));
break;
default:
elog(ERROR, "unrecognized node type: %d",
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.22 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.23 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
to_encoding_name)));
/*
- * Check the existence of the conversion function. Function name could
- * be a qualified name.
+ * Check the existence of the conversion function. Function name could be
+ * a qualified name.
*/
funcoid = LookupFuncName(func_name, sizeof(funcargs) / sizeof(Oid),
funcargs, false);
NameListToString(func_name));
/*
- * All seem ok, go ahead (possible failure would be a duplicate
- * conversion name)
+ * All seem ok, go ahead (possible failure would be a duplicate conversion
+ * name)
*/
ConversionCreate(conversion_name, namespaceId, GetUserId(),
from_encoding, to_encoding, funcoid, stmt->def);
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("conversion \"%s\" already exists in schema \"%s\"",
- newname, get_namespace_name(namespaceOid))));
+ errmsg("conversion \"%s\" already exists in schema \"%s\"",
+ newname, get_namespace_name(namespaceOid))));
/* must be owner */
- if (!pg_conversion_ownercheck(conversionOid,GetUserId()))
+ if (!pg_conversion_ownercheck(conversionOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameListToString(name));
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_conversion_ownercheck(HeapTupleGetOid(tup),GetUserId()))
+ if (!pg_conversion_ownercheck(HeapTupleGetOid(tup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameListToString(name));
}
/*
- * Modify the owner --- okay to scribble on tup because it's a
- * copy
+ * Modify the owner --- okay to scribble on tup because it's a copy
*/
convForm->conowner = newOwnerId;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.252 2005/10/03 23:43:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.253 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool fe_eof; /* true if detected end of copy data */
EolType eol_type; /* EOL type of input */
int client_encoding; /* remote side's character encoding */
- bool need_transcoding; /* client encoding diff from server? */
+ bool need_transcoding; /* client encoding diff from server? */
bool client_only_encoding; /* encoding not valid on server? */
/* parameters from the COPY command */
bool csv_mode; /* Comma Separated Value format? */
bool header_line; /* CSV header line? */
char *null_print; /* NULL marker string (server encoding!) */
- int null_print_len; /* length of same */
+ int null_print_len; /* length of same */
char *delim; /* column delimiter (must be 1 byte) */
char *quote; /* CSV quote char (must be 1 byte) */
char *escape; /* CSV escape char (must be 1 byte) */
- List *force_quote_atts; /* integer list of attnums to FQ */
- List *force_notnull_atts; /* integer list of attnums to FNN */
+ List *force_quote_atts; /* integer list of attnums to FQ */
+ List *force_notnull_atts; /* integer list of attnums to FNN */
/* these are just for error messages, see copy_in_error_callback */
const char *cur_relname; /* table name for error messages */
/*
* These variables are used to reduce overhead in textual COPY FROM.
*
- * attribute_buf holds the separated, de-escaped text for each field of
- * the current line. The CopyReadAttributes functions return arrays of
+ * attribute_buf holds the separated, de-escaped text for each field of the
+ * current line. The CopyReadAttributes functions return arrays of
* pointers into this buffer. We avoid palloc/pfree overhead by re-using
* the buffer on each cycle.
*/
StringInfoData attribute_buf;
/*
- * Similarly, line_buf holds the whole input line being processed.
- * The input cycle is first to read the whole line into line_buf,
- * convert it to server encoding there, and then extract the individual
- * attribute fields into attribute_buf. line_buf is preserved unmodified
- * so that we can display it in error messages if appropriate.
+ * Similarly, line_buf holds the whole input line being processed. The
+ * input cycle is first to read the whole line into line_buf, convert it
+ * to server encoding there, and then extract the individual attribute
+ * fields into attribute_buf. line_buf is preserved unmodified so that we
+ * can display it in error messages if appropriate.
*/
StringInfoData line_buf;
- bool line_buf_converted; /* converted to server encoding? */
+ bool line_buf_converted; /* converted to server encoding? */
/*
* Finally, raw_buf holds raw data read from the data source (file or
- * client connection). CopyReadLine parses this data sufficiently to
+ * client connection). CopyReadLine parses this data sufficiently to
* locate line boundaries, then transfers the data to line_buf and
* converts it. Note: we guarantee that there is a \0 at
* raw_buf[raw_buf_len].
static bool CopyReadLine(CopyState cstate);
static bool CopyReadLineText(CopyState cstate);
static bool CopyReadLineCSV(CopyState cstate);
-static int CopyReadAttributesText(CopyState cstate, int maxfields,
- char **fieldvals);
-static int CopyReadAttributesCSV(CopyState cstate, int maxfields,
- char **fieldvals);
+static int CopyReadAttributesText(CopyState cstate, int maxfields,
+ char **fieldvals);
+static int CopyReadAttributesCSV(CopyState cstate, int maxfields,
+ char **fieldvals);
static Datum CopyReadBinaryAttribute(CopyState cstate,
- int column_no, FmgrInfo *flinfo,
- Oid typioparam, int32 typmod,
- bool *isnull);
+ int column_no, FmgrInfo *flinfo,
+ Oid typioparam, int32 typmod,
+ bool *isnull);
static void CopyAttributeOutText(CopyState cstate, char *server_string);
static void CopyAttributeOutCSV(CopyState cstate, char *server_string,
- bool use_quote);
+ bool use_quote);
static List *CopyGetAttnums(Relation rel, List *attnamelist);
static char *limit_printout_length(const char *str);
static void CopySendString(CopyState cstate, const char *str);
static void CopySendChar(CopyState cstate, char c);
static void CopySendEndOfRow(CopyState cstate);
-static int CopyGetData(CopyState cstate, void *databuf,
- int minread, int maxread);
+static int CopyGetData(CopyState cstate, void *databuf,
+ int minread, int maxread);
static void CopySendInt32(CopyState cstate, int32 val);
static bool CopyGetInt32(CopyState cstate, int32 *val);
static void CopySendInt16(CopyState cstate, int16 val);
if (cstate->binary)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY BINARY is not supported to stdout or from stdin")));
+ errmsg("COPY BINARY is not supported to stdout or from stdin")));
pq_putemptymessage('H');
/* grottiness needed for old COPY OUT protocol */
pq_startcopyout();
if (cstate->binary)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY BINARY is not supported to stdout or from stdin")));
+ errmsg("COPY BINARY is not supported to stdout or from stdin")));
pq_putemptymessage('B');
/* grottiness needed for old COPY OUT protocol */
pq_startcopyout();
if (cstate->binary)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY BINARY is not supported to stdout or from stdin")));
+ errmsg("COPY BINARY is not supported to stdout or from stdin")));
pq_putemptymessage('G');
cstate->copy_dest = COPY_OLD_FE;
}
if (cstate->binary)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY BINARY is not supported to stdout or from stdin")));
+ errmsg("COPY BINARY is not supported to stdout or from stdin")));
pq_putemptymessage('D');
cstate->copy_dest = COPY_OLD_FE;
}
* CopyGetData reads data from the source (file or frontend)
*
* We attempt to read at least minread, and at most maxread, bytes from
- * the source. The actual number of bytes read is returned; if this is
+ * the source. The actual number of bytes read is returned; if this is
* less than minread, EOF was detected.
*
* Note: when copying from the frontend, we expect a proper EOF mark per
static int
CopyGetData(CopyState cstate, void *databuf, int minread, int maxread)
{
- int bytesread = 0;
+ int bytesread = 0;
switch (cstate->copy_dest)
{
errmsg("could not read from COPY file: %m")));
break;
case COPY_OLD_FE:
+
/*
* We cannot read more than minread bytes (which in practice is 1)
* because old protocol doesn't have any clear way of separating
- * the COPY stream from following data. This is slow, but not
- * any slower than the code path was originally, and we don't
- * care much anymore about the performance of old protocol.
+ * the COPY stream from following data. This is slow, but not any
+ * slower than the code path was originally, and we don't care
+ * much anymore about the performance of old protocol.
*/
if (pq_getbytes((char *) databuf, minread))
{
if (mtype == EOF)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
if (pq_getmessage(cstate->fe_msgbuf, 0))
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
switch (mtype)
{
case 'd': /* CopyData */
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("COPY from stdin failed: %s",
- pq_getmsgstring(cstate->fe_msgbuf))));
+ pq_getmsgstring(cstate->fe_msgbuf))));
break;
case 'H': /* Flush */
case 'S': /* Sync */
/*
- * Ignore Flush/Sync for the convenience of
- * client libraries (such as libpq) that may
- * send those without noticing that the
- * command they just sent was COPY.
+ * Ignore Flush/Sync for the convenience of client
+ * libraries (such as libpq) that may send those
+ * without noticing that the command they just
+ * sent was COPY.
*/
goto readmessage;
default:
static bool
CopyLoadRawBuf(CopyState cstate)
{
- int nbytes;
- int inbytes;
+ int nbytes;
+ int inbytes;
if (cstate->raw_buf_index < cstate->raw_buf_len)
{
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY delimiter must be a single character")));
- /* Check header */
+ /* Check header */
if (!cstate->csv_mode && cstate->header_line)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
if (force_quote != NIL && is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force quote only available using COPY TO")));
+ errmsg("COPY force quote only available using COPY TO")));
/* Check force_notnull */
if (!cstate->csv_mode && force_notnull != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force not null available only in CSV mode")));
+ errmsg("COPY force not null available only in CSV mode")));
if (force_notnull != NIL && !is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force not null only available using COPY FROM")));
+ errmsg("COPY force not null only available using COPY FROM")));
/* Don't allow the delimiter to appear in the null string. */
if (strchr(cstate->null_print, cstate->delim[0]) != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY delimiter must not appear in the NULL specification")));
+ errmsg("COPY delimiter must not appear in the NULL specification")));
/* Don't allow the CSV quote char to appear in the null string. */
if (cstate->csv_mode &&
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
/* Don't allow COPY w/ OIDs to or from a table without them */
if (cstate->oids && !cstate->rel->rd_rel->relhasoids)
if (!list_member_int(cstate->attnumlist, attnum))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
- NameStr(attr[attnum - 1]->attname))));
+ errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
+ NameStr(attr[attnum - 1]->attname))));
}
}
if (!list_member_int(cstate->attnumlist, attnum))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("FORCE NOT NULL column \"%s\" not referenced by COPY",
- NameStr(attr[attnum - 1]->attname))));
+ errmsg("FORCE NOT NULL column \"%s\" not referenced by COPY",
+ NameStr(attr[attnum - 1]->attname))));
}
}
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy to non-table relation \"%s\"",
- RelationGetRelationName(cstate->rel))));
+ errmsg("cannot copy to non-table relation \"%s\"",
+ RelationGetRelationName(cstate->rel))));
}
if (pipe)
{
if (cstate->copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for reading: %m",
- filename)));
+ errmsg("could not open file \"%s\" for reading: %m",
+ filename)));
fstat(fileno(cstate->copy_file), &st);
if (S_ISDIR(st.st_mode))
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy from non-table relation \"%s\"",
- RelationGetRelationName(cstate->rel))));
+ errmsg("cannot copy from non-table relation \"%s\"",
+ RelationGetRelationName(cstate->rel))));
}
if (pipe)
{
struct stat st;
/*
- * Prevent write to relative path ... too easy to shoot
- * oneself in the foot by overwriting a database file ...
+ * Prevent write to relative path ... too easy to shoot oneself in
+ * the foot by overwriting a database file ...
*/
if (!is_absolute_path(filename))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("relative path not allowed for COPY to file")));
+ errmsg("relative path not allowed for COPY to file")));
oumask = umask((mode_t) 022);
cstate->copy_file = AllocateFile(filename, PG_BINARY_W);
if (cstate->copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for writing: %m",
- filename)));
+ errmsg("could not open file \"%s\" for writing: %m",
+ filename)));
fstat(fileno(cstate->copy_file), &st);
if (S_ISDIR(st.st_mode))
}
/*
- * Close the relation. If reading, we can release the AccessShareLock
- * we got; if writing, we should hold the lock until end of
- * transaction to ensure that updates will be committed before lock is
- * released.
+ * Close the relation. If reading, we can release the AccessShareLock we
+ * got; if writing, we should hold the lock until end of transaction to
+ * ensure that updates will be committed before lock is released.
*/
heap_close(cstate->rel, (is_from ? NoLock : AccessShareLock));
{
/*
* Make sure we turn off old-style COPY OUT mode upon error. It is
- * okay to do this in all cases, since it does nothing if the mode
- * is not on.
+ * okay to do this in all cases, since it does nothing if the mode is
+ * not on.
*/
pq_endcopyout(true);
PG_RE_THROW();
attr = tupDesc->attrs;
num_phys_attrs = tupDesc->natts;
attr_count = list_length(cstate->attnumlist);
- null_print_client = cstate->null_print; /* default */
+ null_print_client = cstate->null_print; /* default */
/* Get info about the columns we need to process. */
out_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo));
/*
* Create a temporary memory context that we can reset once per row to
- * recover palloc'd memory. This avoids any problems with leaks
- * inside datatype output routines, and should be faster than retail
- * pfree's anyway. (We don't need a whole econtext as CopyFrom does.)
+ * recover palloc'd memory. This avoids any problems with leaks inside
+ * datatype output routines, and should be faster than retail pfree's
+ * anyway. (We don't need a whole econtext as CopyFrom does.)
*/
mycontext = AllocSetContextCreate(CurrentMemoryContext,
"COPY TO",
/* if a header has been requested send the line */
if (cstate->header_line)
{
- bool hdr_delim = false;
-
+ bool hdr_delim = false;
+
foreach(cur, cstate->attnumlist)
{
int attnum = lfirst_int(cur);
- char *colname;
+ char *colname;
if (hdr_delim)
CopySendChar(cstate, cstate->delim[0]);
if (cstate->oids)
{
string = DatumGetCString(DirectFunctionCall1(oidout,
- ObjectIdGetDatum(HeapTupleGetOid(tuple))));
+ ObjectIdGetDatum(HeapTupleGetOid(tuple))));
CopySendString(cstate, string);
need_delim = true;
}
if (cstate->cur_attname && cstate->cur_attval)
{
/* error is relevant to a particular column */
- char *attval;
+ char *attval;
attval = limit_printout_length(cstate->cur_attval);
errcontext("COPY %s, line %d, column %s: \"%s\"",
/* error is relevant to a particular line */
if (cstate->line_buf_converted || !cstate->need_transcoding)
{
- char *lineval;
+ char *lineval;
lineval = limit_printout_length(cstate->line_buf.data);
errcontext("COPY %s, line %d: \"%s\"",
else
{
/*
- * Here, the line buffer is still in a foreign encoding,
- * and indeed it's quite likely that the error is precisely
- * a failure to do encoding conversion (ie, bad data). We
- * dare not try to convert it, and at present there's no way
- * to regurgitate it without conversion. So we have to punt
- * and just report the line number.
+ * Here, the line buffer is still in a foreign encoding, and
+ * indeed it's quite likely that the error is precisely a
+ * failure to do encoding conversion (ie, bad data). We dare
+ * not try to convert it, and at present there's no way to
+ * regurgitate it without conversion. So we have to punt and
+ * just report the line number.
*/
errcontext("COPY %s, line %d",
cstate->cur_relname, cstate->cur_lineno);
/*
* We need a ResultRelInfo so we can use the regular executor's
- * index-entry-making machinery. (There used to be a huge amount of
- * code here that basically duplicated execUtils.c ...)
+ * index-entry-making machinery. (There used to be a huge amount of code
+ * here that basically duplicated execUtils.c ...)
*/
resultRelInfo = makeNode(ResultRelInfo);
resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
/*
* Pick up the required catalog information for each attribute in the
- * relation, including the input function, the element type (to pass
- * to the input function), and info about defaults and constraints.
- * (Which input function we use depends on text/binary format choice.)
+ * relation, including the input function, the element type (to pass to
+ * the input function), and info about defaults and constraints. (Which
+ * input function we use depends on text/binary format choice.)
*/
in_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo));
typioparams = (Oid *) palloc(num_phys_attrs * sizeof(Oid));
/* Fetch the input function and typioparam info */
if (cstate->binary)
getTypeBinaryInputInfo(attr[attnum - 1]->atttypid,
- &in_func_oid, &typioparams[attnum - 1]);
+ &in_func_oid, &typioparams[attnum - 1]);
else
getTypeInputInfo(attr[attnum - 1]->atttypid,
&in_func_oid, &typioparams[attnum - 1]);
Node *node;
/*
- * Easiest way to do this is to use parse_coerce.c to set up
- * an expression that checks the constraints. (At present,
- * the expression might contain a length-coercion-function
- * call and/or CoerceToDomain nodes.) The bottom of the
- * expression is a Param node so that we can fill in the
- * actual datum during the data input loop.
+ * Easiest way to do this is to use parse_coerce.c to set up an
+ * expression that checks the constraints. (At present, the
+ * expression might contain a length-coercion-function call and/or
+ * CoerceToDomain nodes.) The bottom of the expression is a Param
+ * node so that we can fill in the actual datum during the data
+ * input loop.
*/
prm = makeNode(Param);
prm->paramkind = PARAM_EXEC;
AfterTriggerBeginQuery();
/*
- * Check BEFORE STATEMENT insertion triggers. It's debateable whether
- * we should do this for COPY, since it's not really an "INSERT"
- * statement as such. However, executing these triggers maintains
- * consistency with the EACH ROW triggers that we already fire on
- * COPY.
+ * Check BEFORE STATEMENT insertion triggers. It's debateable whether we
+ * should do this for COPY, since it's not really an "INSERT" statement as
+ * such. However, executing these triggers maintains consistency with the
+ * EACH ROW triggers that we already fire on COPY.
*/
ExecBSInsertTriggers(estate, resultRelInfo);
if ((tmp >> 16) != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unrecognized critical flags in COPY file header")));
+ errmsg("unrecognized critical flags in COPY file header")));
/* Header extension length */
if (!CopyGetInt32(cstate, &tmp) ||
tmp < 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (missing length)")));
+ errmsg("invalid COPY file header (missing length)")));
/* Skip extension header, if present */
while (tmp-- > 0)
{
if (CopyGetData(cstate, readSig, 1, 1) != 1)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (wrong length)")));
+ errmsg("invalid COPY file header (wrong length)")));
}
}
/*
* EOF at start of line means we're done. If we see EOF after
- * some characters, we act as though it was newline followed
- * by EOF, ie, process the line and then exit loop on next
- * iteration.
+ * some characters, we act as though it was newline followed by
+ * EOF, ie, process the line and then exit loop on next iteration.
*/
if (done && cstate->line_buf.len == 0)
break;
cstate->cur_attname = "oid";
cstate->cur_attval = string;
loaded_oid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(string)));
+ CStringGetDatum(string)));
if (loaded_oid == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
cstate->cur_attval = string;
values[m] = FunctionCall3(&in_functions[m],
CStringGetDatum(string),
- ObjectIdGetDatum(typioparams[m]),
- Int32GetDatum(attr[m]->atttypmod));
+ ObjectIdGetDatum(typioparams[m]),
+ Int32GetDatum(attr[m]->atttypmod));
nulls[m] = ' ';
cstate->cur_attname = NULL;
cstate->cur_attval = NULL;
}
/*
- * Now compute and insert any defaults available for the columns
- * not provided by the input data. Anything not processed here or
- * above will remain NULL.
+ * Now compute and insert any defaults available for the columns not
+ * provided by the input data. Anything not processed here or above
+ * will remain NULL.
*/
for (i = 0; i < num_defaults; i++)
{
prmdata->isnull = (nulls[i] == 'n');
/*
- * Execute the constraint expression. Allow the
- * expression to replace the value (consider e.g. a
- * timestamp precision restriction).
+ * Execute the constraint expression. Allow the expression to
+ * replace the value (consider e.g. a timestamp precision
+ * restriction).
*/
values[i] = ExecEvalExpr(exprstate, econtext,
&isnull, NULL);
/* BEFORE ROW INSERT Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
* server encoding.
*
* Result is true if read was terminated by EOF, false if terminated
- * by newline. The terminating newline or EOF marker is not included
+ * by newline. The terminating newline or EOF marker is not included
* in the final value of line_buf.
*/
static bool
{
/*
* Reached EOF. In protocol version 3, we should ignore anything
- * after \. up to the protocol end of copy data. (XXX maybe
- * better not to treat \. as special?)
+ * after \. up to the protocol end of copy data. (XXX maybe better
+ * not to treat \. as special?)
*/
if (cstate->copy_dest == COPY_NEW_FE)
{
- do {
+ do
+ {
cstate->raw_buf_index = cstate->raw_buf_len;
} while (CopyLoadRawBuf(cstate));
}
result = false;
/*
- * The objective of this loop is to transfer the entire next input
- * line into line_buf. Hence, we only care for detecting newlines
- * (\r and/or \n) and the end-of-copy marker (\.).
+ * The objective of this loop is to transfer the entire next input line
+ * into line_buf. Hence, we only care for detecting newlines (\r and/or
+ * \n) and the end-of-copy marker (\.).
*
* For backwards compatibility we allow backslashes to escape newline
- * characters. Backslashes other than the end marker get put into the
+ * characters. Backslashes other than the end marker get put into the
* line_buf, since CopyReadAttributesText does its own escape processing.
*
* These four characters, and only these four, are assumed the same in
* frontend and backend encodings.
*
- * For speed, we try to move data to line_buf in chunks rather than
- * one character at a time. raw_buf_ptr points to the next character
- * to examine; any characters from raw_buf_index to raw_buf_ptr have
- * been determined to be part of the line, but not yet transferred
- * to line_buf.
+ * For speed, we try to move data to line_buf in chunks rather than one
+ * character at a time. raw_buf_ptr points to the next character to
+ * examine; any characters from raw_buf_index to raw_buf_ptr have been
+ * determined to be part of the line, but not yet transferred to line_buf.
*
- * For a little extra speed within the loop, we copy raw_buf and
- * raw_buf_len into local variables.
+ * For a little extra speed within the loop, we copy raw_buf and raw_buf_len
+ * into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
for (;;)
{
- int prev_raw_ptr;
- char c;
+ int prev_raw_ptr;
+ char c;
/* Load more data if needed */
if (raw_buf_ptr >= copy_buf_len || need_data)
{
/*
- * Transfer any approved data to line_buf; must do this to
- * be sure there is some room in raw_buf.
+ * Transfer any approved data to line_buf; must do this to be sure
+ * there is some room in raw_buf.
*/
if (raw_buf_ptr > cstate->raw_buf_index)
{
appendBinaryStringInfo(&cstate->line_buf,
- cstate->raw_buf + cstate->raw_buf_index,
+ cstate->raw_buf + cstate->raw_buf_index,
raw_buf_ptr - cstate->raw_buf_index);
cstate->raw_buf_index = raw_buf_ptr;
}
+
/*
- * Try to read some more data. This will certainly reset
+ * Try to read some more data. This will certainly reset
* raw_buf_index to zero, and raw_buf_ptr must go with it.
*/
if (!CopyLoadRawBuf(cstate))
hit_eof = true;
raw_buf_ptr = 0;
copy_buf_len = cstate->raw_buf_len;
+
/*
* If we are completely out of data, break out of the loop,
* reporting EOF.
/*
* If need more data, go back to loop top to load it.
*
- * Note that if we are at EOF, c will wind up as '\0'
- * because of the guaranteed pad of raw_buf.
+ * Note that if we are at EOF, c will wind up as '\0' because of
+ * the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
if (c == '\n')
{
- raw_buf_ptr++; /* eat newline */
- cstate->eol_type = EOL_CRNL; /* in case not set yet */
+ raw_buf_ptr++; /* eat newline */
+ cstate->eol_type = EOL_CRNL; /* in case not set yet */
}
else
{
if (cstate->eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("literal carriage return found in data"),
+ errmsg("literal carriage return found in data"),
errhint("Use \"\\r\" to represent carriage return.")));
+
/*
- * if we got here, it is the first line and we didn't
- * find \n, so don't consume the peeked character
+ * if we got here, it is the first line and we didn't find
+ * \n, so don't consume the peeked character
*/
cstate->eol_type = EOL_CR;
}
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal carriage return found in data"),
- errhint("Use \"\\r\" to represent carriage return.")));
+ errhint("Use \"\\r\" to represent carriage return.")));
/* If reach here, we have found the line terminator */
break;
}
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal newline found in data"),
errhint("Use \"\\n\" to represent newline.")));
- cstate->eol_type = EOL_NL; /* in case not set yet */
+ cstate->eol_type = EOL_NL; /* in case not set yet */
/* If reach here, we have found the line terminator */
break;
}
}
/*
- * In non-CSV mode, backslash quotes the following character
- * even if it's a newline, so we always advance to next character
+ * In non-CSV mode, backslash quotes the following character even
+ * if it's a newline, so we always advance to next character
*/
c = copy_raw_buf[raw_buf_ptr++];
{
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
}
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
errmsg("end-of-copy marker does not match previous newline style")));
/*
- * Transfer only the data before the \. into line_buf,
- * then discard the data and the \. sequence.
+ * Transfer only the data before the \. into line_buf, then
+ * discard the data and the \. sequence.
*/
if (prev_raw_ptr > cstate->raw_buf_index)
appendBinaryStringInfo(&cstate->line_buf,
- cstate->raw_buf + cstate->raw_buf_index,
- prev_raw_ptr - cstate->raw_buf_index);
+ cstate->raw_buf + cstate->raw_buf_index,
+ prev_raw_ptr - cstate->raw_buf_index);
cstate->raw_buf_index = raw_buf_ptr;
result = true; /* report EOF */
break;
/*
* Do we need to be careful about trailing bytes of multibyte
- * characters? (See note above about client_only_encoding)
+ * characters? (See note above about client_only_encoding)
*
- * We assume here that pg_encoding_mblen only looks at the first
- * byte of the character!
+ * We assume here that pg_encoding_mblen only looks at the first byte of
+ * the character!
*/
if (cstate->client_only_encoding)
{
s[0] = c;
mblen = pg_encoding_mblen(cstate->client_encoding, s);
- if (raw_buf_ptr + (mblen-1) > copy_buf_len)
+ if (raw_buf_ptr + (mblen - 1) > copy_buf_len)
{
if (hit_eof)
{
result = true;
break;
}
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
- raw_buf_ptr += mblen-1;
+ raw_buf_ptr += mblen - 1;
}
} /* end of outer loop */
bool need_data;
bool hit_eof;
char s[2];
- bool in_quote = false, last_was_esc = false;
+ bool in_quote = false,
+ last_was_esc = false;
char quotec = cstate->quote[0];
char escapec = cstate->escape[0];
result = false;
/*
- * The objective of this loop is to transfer the entire next input
- * line into line_buf. Hence, we only care for detecting newlines
- * (\r and/or \n) and the end-of-copy marker (\.).
+ * The objective of this loop is to transfer the entire next input line
+ * into line_buf. Hence, we only care for detecting newlines (\r and/or
+ * \n) and the end-of-copy marker (\.).
*
- * In CSV mode, \r and \n inside a quoted field are just part of the
- * data value and are put in line_buf. We keep just enough state
- * to know if we are currently in a quoted field or not.
+ * In CSV mode, \r and \n inside a quoted field are just part of the data
+ * value and are put in line_buf. We keep just enough state to know if we
+ * are currently in a quoted field or not.
*
- * These four characters, and the CSV escape and quote characters,
- * are assumed the same in frontend and backend encodings.
+ * These four characters, and the CSV escape and quote characters, are
+ * assumed the same in frontend and backend encodings.
*
- * For speed, we try to move data to line_buf in chunks rather than
- * one character at a time. raw_buf_ptr points to the next character
- * to examine; any characters from raw_buf_index to raw_buf_ptr have
- * been determined to be part of the line, but not yet transferred
- * to line_buf.
+ * For speed, we try to move data to line_buf in chunks rather than one
+ * character at a time. raw_buf_ptr points to the next character to
+ * examine; any characters from raw_buf_index to raw_buf_ptr have been
+ * determined to be part of the line, but not yet transferred to line_buf.
*
- * For a little extra speed within the loop, we copy raw_buf and
- * raw_buf_len into local variables.
+ * For a little extra speed within the loop, we copy raw_buf and raw_buf_len
+ * into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
for (;;)
{
- int prev_raw_ptr;
- char c;
+ int prev_raw_ptr;
+ char c;
/* Load more data if needed */
if (raw_buf_ptr >= copy_buf_len || need_data)
{
/*
- * Transfer any approved data to line_buf; must do this to
- * be sure there is some room in raw_buf.
+ * Transfer any approved data to line_buf; must do this to be sure
+ * there is some room in raw_buf.
*/
if (raw_buf_ptr > cstate->raw_buf_index)
{
appendBinaryStringInfo(&cstate->line_buf,
- cstate->raw_buf + cstate->raw_buf_index,
+ cstate->raw_buf + cstate->raw_buf_index,
raw_buf_ptr - cstate->raw_buf_index);
cstate->raw_buf_index = raw_buf_ptr;
}
+
/*
- * Try to read some more data. This will certainly reset
+ * Try to read some more data. This will certainly reset
* raw_buf_index to zero, and raw_buf_ptr must go with it.
*/
if (!CopyLoadRawBuf(cstate))
hit_eof = true;
raw_buf_ptr = 0;
copy_buf_len = cstate->raw_buf_len;
+
/*
* If we are completely out of data, break out of the loop,
* reporting EOF.
/*
* If character is '\\' or '\r', we may need to look ahead below.
- * Force fetch of the next character if we don't already have it.
- * We need to do this before changing CSV state, in case one of
- * these characters is also the quote or escape character.
+ * Force fetch of the next character if we don't already have it. We
+ * need to do this before changing CSV state, in case one of these
+ * characters is also the quote or escape character.
*
- * Note: old-protocol does not like forced prefetch, but it's OK
- * here since we cannot validly be at EOF.
+ * Note: old-protocol does not like forced prefetch, but it's OK here
+ * since we cannot validly be at EOF.
*/
if (c == '\\' || c == '\r')
{
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
}
- /*
- * Dealing with quotes and escapes here is mildly tricky. If the
- * quote char is also the escape char, there's no problem - we
- * just use the char as a toggle. If they are different, we need
- * to ensure that we only take account of an escape inside a quoted
- * field and immediately preceding a quote char, and not the
- * second in a escape-escape sequence.
- */
+ /*
+ * Dealing with quotes and escapes here is mildly tricky. If the quote
+ * char is also the escape char, there's no problem - we just use the
+ * char as a toggle. If they are different, we need to ensure that we
+ * only take account of an escape inside a quoted field and
+ * immediately preceding a quote char, and not the second in a
+ * escape-escape sequence.
+ */
if (in_quote && c == escapec)
- last_was_esc = ! last_was_esc;
- if (c == quotec && ! last_was_esc)
- in_quote = ! in_quote;
+ last_was_esc = !last_was_esc;
+ if (c == quotec && !last_was_esc)
+ in_quote = !in_quote;
if (c != escapec)
last_was_esc = false;
/*
- * Updating the line count for embedded CR and/or LF chars is
- * necessarily a little fragile - this test is probably about
- * the best we can do. (XXX it's arguable whether we should
- * do this at all --- is cur_lineno a physical or logical count?)
- */
+ * Updating the line count for embedded CR and/or LF chars is
+ * necessarily a little fragile - this test is probably about the best
+ * we can do. (XXX it's arguable whether we should do this at all ---
+ * is cur_lineno a physical or logical count?)
+ */
if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
cstate->cur_lineno++;
/*
* If need more data, go back to loop top to load it.
*
- * Note that if we are at EOF, c will wind up as '\0'
- * because of the guaranteed pad of raw_buf.
+ * Note that if we are at EOF, c will wind up as '\0' because of
+ * the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
if (c == '\n')
{
- raw_buf_ptr++; /* eat newline */
- cstate->eol_type = EOL_CRNL; /* in case not set yet */
+ raw_buf_ptr++; /* eat newline */
+ cstate->eol_type = EOL_CRNL; /* in case not set yet */
}
else
{
if (cstate->eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unquoted carriage return found in data"),
+ errmsg("unquoted carriage return found in data"),
errhint("Use quoted CSV field to represent carriage return.")));
+
/*
- * if we got here, it is the first line and we didn't
- * find \n, so don't consume the peeked character
+ * if we got here, it is the first line and we didn't find
+ * \n, so don't consume the peeked character
*/
cstate->eol_type = EOL_CR;
}
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("unquoted newline found in data"),
- errhint("Use quoted CSV field to represent newline.")));
- cstate->eol_type = EOL_NL; /* in case not set yet */
+ errhint("Use quoted CSV field to represent newline.")));
+ cstate->eol_type = EOL_NL; /* in case not set yet */
/* If reach here, we have found the line terminator */
break;
}
*/
if (c == '\\' && cstate->line_buf.len == 0)
{
- char c2;
+ char c2;
/*
* If need more data, go back to loop top to load it.
}
/*
- * Note: we do not change c here since we aren't treating \
- * as escaping the next character.
+ * Note: we do not change c here since we aren't treating \ as
+ * escaping the next character.
*/
c2 = copy_raw_buf[raw_buf_ptr];
if (c2 == '.')
{
- raw_buf_ptr++; /* consume the '.' */
+ raw_buf_ptr++; /* consume the '.' */
/*
* Note: if we loop back for more data here, it does not
- * matter that the CSV state change checks are re-executed;
- * we will come back here with no important state changed.
+ * matter that the CSV state change checks are re-executed; we
+ * will come back here with no important state changed.
*/
if (cstate->eol_type == EOL_CRNL)
{
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
}
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
errmsg("end-of-copy marker does not match previous newline style")));
/*
- * Transfer only the data before the \. into line_buf,
- * then discard the data and the \. sequence.
+ * Transfer only the data before the \. into line_buf, then
+ * discard the data and the \. sequence.
*/
if (prev_raw_ptr > cstate->raw_buf_index)
appendBinaryStringInfo(&cstate->line_buf, cstate->raw_buf + cstate->raw_buf_index,
- prev_raw_ptr - cstate->raw_buf_index);
+ prev_raw_ptr - cstate->raw_buf_index);
cstate->raw_buf_index = raw_buf_ptr;
result = true; /* report EOF */
break;
/*
* Do we need to be careful about trailing bytes of multibyte
- * characters? (See note above about client_only_encoding)
+ * characters? (See note above about client_only_encoding)
*
- * We assume here that pg_encoding_mblen only looks at the first
- * byte of the character!
+ * We assume here that pg_encoding_mblen only looks at the first byte of
+ * the character!
*/
if (cstate->client_only_encoding)
{
s[0] = c;
mblen = pg_encoding_mblen(cstate->client_encoding, s);
- if (raw_buf_ptr + (mblen-1) > copy_buf_len)
+ if (raw_buf_ptr + (mblen - 1) > copy_buf_len)
{
if (hit_eof)
{
result = true;
break;
}
- raw_buf_ptr = prev_raw_ptr; /* undo fetch */
+ raw_buf_ptr = prev_raw_ptr; /* undo fetch */
need_data = true;
continue;
}
- raw_buf_ptr += mblen-1;
+ raw_buf_ptr += mblen - 1;
}
} /* end of outer loop */
* null_print is the null marker string. Note that this is compared to
* the pre-de-escaped input string.
*
- * The return value is the number of fields actually read. (We error out
+ * The return value is the number of fields actually read. (We error out
* if this would exceed maxfields, which is the length of fieldvals[].)
*/
static int
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to
- * do it this way because enlarging attribute_buf mid-stream would
- * invalidate pointers already stored into fieldvals[].
+ * then transfer data without any checks for enough space. We need to do
+ * it this way because enlarging attribute_buf mid-stream would invalidate
+ * pointers already stored into fieldvals[].
*/
if (cstate->attribute_buf.maxlen <= cstate->line_buf.len)
enlargeStringInfo(&cstate->attribute_buf, cstate->line_buf.len);
/* Scan data for field */
for (;;)
{
- char c;
+ char c;
end_ptr = cur_ptr;
if (cur_ptr >= line_end_ptr)
case '5':
case '6':
case '7':
- {
- /* handle \013 */
- int val;
-
- val = OCTVALUE(c);
- if (cur_ptr < line_end_ptr)
{
- c = *cur_ptr;
- if (ISOCTAL(c))
+ /* handle \013 */
+ int val;
+
+ val = OCTVALUE(c);
+ if (cur_ptr < line_end_ptr)
{
- cur_ptr++;
- val = (val << 3) + OCTVALUE(c);
- if (cur_ptr < line_end_ptr)
+ c = *cur_ptr;
+ if (ISOCTAL(c))
{
- c = *cur_ptr;
- if (ISOCTAL(c))
+ cur_ptr++;
+ val = (val << 3) + OCTVALUE(c);
+ if (cur_ptr < line_end_ptr)
{
- cur_ptr++;
- val = (val << 3) + OCTVALUE(c);
+ c = *cur_ptr;
+ if (ISOCTAL(c))
+ {
+ cur_ptr++;
+ val = (val << 3) + OCTVALUE(c);
+ }
}
}
}
+ c = val & 0377;
}
- c = val & 0377;
- }
- break;
+ break;
case 'x':
/* Handle \x3F */
if (cur_ptr < line_end_ptr)
{
- char hexchar = *cur_ptr;
+ char hexchar = *cur_ptr;
if (isxdigit((unsigned char) hexchar))
{
- int val = GetDecimalFromHex(hexchar);
+ int val = GetDecimalFromHex(hexchar);
cur_ptr++;
if (cur_ptr < line_end_ptr)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to
- * do it this way because enlarging attribute_buf mid-stream would
- * invalidate pointers already stored into fieldvals[].
+ * then transfer data without any checks for enough space. We need to do
+ * it this way because enlarging attribute_buf mid-stream would invalidate
+ * pointers already stored into fieldvals[].
*/
if (cstate->attribute_buf.maxlen <= cstate->line_buf.len)
enlargeStringInfo(&cstate->attribute_buf, cstate->line_buf.len);
/* Scan data for field */
for (;;)
{
- char c;
+ char c;
end_ptr = cur_ptr;
if (cur_ptr >= line_end_ptr)
*/
if (cur_ptr < line_end_ptr)
{
- char nextc = *cur_ptr;
+ char nextc = *cur_ptr;
if (nextc == escapec || nextc == quotec)
{
}
}
}
+
/*
* end of quoted field. Must do this test after testing for escape
* in case quote char and escape char are the same (which is the
CopySendChar(cstate, '\\');
/*
- * We can skip pg_encoding_mblen() overhead when encoding
- * is safe, because in valid backend encodings, extra
- * bytes of a multibyte character never look like ASCII.
+ * We can skip pg_encoding_mblen() overhead when encoding is
+ * safe, because in valid backend encodings, extra bytes of a
+ * multibyte character never look like ASCII.
*/
if (cstate->client_only_encoding)
mblen = pg_encoding_mblen(cstate->client_encoding, string);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.172 2005/10/10 20:02:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.173 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
datdba = GetUserId();
/*
- * To create a database, must have createdb privilege and must be able
- * to become the target role (this does not imply that the target role
- * itself must have createdb privilege). The latter provision guards
- * against "giveaway" attacks. Note that a superuser will always have
- * both of these privileges a fortiori.
+ * To create a database, must have createdb privilege and must be able to
+ * become the target role (this does not imply that the target role itself
+ * must have createdb privilege). The latter provision guards against
+ * "giveaway" attacks. Note that a superuser will always have both of
+ * these privileges a fortiori.
*/
if (!have_createdb_privilege())
ereport(ERROR,
/*
* Check for db name conflict. There is a race condition here, since
* another backend could create the same DB name before we commit.
- * However, holding an exclusive lock on pg_database for the whole
- * time we are copying the source database doesn't seem like a good
- * idea, so accept possibility of race to create. We will check again
- * after we grab the exclusive lock.
+ * However, holding an exclusive lock on pg_database for the whole time we
+ * are copying the source database doesn't seem like a good idea, so
+ * accept possibility of race to create. We will check again after we
+ * grab the exclusive lock.
*/
if (get_db_info(dbname, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL))
&src_vacuumxid, &src_frozenxid, &src_deftablespace))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("template database \"%s\" does not exist", dbtemplate)));
+ errmsg("template database \"%s\" does not exist", dbtemplate)));
/*
* Permission check: to copy a DB that's not marked datistemplate, you
if (DatabaseHasActiveBackends(src_dboid, true))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("source database \"%s\" is being accessed by other users",
- dbtemplate)));
+ errmsg("source database \"%s\" is being accessed by other users",
+ dbtemplate)));
/* If encoding is defaulted, use source's encoding */
if (encoding < 0)
/*
* If we are trying to change the default tablespace of the template,
* we require that the template not have any files in the new default
- * tablespace. This is necessary because otherwise the copied
+ * tablespace. This is necessary because otherwise the copied
* database would contain pg_class rows that refer to its default
* tablespace both explicitly (by OID) and implicitly (as zero), which
* would cause problems. For example another CREATE DATABASE using
/*
* Normally we mark the new database with the same datvacuumxid and
- * datfrozenxid as the source. However, if the source is not allowing
+ * datfrozenxid as the source. However, if the source is not allowing
* connections then we assume it is fully frozen, and we can set the
* current transaction ID as the xid limits. This avoids immediately
* starting to generate warnings after cloning template0.
src_vacuumxid = src_frozenxid = GetCurrentTransactionId();
/*
- * Preassign OID for pg_database tuple, so that we can compute db
- * path. We have to open pg_database to do this, but we don't want
- * to take ExclusiveLock yet, so just do it and close again.
+ * Preassign OID for pg_database tuple, so that we can compute db path.
+ * We have to open pg_database to do this, but we don't want to take
+ * ExclusiveLock yet, so just do it and close again.
*/
pg_database_rel = heap_open(DatabaseRelationId, AccessShareLock);
dboid = GetNewOid(pg_database_rel);
/*
* Force dirty buffers out to disk, to ensure source database is
- * up-to-date for the copy. (We really only need to flush buffers for
- * the source database, but bufmgr.c provides no API for that.)
+ * up-to-date for the copy. (We really only need to flush buffers for the
+ * source database, but bufmgr.c provides no API for that.)
*/
BufferSync();
/*
- * Once we start copying subdirectories, we need to be able to clean
- * 'em up if we fail. Establish a TRY block to make sure this happens.
- * (This is not a 100% solution, because of the possibility of failure
- * during transaction commit after we leave this routine, but it should
- * handle most scenarios.)
+ * Once we start copying subdirectories, we need to be able to clean 'em
+ * up if we fail. Establish a TRY block to make sure this happens. (This
+ * is not a 100% solution, because of the possibility of failure during
+ * transaction commit after we leave this routine, but it should handle
+ * most scenarios.)
*/
PG_TRY();
{
/*
- * Iterate through all tablespaces of the template database,
- * and copy each one to the new database.
+ * Iterate through all tablespaces of the template database, and copy
+ * each one to the new database.
*/
rel = heap_open(TableSpaceRelationId, AccessShareLock);
scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
- HeapTupleSetOid(tuple, dboid); /* override heap_insert's OID
- * selection */
+ HeapTupleSetOid(tuple, dboid); /* override heap_insert's OID
+ * selection */
simple_heap_insert(pg_database_rel, tuple);
/*
* We force a checkpoint before committing. This effectively means
* that committed XLOG_DBASE_CREATE operations will never need to be
- * replayed (at least not in ordinary crash recovery; we still have
- * to make the XLOG entry for the benefit of PITR operations).
- * This avoids two nasty scenarios:
+ * replayed (at least not in ordinary crash recovery; we still have to
+ * make the XLOG entry for the benefit of PITR operations). This
+ * avoids two nasty scenarios:
*
* #1: When PITR is off, we don't XLOG the contents of newly created
* indexes; therefore the drop-and-recreate-whole-directory behavior
* of DBASE_CREATE replay would lose such indexes.
*
* #2: Since we have to recopy the source database during DBASE_CREATE
- * replay, we run the risk of copying changes in it that were committed
- * after the original CREATE DATABASE command but before the system
- * crash that led to the replay. This is at least unexpected and at
- * worst could lead to inconsistencies, eg duplicate table names.
+ * replay, we run the risk of copying changes in it that were
+ * committed after the original CREATE DATABASE command but before the
+ * system crash that led to the replay. This is at least unexpected
+ * and at worst could lead to inconsistencies, eg duplicate table
+ * names.
*
* (Both of these were real bugs in releases 8.0 through 8.0.3.)
*
- * In PITR replay, the first of these isn't an issue, and the second
- * is only a risk if the CREATE DATABASE and subsequent template
- * database change both occur while a base backup is being taken.
- * There doesn't seem to be much we can do about that except document
- * it as a limitation.
+ * In PITR replay, the first of these isn't an issue, and the second is
+ * only a risk if the CREATE DATABASE and subsequent template database
+ * change both occur while a base backup is being taken. There doesn't
+ * seem to be much we can do about that except document it as a
+ * limitation.
*
- * Perhaps if we ever implement CREATE DATABASE in a less cheesy
- * way, we can avoid this.
+ * Perhaps if we ever implement CREATE DATABASE in a less cheesy way, we
+ * can avoid this.
*/
RequestCheckpoint(true, false);
errmsg("cannot drop the currently open database")));
/*
- * Obtain exclusive lock on pg_database. We need this to ensure that
- * no new backend starts up in the target database while we are
- * deleting it. (Actually, a new backend might still manage to start
- * up, because it isn't able to lock pg_database while starting. But
- * it will detect its error in ReverifyMyDatabase and shut down before
- * any serious damage is done. See postinit.c.)
+ * Obtain exclusive lock on pg_database. We need this to ensure that no
+ * new backend starts up in the target database while we are deleting it.
+ * (Actually, a new backend might still manage to start up, because it
+ * isn't able to lock pg_database while starting. But it will detect its
+ * error in ReverifyMyDatabase and shut down before any serious damage is
+ * done. See postinit.c.)
*
- * An ExclusiveLock, rather than AccessExclusiveLock, is sufficient
- * since ReverifyMyDatabase takes RowShareLock. This allows ordinary
- * readers of pg_database to proceed in parallel.
+ * An ExclusiveLock, rather than AccessExclusiveLock, is sufficient since
+ * ReverifyMyDatabase takes RowShareLock. This allows ordinary readers of
+ * pg_database to proceed in parallel.
*/
pgdbrel = heap_open(DatabaseRelationId, ExclusiveLock);
/*
* Disallow dropping a DB that is marked istemplate. This is just to
- * prevent people from accidentally dropping template0 or template1;
- * they can do so if they're really determined ...
+ * prevent people from accidentally dropping template0 or template1; they
+ * can do so if they're really determined ...
*/
if (db_istemplate)
ereport(ERROR,
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- dbname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ dbname)));
/*
* Find the database's tuple by OID (should be unique).
if (!HeapTupleIsValid(tup))
{
/*
- * This error should never come up since the existence of the
- * database is checked earlier
+ * This error should never come up since the existence of the database
+ * is checked earlier
*/
elog(ERROR, "database \"%s\" doesn't exist despite earlier reports to the contrary",
dbname);
/*
* Delete any comments associated with the database
*
- * NOTE: this is probably dead code since any such comments should have
- * been in that database, not mine.
+ * NOTE: this is probably dead code since any such comments should have been
+ * in that database, not mine.
*/
DeleteComments(db_id, DatabaseRelationId, 0);
dropDatabaseDependencies(db_id);
/*
- * Drop pages for this database that are in the shared buffer cache.
- * This is important to ensure that no remaining backend tries to
- * write out a dirty buffer to the dead database later...
+ * Drop pages for this database that are in the shared buffer cache. This
+ * is important to ensure that no remaining backend tries to write out a
+ * dirty buffer to the dead database later...
*/
DropBuffers(db_id);
key2;
/*
- * Obtain ExclusiveLock so that no new session gets started
- * while the rename is in progress.
+ * Obtain ExclusiveLock so that no new session gets started while the
+ * rename is in progress.
*/
rel = heap_open(DatabaseRelationId, ExclusiveLock);
errmsg("database \"%s\" does not exist", oldname)));
/*
- * XXX Client applications probably store the current database
- * somewhere, so renaming it could cause confusion. On the other
- * hand, there may not be an actual problem besides a little
- * confusion, so think about this and decide.
+ * XXX Client applications probably store the current database somewhere,
+ * so renaming it could cause confusion. On the other hand, there may not
+ * be an actual problem besides a little confusion, so think about this
+ * and decide.
*/
if (HeapTupleGetOid(tup) == MyDatabaseId)
ereport(ERROR,
if (DatabaseHasActiveBackends(HeapTupleGetOid(tup), false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- oldname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ oldname)));
/* make sure the new name doesn't exist */
ScanKeyInit(&key2,
connlimit = intVal(dconnlimit->arg);
/*
- * We don't need ExclusiveLock since we aren't updating the
- * flat file.
+ * We don't need ExclusiveLock since we aren't updating the flat file.
*/
rel = heap_open(DatabaseRelationId, RowExclusiveLock);
ScanKeyInit(&scankey,
heap_close(rel, NoLock);
/*
- * We don't bother updating the flat file since the existing options
- * for ALTER DATABASE don't affect it.
+ * We don't bother updating the flat file since the existing options for
+ * ALTER DATABASE don't affect it.
*/
}
valuestr = flatten_set_variable_args(stmt->variable, stmt->value);
/*
- * We don't need ExclusiveLock since we aren't updating the
- * flat file.
+ * We don't need ExclusiveLock since we aren't updating the flat file.
*/
rel = heap_open(DatabaseRelationId, RowExclusiveLock);
ScanKeyInit(&scankey,
heap_close(rel, NoLock);
/*
- * We don't bother updating the flat file since ALTER DATABASE SET
- * doesn't affect it.
+ * We don't bother updating the flat file since ALTER DATABASE SET doesn't
+ * affect it.
*/
}
Form_pg_database datForm;
/*
- * We don't need ExclusiveLock since we aren't updating the
- * flat file.
+ * We don't need ExclusiveLock since we aren't updating the flat file.
*/
rel = heap_open(DatabaseRelationId, RowExclusiveLock);
ScanKeyInit(&scankey,
HeapTuple newtuple;
/* Otherwise, must be owner of the existing object */
- if (!pg_database_ownercheck(HeapTupleGetOid(tuple),GetUserId()))
+ if (!pg_database_ownercheck(HeapTupleGetOid(tuple), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
dbname);
check_is_member_of_role(GetUserId(), newOwnerId);
/*
- * must have createdb rights
+ * must have createdb rights
*
- * NOTE: This is different from other alter-owner checks in
- * that the current user is checked for createdb privileges
- * instead of the destination owner. This is consistent
- * with the CREATE case for databases. Because superusers
- * will always have this right, we need no special case for them.
+ * NOTE: This is different from other alter-owner checks in that the
+ * current user is checked for createdb privileges instead of the
+ * destination owner. This is consistent with the CREATE case for
+ * databases. Because superusers will always have this right, we need
+ * no special case for them.
*/
if (!have_createdb_privilege())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to change owner of database")));
+ errmsg("permission denied to change owner of database")));
memset(repl_null, ' ', sizeof(repl_null));
memset(repl_repl, ' ', sizeof(repl_repl));
dst_path = GetDatabasePath(xlrec->db_id, xlrec->tablespace_id);
/*
- * Our theory for replaying a CREATE is to forcibly drop the
- * target subdirectory if present, then re-copy the source data.
- * This may be more work than needed, but it is simple to
- * implement.
+ * Our theory for replaying a CREATE is to forcibly drop the target
+ * subdirectory if present, then re-copy the source data. This may be
+ * more work than needed, but it is simple to implement.
*/
if (stat(dst_path, &st) == 0 && S_ISDIR(st.st_mode))
{
dst_path = GetDatabasePath(xlrec->db_id, xlrec->tablespace_id);
/*
- * Drop pages for this database that are in the shared buffer
- * cache
+ * Drop pages for this database that are in the shared buffer cache
*/
DropBuffers(xlrec->db_id);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.92 2004/12/31 21:59:41 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.93 2005/10/15 02:49:15 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid
- * int8 strings.
+ * constants by the lexer. Accept these if they are valid int8
+ * strings.
*/
return DatumGetInt64(DirectFunctionCall1(int8in,
- CStringGetDatum(strVal(def->arg))));
+ CStringGetDatum(strVal(def->arg))));
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.137 2005/06/04 02:07:09 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.138 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ListCell *l;
/*
- * Because the planner is not cool about not scribbling on its input,
- * we make a preliminary copy of the source querytree. This prevents
+ * Because the planner is not cool about not scribbling on its input, we
+ * make a preliminary copy of the source querytree. This prevents
* problems in the case that the EXPLAIN is in a portal or plpgsql
* function and is executed repeatedly. (See also the same hack in
- * DECLARE CURSOR and PREPARE.) XXX the planner really shouldn't
- * modify its input ... FIXME someday.
+ * DECLARE CURSOR and PREPARE.) XXX the planner really shouldn't modify
+ * its input ... FIXME someday.
*/
query = copyObject(query);
ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
TupOutputState *tstate)
{
- instr_time starttime;
+ instr_time starttime;
double totaltime = 0;
ExplainState *es;
StringInfo str;
pfree(s);
do_text_output_multiline(tstate, f);
pfree(f);
- do_text_output_oneline(tstate, ""); /* separator line */
+ do_text_output_oneline(tstate, ""); /* separator line */
}
}
if (es->printAnalyze)
{
ResultRelInfo *rInfo;
- int numrels = queryDesc->estate->es_num_result_relations;
- int nr;
+ int numrels = queryDesc->estate->es_num_result_relations;
+ int nr;
rInfo = queryDesc->estate->es_result_relations;
for (nr = 0; nr < numrels; rInfo++, nr++)
{
- int nt;
+ int nt;
if (!rInfo->ri_TrigDesc || !rInfo->ri_TrigInstrument)
continue;
for (nt = 0; nt < rInfo->ri_TrigDesc->numtriggers; nt++)
{
- Trigger *trig = rInfo->ri_TrigDesc->triggers + nt;
+ Trigger *trig = rInfo->ri_TrigDesc->triggers + nt;
Instrumentation *instr = rInfo->ri_TrigInstrument + nt;
- char *conname;
+ char *conname;
/* Must clean up instrumentation state */
InstrEndLoop(instr);
continue;
if (trig->tgisconstraint &&
- (conname = GetConstraintNameForTrigger(trig->tgoid)) != NULL)
+ (conname = GetConstraintNameForTrigger(trig->tgoid)) != NULL)
{
appendStringInfo(str, "Trigger for constraint %s",
conname);
if (numrels > 1)
appendStringInfo(str, " on %s",
- RelationGetRelationName(rInfo->ri_RelationDesc));
+ RelationGetRelationName(rInfo->ri_RelationDesc));
appendStringInfo(str, ": time=%.3f calls=%.0f\n",
1000.0 * instr->total,
}
/*
- * Close down the query and free resources. Include time for this
- * in the total runtime (although it should be pretty minimal).
+ * Close down the query and free resources. Include time for this in the
+ * total runtime (although it should be pretty minimal).
*/
INSTR_TIME_SET_CURRENT(starttime);
static double
elapsed_time(instr_time *starttime)
{
- instr_time endtime;
+ instr_time endtime;
INSTR_TIME_SET_CURRENT(endtime);
endtime.tv_usec += 1000000;
endtime.tv_sec--;
}
-#else /* WIN32 */
+#else /* WIN32 */
endtime.QuadPart -= starttime->QuadPart;
#endif
if (ScanDirectionIsBackward(((IndexScan *) plan)->indexorderdir))
appendStringInfoString(str, " Backward");
appendStringInfo(str, " using %s",
- quote_identifier(get_rel_name(((IndexScan *) plan)->indexid)));
+ quote_identifier(get_rel_name(((IndexScan *) plan)->indexid)));
/* FALL THRU */
case T_SeqScan:
case T_BitmapHeapScan:
quote_identifier(relname));
if (strcmp(rte->eref->aliasname, relname) != 0)
appendStringInfo(str, " %s",
- quote_identifier(rte->eref->aliasname));
+ quote_identifier(rte->eref->aliasname));
}
break;
case T_BitmapIndexScan:
Assert(rte->rtekind == RTE_FUNCTION);
/*
- * If the expression is still a function call, we can get
- * the real name of the function. Otherwise, punt (this
- * can happen if the optimizer simplified away the
- * function call, for example).
+ * If the expression is still a function call, we can get the
+ * real name of the function. Otherwise, punt (this can
+ * happen if the optimizer simplified away the function call,
+ * for example).
*/
if (rte->funcexpr && IsA(rte->funcexpr, FuncExpr))
{
quote_identifier(proname));
if (strcmp(rte->eref->aliasname, proname) != 0)
appendStringInfo(str, " %s",
- quote_identifier(rte->eref->aliasname));
+ quote_identifier(rte->eref->aliasname));
}
break;
default:
break;
}
-
+
appendStringInfo(str, " (cost=%.2f..%.2f rows=%.0f width=%d)",
plan->startup_cost, plan->total_cost,
plan->plan_rows, plan->plan_width);
/*
- * We have to forcibly clean up the instrumentation state because
- * we haven't done ExecutorEnd yet. This is pretty grotty ...
+ * We have to forcibly clean up the instrumentation state because we
+ * haven't done ExecutorEnd yet. This is pretty grotty ...
*/
if (planstate->instrument)
InstrEndLoop(planstate->instrument);
double nloops = planstate->instrument->nloops;
appendStringInfo(str, " (actual time=%.3f..%.3f rows=%.0f loops=%.0f)",
- 1000.0 * planstate->instrument->startup / nloops,
- 1000.0 * planstate->instrument->total / nloops,
+ 1000.0 * planstate->instrument->startup / nloops,
+ 1000.0 * planstate->instrument->total / nloops,
planstate->instrument->ntuples / nloops,
planstate->instrument->nloops);
}
for (i = 0; i < indent; i++)
appendStringInfo(str, " ");
appendStringInfo(str, " -> ");
+
/*
- * Ordinarily we don't pass down our own outer_plan value to our
- * child nodes, but in bitmap scan trees we must, since the bottom
+ * Ordinarily we don't pass down our own outer_plan value to our child
+ * nodes, but in bitmap scan trees we must, since the bottom
* BitmapIndexScan nodes may have outer references.
*/
explain_outNode(str, outerPlan(plan),
if (IsA(plan, BitmapAnd))
{
- BitmapAnd *bitmapandplan = (BitmapAnd *) plan;
+ BitmapAnd *bitmapandplan = (BitmapAnd *) plan;
BitmapAndState *bitmapandstate = (BitmapAndState *) planstate;
ListCell *lst;
int j;
explain_outNode(str, subnode,
bitmapandstate->bitmapplans[j],
- outer_plan, /* pass down same outer plan */
+ outer_plan, /* pass down same outer plan */
indent + 3, es);
j++;
}
if (IsA(plan, BitmapOr))
{
- BitmapOr *bitmaporplan = (BitmapOr *) plan;
+ BitmapOr *bitmaporplan = (BitmapOr *) plan;
BitmapOrState *bitmaporstate = (BitmapOrState *) planstate;
ListCell *lst;
int j;
explain_outNode(str, subnode,
bitmaporstate->bitmapplans[j],
- outer_plan, /* pass down same outer plan */
+ outer_plan, /* pass down same outer plan */
indent + 3, es);
j++;
}
scancontext = deparse_context_for_rte(rte);
/*
- * If we have an outer plan that is referenced by the qual, add it to
- * the deparse context. If not, don't (so that we don't force
- * prefixes unnecessarily).
+ * If we have an outer plan that is referenced by the qual, add it to the
+ * deparse context. If not, don't (so that we don't force prefixes
+ * unnecessarily).
*/
if (outer_plan)
{
if (bms_is_member(OUTER, varnos))
outercontext = deparse_context_for_subplan("outer",
- outer_plan->targetlist,
+ outer_plan->targetlist,
es->rtable);
else
outercontext = NULL;
/*
* In this routine we expect that the plan node's tlist has not been
- * processed by set_plan_references(). Normally, any Vars will
- * contain valid varnos referencing the actual rtable. But we might
- * instead be looking at a dummy tlist generated by prepunion.c; if
- * there are Vars with zero varno, use the tlist itself to determine
- * their names.
+ * processed by set_plan_references(). Normally, any Vars will contain
+ * valid varnos referencing the actual rtable. But we might instead be
+ * looking at a dummy tlist generated by prepunion.c; if there are Vars
+ * with zero varno, use the tlist itself to determine their names.
*/
varnos = pull_varnos((Node *) tlist);
if (bms_is_member(0, varnos))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.68 2005/09/24 22:54:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.69 2005/10/15 02:49:15 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot return shell type %s",
- TypeNameToString(returnType))));
+ errmsg("SQL function cannot return shell type %s",
+ TypeNameToString(returnType))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
ListCell *x;
int i;
- *requiredResultType = InvalidOid; /* default result */
+ *requiredResultType = InvalidOid; /* default result */
inTypes = (Oid *) palloc(parameterCount * sizeof(Oid));
allTypes = (Datum *) palloc(parameterCount * sizeof(Datum));
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot accept shell type %s",
- TypeNameToString(t))));
+ errmsg("SQL function cannot accept shell type %s",
+ TypeNameToString(t))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting or redundant options")));
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
static char
interpret_func_volatility(DefElem *defel)
{
- char *str = strVal(defel->arg);
+ char *str = strVal(defel->arg);
if (strcmp(str, "immutable") == 0)
return PROVOLATILE_IMMUTABLE;
else
{
elog(ERROR, "invalid volatility \"%s\"", str);
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
}
}
else
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized function attribute \"%s\" ignored",
- param->defname)));
+ errmsg("unrecognized function attribute \"%s\" ignored",
+ param->defname)));
}
}
if (languageOid == ClanguageId)
{
/*
- * For "C" language, store the file name in probin and, when
- * given, the link symbol name in prosrc.
+ * For "C" language, store the file name in probin and, when given,
+ * the link symbol name in prosrc.
*/
*probin_str_p = strVal(linitial(as));
if (list_length(as) == 1)
/* override attributes from explicit list */
compute_attributes_sql_style(stmt->options,
- &as_clause, &language, &volatility, &isStrict, &security);
+ &as_clause, &language, &volatility, &isStrict, &security);
/* Convert language name to canonical case */
languageName = case_translate_language_name(language);
/*
* In PostgreSQL versions before 6.5, the SQL name of the created
* function could not be different from the internal name, and
- * "prosrc" wasn't used. So there is code out there that does
- * CREATE FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some
- * modicum of backwards compatibility, accept an empty "prosrc"
- * value as meaning the supplied SQL function name.
+ * "prosrc" wasn't used. So there is code out there that does CREATE
+ * FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some modicum of
+ * backwards compatibility, accept an empty "prosrc" value as meaning
+ * the supplied SQL function name.
*/
if (strlen(prosrc_str) == 0)
prosrc_str = funcname;
}
/*
- * And now that we have all the parameters, and know we're permitted
- * to do so, go ahead and create the function.
+ * And now that we have all the parameters, and know we're permitted to do
+ * so, go ahead and create the function.
*/
ProcedureCreate(funcname,
namespaceId,
/* Permission check: must own func or its namespace */
if (!pg_proc_ownercheck(funcOid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
- GetUserId()))
+ !pg_namespace_ownercheck(((Form_pg_proc) GETSTRUCT(tup))->pronamespace,
+ GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
NameListToString(functionName));
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(functionName)),
- errhint("Use DROP AGGREGATE to drop aggregate functions.")));
+ errhint("Use DROP AGGREGATE to drop aggregate functions.")));
if (((Form_pg_proc) GETSTRUCT(tup))->prolang == INTERNALlanguageId)
{
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
- errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
+ errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
namespaceOid = procForm->pronamespace;
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes.values),
+ procForm->proargtypes.values),
get_namespace_name(namespaceOid))));
}
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_proc_ownercheck(procOid,GetUserId()))
+ if (!pg_proc_ownercheck(procOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
NameListToString(name));
void
AlterFunction(AlterFunctionStmt *stmt)
{
- HeapTuple tup;
- Oid funcOid;
+ HeapTuple tup;
+ Oid funcOid;
Form_pg_proc procForm;
- Relation rel;
- ListCell *l;
- DefElem *volatility_item = NULL;
- DefElem *strict_item = NULL;
- DefElem *security_def_item = NULL;
+ Relation rel;
+ ListCell *l;
+ DefElem *volatility_item = NULL;
+ DefElem *strict_item = NULL;
+ DefElem *security_def_item = NULL;
rel = heap_open(ProcedureRelationId, RowExclusiveLock);
NameListToString(stmt->func->funcname))));
/* Examine requested actions. */
- foreach (l, stmt->actions)
+ foreach(l, stmt->actions)
{
- DefElem *defel = (DefElem *) lfirst(l);
+ DefElem *defel = (DefElem *) lfirst(l);
if (compute_common_attribute(defel,
&volatility_item,
if (nargs < 1 || nargs > 3)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must take one to three arguments")));
+ errmsg("cast function must take one to three arguments")));
if (procstruct->proargtypes.values[0] != sourcetypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("argument of cast function must match source data type")));
+ errmsg("argument of cast function must match source data type")));
if (nargs > 1 && procstruct->proargtypes.values[1] != INT4OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("second argument of cast function must be type integer")));
+ errmsg("second argument of cast function must be type integer")));
if (nargs > 2 && procstruct->proargtypes.values[2] != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("third argument of cast function must be type boolean")));
+ errmsg("third argument of cast function must be type boolean")));
if (procstruct->prorettype != targettypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("return data type of cast function must match target data type")));
/*
- * Restricting the volatility of a cast function may or may not be
- * a good idea in the abstract, but it definitely breaks many old
+ * Restricting the volatility of a cast function may or may not be a
+ * good idea in the abstract, but it definitely breaks many old
* user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->proisagg)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must not be an aggregate function")));
+ errmsg("cast function must not be an aggregate function")));
if (procstruct->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create a cast WITHOUT FUNCTION")));
+ errmsg("must be superuser to create a cast WITHOUT FUNCTION")));
/*
* Also, insist that the types match as to size, alignment, and
- * pass-by-value attributes; this provides at least a crude check
- * that they have similar representations. A pair of types that
- * fail this test should certainly not be equated.
+ * pass-by-value attributes; this provides at least a crude check that
+ * they have similar representations. A pair of types that fail this
+ * test should certainly not be equated.
*/
get_typlenbyvalalign(sourcetypeid, &typ1len, &typ1byval, &typ1align);
get_typlenbyvalalign(targettypeid, &typ2len, &typ2byval, &typ2align);
if (sourcetypeid == targettypeid && nargs < 2)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("source data type and target data type are the same")));
+ errmsg("source data type and target data type are the same")));
/* convert CoercionContext enum to char value for castcontext */
switch (stmt->context)
relation = heap_open(CastRelationId, RowExclusiveLock);
/*
- * Check for duplicate. This is just to give a friendly error
- * message, the unique index would catch it anyway (so no need to
- * sweat about race conditions).
+ * Check for duplicate. This is just to give a friendly error message,
+ * the unique index would catch it anyway (so no need to sweat about race
+ * conditions).
*/
tuple = SearchSysCache(CASTSOURCETARGET,
ObjectIdGetDatum(sourcetypeid),
void
AlterFunctionNamespace(List *name, List *argtypes, const char *newschema)
{
- Oid procOid;
- Oid oldNspOid;
- Oid nspOid;
- HeapTuple tup;
- Relation procRel;
- Form_pg_proc proc;
+ Oid procOid;
+ Oid oldNspOid;
+ Oid nspOid;
+ HeapTuple tup;
+ Relation procRel;
+ Form_pg_proc proc;
procRel = heap_open(ProcedureRelationId, RowExclusiveLock);
if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move objects into or out of temporary schemas")));
+ errmsg("cannot move objects into or out of temporary schemas")));
/* same for TOAST schema */
if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.133 2005/06/22 21:14:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.134 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Verify we (still) have CREATE rights in the rel's namespace.
- * (Presumably we did when the rel was created, but maybe not
- * anymore.) Skip check if caller doesn't want it. Also skip check
- * if bootstrapping, since permissions machinery may not be working
- * yet.
+ * (Presumably we did when the rel was created, but maybe not anymore.)
+ * Skip check if caller doesn't want it. Also skip check if
+ * bootstrapping, since permissions machinery may not be working yet.
*/
if (check_rights && !IsBootstrapProcessingMode())
{
}
/*
- * Force shared indexes into the pg_global tablespace. This is a bit of
- * a hack but seems simpler than marking them in the BKI commands.
+ * Force shared indexes into the pg_global tablespace. This is a bit of a
+ * hack but seems simpler than marking them in the BKI commands.
*/
if (rel->rd_rel->relisshared)
tablespaceId = GLOBALTABLESPACE_OID;
}
/*
- * look up the access method, verify it can handle the requested
- * features
+ * look up the access method, verify it can handle the requested features
*/
tuple = SearchSysCache(AMNAME,
PointerGetDatum(accessMethodName),
if (unique && !accessMethodForm->amcanunique)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("access method \"%s\" does not support unique indexes",
- accessMethodName)));
+ errmsg("access method \"%s\" does not support unique indexes",
+ accessMethodName)));
if (numberOfAttributes > 1 && !accessMethodForm->amcanmulticol)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("access method \"%s\" does not support multicolumn indexes",
- accessMethodName)));
+ errmsg("access method \"%s\" does not support multicolumn indexes",
+ accessMethodName)));
ReleaseSysCache(tuple);
ListCell *keys;
/*
- * If ALTER TABLE, check that there isn't already a PRIMARY KEY.
- * In CREATE TABLE, we have faith that the parser rejected
- * multiple pkey clauses; and CREATE INDEX doesn't have a way to
- * say PRIMARY KEY, so it's no problem either.
+ * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In
+ * CREATE TABLE, we have faith that the parser rejected multiple pkey
+ * clauses; and CREATE INDEX doesn't have a way to say PRIMARY KEY, so
+ * it's no problem either.
*/
if (is_alter_table &&
relationHasPrimaryKey(rel))
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("multiple primary keys for table \"%s\" are not allowed",
- RelationGetRelationName(rel))));
+ errmsg("multiple primary keys for table \"%s\" are not allowed",
+ RelationGetRelationName(rel))));
}
/*
- * Check that all of the attributes in a primary key are marked as
- * not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
+ * Check that all of the attributes in a primary key are marked as not
+ * null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
cmds = NIL;
foreach(keys, attributeList)
else
{
/*
- * This shouldn't happen during CREATE TABLE, but can
- * happen during ALTER TABLE. Keep message in sync with
+ * This shouldn't happen during CREATE TABLE, but can happen
+ * during ALTER TABLE. Keep message in sync with
* transformIndexConstraints() in parser/analyze.c.
*/
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- key->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ key->name)));
}
}
/*
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child
- * tables? Currently, since the PRIMARY KEY itself doesn't
- * cascade, we don't cascade the notnull constraint(s) either; but
- * this is pretty debatable.
+ * tables? Currently, since the PRIMARY KEY itself doesn't cascade,
+ * we don't cascade the notnull constraint(s) either; but this is
+ * pretty debatable.
*
- * XXX: possible future improvement: when being called from ALTER
- * TABLE, it would be more efficient to merge this with the outer
- * ALTER TABLE, so as to avoid two scans. But that seems to
- * complicate DefineIndex's API unduly.
+ * XXX: possible future improvement: when being called from ALTER TABLE,
+ * it would be more efficient to merge this with the outer ALTER
+ * TABLE, so as to avoid two scans. But that seems to complicate
+ * DefineIndex's API unduly.
*/
if (cmds)
AlterTableInternal(relationId, cmds, false);
}
/*
- * Prepare arguments for index_create, primarily an IndexInfo
- * structure. Note that ii_Predicate must be in implicit-AND format.
+ * Prepare arguments for index_create, primarily an IndexInfo structure.
+ * Note that ii_Predicate must be in implicit-AND format.
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_NumIndexAttrs = numberOfAttributes;
heap_close(rel, NoLock);
/*
- * Report index creation if appropriate (delay this till after most of
- * the error checks)
+ * Report index creation if appropriate (delay this till after most of the
+ * error checks)
*/
if (isconstraint && !quiet)
ereport(NOTICE,
- (errmsg("%s %s will create implicit index \"%s\" for table \"%s\"",
- is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
- primary ? "PRIMARY KEY" : "UNIQUE",
- indexRelationName, RelationGetRelationName(rel))));
+ (errmsg("%s %s will create implicit index \"%s\" for table \"%s\"",
+ is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
+ primary ? "PRIMARY KEY" : "UNIQUE",
+ indexRelationName, RelationGetRelationName(rel))));
index_create(relationId, indexRelationName, indexRelationId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
* We update the relation's pg_class tuple even if it already has
* relhasindex = true. This is needed to cause a shared-cache-inval
* message to be sent for the pg_class tuple, which will cause other
- * backends to flush their relcache entries and in particular their
- * cached lists of the indexes for this relation.
+ * backends to flush their relcache entries and in particular their cached
+ * lists of the indexes for this relation.
*/
setRelhasindex(relationId, true, primary, InvalidOid);
}
{
/*
* We don't currently support generation of an actual query plan for a
- * predicate, only simple scalar expressions; hence these
- * restrictions.
+ * predicate, only simple scalar expressions; hence these restrictions.
*/
if (contain_subplans((Node *) predicate))
ereport(ERROR,
if (contain_mutable_functions((Node *) predicate))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("functions in index predicate must be marked IMMUTABLE")));
+ errmsg("functions in index predicate must be marked IMMUTABLE")));
}
static void
if (isconstraint)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- attribute->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ attribute->name)));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
atttype = exprType(attribute->expr);
/*
- * We don't currently support generation of an actual query
- * plan for an index expression, only simple scalar
- * expressions; hence these restrictions.
+ * We don't currently support generation of an actual query plan
+ * for an index expression, only simple scalar expressions; hence
+ * these restrictions.
*/
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in index expression")));
+ errmsg("cannot use subquery in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in index expression")));
+ errmsg("cannot use aggregate function in index expression")));
/*
- * A expression using mutable functions is probably wrong,
- * since if you aren't going to get the same result for the
- * same data every time, it's not clear what the index entries
- * mean at all.
+ * A expression using mutable functions is probably wrong, since
+ * if you aren't going to get the same result for the same data
+ * every time, it's not clear what the index entries mean at all.
*/
if (contain_mutable_functions(attribute->expr))
ereport(ERROR,
opInputType;
/*
- * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so
- * we ignore those opclass names so the default *_ops is used. This
- * can be removed in some later release. bjm 2000/02/07
+ * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so we
+ * ignore those opclass names so the default *_ops is used. This can be
+ * removed in some later release. bjm 2000/02/07
*
* Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
* 2000/07/30
*
- * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
- * too for awhile. I'm starting to think we need a better approach.
- * tgl 2000/10/01
+ * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that too
+ * for awhile. I'm starting to think we need a better approach. tgl
+ * 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while
* anyway). tgl 2003/11/11
NameListToString(opclass), accessMethodName)));
/*
- * Verify that the index operator class accepts this datatype. Note
- * we will accept binary compatibility.
+ * Verify that the index operator class accepts this datatype. Note we
+ * will accept binary compatibility.
*/
opClassId = HeapTupleGetOid(tuple);
opInputType = ((Form_pg_opclass) GETSTRUCT(tuple))->opcintype;
if (!IsBinaryCoercible(attrType, opInputType))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator class \"%s\" does not accept data type %s",
- NameListToString(opclass), format_type_be(attrType))));
+ errmsg("operator class \"%s\" does not accept data type %s",
+ NameListToString(opclass), format_type_be(attrType))));
ReleaseSysCache(tuple);
* (either exactly or binary-compatibly, but prefer an exact match).
*
* We could find more than one binary-compatible match, in which case we
- * require the user to specify which one he wants. If we find more
- * than one exact match, then someone put bogus entries in pg_opclass.
+ * require the user to specify which one he wants. If we find more than
+ * one exact match, then someone put bogus entries in pg_opclass.
*
* The initial search is done by namespace.c so that we only consider
* opclasses visible in the current namespace search path. (See also
if (nexact != 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple default operator classes for data type %s",
- format_type_be(attrType))));
+ errmsg("there are multiple default operator classes for data type %s",
+ format_type_be(attrType))));
if (ncompatible == 1)
return compatibleOid;
/*
* If we must truncate, preferentially truncate the longer name. This
- * logic could be expressed without a loop, but it's simple and
- * obvious as a loop.
+ * logic could be expressed without a loop, but it's simple and obvious as
+ * a loop.
*/
while (name1chars + name2chars > availchars)
{
ListCell *indexoidscan;
/*
- * Get the list of index OIDs for the table from the relcache, and
- * look up each one in the pg_index syscache until we find one marked
- * primary key (hopefully there isn't more than one such).
+ * Get the list of index OIDs for the table from the relcache, and look up
+ * each one in the pg_index syscache until we find one marked primary key
+ * (hopefully there isn't more than one such).
*/
indexoidlist = RelationGetIndexList(rel);
/*
* We cannot run inside a user transaction block; if we were inside a
- * transaction, then our commit- and start-transaction-command calls
- * would not have the intended effect!
+ * transaction, then our commit- and start-transaction-command calls would
+ * not have the intended effect!
*/
PreventTransactionChain((void *) databaseName, "REINDEX DATABASE");
/*
- * Create a memory context that will survive forced transaction
- * commits we do below. Since it is a child of PortalContext, it will
- * go away eventually even if we suffer an error; there's no need for
- * special abort cleanup logic.
+ * Create a memory context that will survive forced transaction commits we
+ * do below. Since it is a child of PortalContext, it will go away
+ * eventually even if we suffer an error; there's no need for special
+ * abort cleanup logic.
*/
private_context = AllocSetContextCreate(PortalContext,
"ReindexDatabase",
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * We always want to reindex pg_class first. This ensures that if
- * there is any corruption in pg_class' indexes, they will be fixed
- * before we process any other tables. This is critical because
- * reindexing itself will try to update pg_class.
+ * We always want to reindex pg_class first. This ensures that if there
+ * is any corruption in pg_class' indexes, they will be fixed before we
+ * process any other tables. This is critical because reindexing itself
+ * will try to update pg_class.
*/
if (do_system)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.12 2004/12/31 21:59:41 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.13 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ListCell *p;
/*
- * Iterate over the list and open, lock, and close the relations one
- * at a time
+ * Iterate over the list and open, lock, and close the relations one at a
+ * time
*/
foreach(p, lockstmt->relations)
Relation rel;
/*
- * We don't want to open the relation until we've checked
- * privilege. So, manually get the relation OID.
+ * We don't want to open the relation until we've checked privilege.
+ * So, manually get the relation OID.
*/
reloid = RangeVarGetRelid(relation, false);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.37 2005/08/23 01:41:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.38 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ReleaseSysCache(tup);
/*
- * Currently, we require superuser privileges to create an opclass.
- * This seems necessary because we have no way to validate that the
- * offered set of operators and functions are consistent with the AM's
- * expectations. It would be nice to provide such a check someday, if
- * it can be done without solving the halting problem :-(
+ * Currently, we require superuser privileges to create an opclass. This
+ * seems necessary because we have no way to validate that the offered set
+ * of operators and functions are consistent with the AM's expectations.
+ * It would be nice to provide such a check someday, if it can be done
+ * without solving the halting problem :-(
*/
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create an operator class")));
+ errmsg("must be superuser to create an operator class")));
/* Look up the datatype */
typeoid = typenameTypeId(stmt->datatype);
if (OidIsValid(storageoid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("storage type specified more than once")));
+ errmsg("storage type specified more than once")));
storageoid = typenameTypeId(item->storedtype);
break;
default:
{
/*
* Currently, only GiST allows storagetype different from
- * datatype. This hardcoded test should be eliminated in
- * favor of adding another boolean column to pg_am ...
+ * datatype. This hardcoded test should be eliminated in favor of
+ * adding another boolean column to pg_am ...
*/
if (amoid != GIST_AM_OID)
ereport(ERROR,
rel = heap_open(OperatorClassRelationId, RowExclusiveLock);
/*
- * Make sure there is no existing opclass of this name (this is just
- * to give a more friendly error message than "duplicate key").
+ * Make sure there is no existing opclass of this name (this is just to
+ * give a more friendly error message than "duplicate key").
*/
if (SearchSysCacheExists(CLAAMNAMENSP,
ObjectIdGetDatum(amoid),
opcname, stmt->amname)));
/*
- * If we are creating a default opclass, check there isn't one
- * already. (Note we do not restrict this test to visible opclasses;
- * this ensures that typcache.c can find unique solutions to its
- * questions.)
+ * If we are creating a default opclass, check there isn't one already.
+ * (Note we do not restrict this test to visible opclasses; this ensures
+ * that typcache.c can find unique solutions to its questions.)
*/
if (stmt->isDefault)
{
errmsg("could not make operator class \"%s\" be default for type %s",
opcname,
TypeNameToString(stmt->datatype)),
- errdetail("Operator class \"%s\" already is the default.",
- NameStr(opclass->opcname))));
+ errdetail("Operator class \"%s\" already is the default.",
+ NameStr(opclass->opcname))));
}
systable_endscan(scan);
namestrcpy(&opcName, opcname);
values[i++] = NameGetDatum(&opcName); /* opcname */
values[i++] = ObjectIdGetDatum(namespaceoid); /* opcnamespace */
- values[i++] = ObjectIdGetDatum(GetUserId()); /* opcowner */
+ values[i++] = ObjectIdGetDatum(GetUserId()); /* opcowner */
values[i++] = ObjectIdGetDatum(typeoid); /* opcintype */
values[i++] = BoolGetDatum(stmt->isDefault); /* opcdefault */
values[i++] = ObjectIdGetDatum(storageoid); /* opckeytype */
storeProcedures(opclassoid, procedures);
/*
- * Create dependencies. Note: we do not create a dependency link to
- * the AM, because we don't currently support DROP ACCESS METHOD.
+ * Create dependencies. Note: we do not create a dependency link to the
+ * AM, because we don't currently support DROP ACCESS METHOD.
*/
myself.classId = OperatorClassRelationId;
myself.objectId = opclassoid;
opform = (Form_pg_operator) GETSTRUCT(optup);
/*
- * btree operators must be binary ops returning boolean, and the
- * left-side input type must match the operator class' input type.
+ * btree operators must be binary ops returning boolean, and the left-side
+ * input type must match the operator class' input type.
*/
if (opform->oprkind != 'b')
ereport(ERROR,
if (opform->oprleft != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operators must have index type as left input")));
+ errmsg("btree operators must have index type as left input")));
/*
- * The subtype is "default" (0) if oprright matches the operator
- * class, otherwise it is oprright.
+ * The subtype is "default" (0) if oprright matches the operator class,
+ * otherwise it is oprright.
*/
if (opform->oprright == typeoid)
subtype = InvalidOid;
procform = (Form_pg_proc) GETSTRUCT(proctup);
/*
- * btree support procs must be 2-arg procs returning int4, and the
- * first input type must match the operator class' input type.
+ * btree support procs must be 2-arg procs returning int4, and the first
+ * input type must match the operator class' input type.
*/
if (procform->pronargs != 2)
ereport(ERROR,
if (procform->proargtypes.values[0] != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree procedures must have index type as first input")));
+ errmsg("btree procedures must have index type as first input")));
/*
- * The subtype is "default" (0) if second input type matches the
- * operator class, otherwise it is the second input type.
+ * The subtype is "default" (0) if second input type matches the operator
+ * class, otherwise it is the second input type.
*/
if (procform->proargtypes.values[1] == typeoid)
subtype = InvalidOid;
if (isProc)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("procedure number %d appears more than once",
- member->number)));
+ errmsg("procedure number %d appears more than once",
+ member->number)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("operator number %d appears more than once",
- member->number)));
+ errmsg("operator number %d appears more than once",
+ member->number)));
}
}
*list = lappend(*list, member);
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
opcID = HeapTupleGetOid(tuple);
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_opclass_ownercheck(HeapTupleGetOid(tup),GetUserId()))
+ if (!pg_opclass_ownercheck(HeapTupleGetOid(tup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPCLASS,
NameListToString(name));
}
/*
- * Modify the owner --- okay to scribble on tup because it's a
- * copy
+ * Modify the owner --- okay to scribble on tup because it's a copy
*/
opcForm->opcowner = newOwnerId;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.25 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.26 2005/10/15 02:49:15 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
TypeName *typeName2 = NULL; /* second type name */
Oid typeId1 = InvalidOid; /* types converted to OID */
Oid typeId2 = InvalidOid;
- List *commutatorName = NIL; /* optional commutator operator
- * name */
+ List *commutatorName = NIL; /* optional commutator operator name */
List *negatorName = NIL; /* optional negator operator name */
- List *restrictionName = NIL; /* optional restrict. sel.
- * procedure */
+ List *restrictionName = NIL; /* optional restrict. sel. procedure */
List *joinName = NIL; /* optional join sel. procedure */
List *leftSortName = NIL; /* optional left sort operator */
List *rightSortName = NIL; /* optional right sort operator */
if (typeName1->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (pg_strcasecmp(defel->defname, "rightarg") == 0)
{
if (typeName2->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (pg_strcasecmp(defel->defname, "procedure") == 0)
functionName = defGetQualifiedName(defel);
typeId2 = typenameTypeId(typeName2);
/*
- * If any of the mergejoin support operators were given, then canMerge
- * is implicit. If canMerge is specified or implicit, fill in default
+ * If any of the mergejoin support operators were given, then canMerge is
+ * implicit. If canMerge is specified or implicit, fill in default
* operator names for any missing mergejoin support operators.
*/
if (leftSortName || rightSortName || ltCompareName || gtCompareName)
typeId1, /* left type id */
typeId2, /* right type id */
functionName, /* function for operator */
- commutatorName, /* optional commutator operator
- * name */
+ commutatorName, /* optional commutator operator name */
negatorName, /* optional negator operator name */
- restrictionName, /* optional restrict. sel.
- * procedure */
+ restrictionName, /* optional restrict. sel. procedure */
joinName, /* optional join sel. procedure name */
canHash, /* operator hashes */
leftSortName, /* optional left sort operator */
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_oper_ownercheck(operOid,GetUserId()))
+ if (!pg_oper_ownercheck(operOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER,
NameListToString(name));
}
/*
- * Modify the owner --- okay to scribble on tup because it's a
- * copy
+ * Modify the owner --- okay to scribble on tup because it's a copy
*/
oprForm->oprowner = newOwnerId;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.42 2005/06/03 23:05:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.43 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
errmsg("invalid cursor name: must not be empty")));
/*
- * If this is a non-holdable cursor, we require that this statement
- * has been executed inside a transaction block (or else, it would
- * have no user-visible effect).
+ * If this is a non-holdable cursor, we require that this statement has
+ * been executed inside a transaction block (or else, it would have no
+ * user-visible effect).
*/
if (!(stmt->options & CURSOR_OPT_HOLD))
RequireTransactionChain((void *) stmt, "DECLARE CURSOR");
/*
- * Because the planner is not cool about not scribbling on its input,
- * we make a preliminary copy of the source querytree. This prevents
+ * Because the planner is not cool about not scribbling on its input, we
+ * make a preliminary copy of the source querytree. This prevents
* problems in the case that the DECLARE CURSOR is in a portal and is
- * executed repeatedly. XXX the planner really shouldn't modify its
- * input ... FIXME someday.
+ * executed repeatedly. XXX the planner really shouldn't modify its input
+ * ... FIXME someday.
*/
query = copyObject(stmt->query);
/*
* The query has been through parse analysis, but not rewriting or
- * planning as yet. Note that the grammar ensured we have a SELECT
- * query, so we are not expecting rule rewriting to do anything
- * strange.
+ * planning as yet. Note that the grammar ensured we have a SELECT query,
+ * so we are not expecting rule rewriting to do anything strange.
*/
AcquireRewriteLocks(query);
rewritten = QueryRewrite(query);
if (query->rowMarks != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DECLARE CURSOR ... FOR UPDATE/SHARE is not supported"),
+ errmsg("DECLARE CURSOR ... FOR UPDATE/SHARE is not supported"),
errdetail("Cursors must be READ ONLY.")));
plan = planner(query, true, stmt->options, NULL);
/*
- * Create a portal and copy the query and plan into its memory
- * context.
+ * Create a portal and copy the query and plan into its memory context.
*/
portal = CreatePortal(stmt->portalname, false, false);
/*
* Also copy the outer portal's parameter list into the inner portal's
- * memory context. We want to pass down the parameter values in case
- * we had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo =
- * $1 This will have been parsed using the outer parameter set and the
- * parameter value needs to be preserved for use when the cursor is
- * executed.
+ * memory context. We want to pass down the parameter values in case we
+ * had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo = $1 This
+ * will have been parsed using the outer parameter set and the parameter
+ * value needs to be preserved for use when the cursor is executed.
*/
params = copyParamList(params);
* Set up options for portal.
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
- * based on whether it would require any additional runtime overhead
- * to do so.
+ * based on whether it would require any additional runtime overhead to do
+ * so.
*/
portal->cursorOptions = stmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
Assert(portal->strategy == PORTAL_ONE_SELECT);
/*
- * We're done; the query won't actually be run until
- * PerformPortalFetch is called.
+ * We're done; the query won't actually be run until PerformPortalFetch is
+ * called.
*/
}
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
- errmsg("cursor \"%s\" does not exist", stmt->portalname)));
+ errmsg("cursor \"%s\" does not exist", stmt->portalname)));
return; /* keep compiler happy */
}
AssertArg(portal->cleanup == PortalCleanup);
/*
- * Shut down executor, if still running. We skip this during error
- * abort, since other mechanisms will take care of releasing executor
- * resources, and we can't be sure that ExecutorEnd itself wouldn't
- * fail.
+ * Shut down executor, if still running. We skip this during error abort,
+ * since other mechanisms will take care of releasing executor resources,
+ * and we can't be sure that ExecutorEnd itself wouldn't fail.
*/
queryDesc = PortalGetQueryDesc(portal);
if (queryDesc)
MemoryContextSwitchTo(PortalContext);
/*
- * Rewind the executor: we need to store the entire result set in
- * the tuplestore, so that subsequent backward FETCHs can be
- * processed.
+ * Rewind the executor: we need to store the entire result set in the
+ * tuplestore, so that subsequent backward FETCHs can be processed.
*/
ExecutorRewind(queryDesc);
/*
* Reset the position in the result set: ideally, this could be
- * implemented by just skipping straight to the tuple # that we
- * need to be at, but the tuplestore API doesn't support that. So
- * we start at the beginning of the tuplestore and iterate through
- * it until we reach where we need to be. FIXME someday?
+ * implemented by just skipping straight to the tuple # that we need
+ * to be at, but the tuplestore API doesn't support that. So we start
+ * at the beginning of the tuplestore and iterate through it until we
+ * reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);
if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not reposition held cursor")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not reposition held cursor")));
tuplestore_rescan(portal->holdStore);
QueryContext = saveQueryContext;
/*
- * We can now release any subsidiary memory of the portal's heap
- * context; we'll never use it again. The executor already dropped
- * its context, but this will clean up anything that glommed onto the
- * portal's heap via PortalContext.
+ * We can now release any subsidiary memory of the portal's heap context;
+ * we'll never use it again. The executor already dropped its context,
+ * but this will clean up anything that glommed onto the portal's heap via
+ * PortalContext.
*/
MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.40 2005/06/22 17:45:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.41 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Parse analysis is already done, but we must still rewrite and plan
- * the query.
+ * Parse analysis is already done, but we must still rewrite and plan the
+ * query.
*/
/*
- * Because the planner is not cool about not scribbling on its input,
- * we make a preliminary copy of the source querytree. This prevents
+ * Because the planner is not cool about not scribbling on its input, we
+ * make a preliminary copy of the source querytree. This prevents
* problems in the case that the PREPARE is in a portal or plpgsql
* function and is executed repeatedly. (See also the same hack in
- * DECLARE CURSOR and EXPLAIN.) XXX the planner really shouldn't
- * modify its input ... FIXME someday.
+ * DECLARE CURSOR and EXPLAIN.) XXX the planner really shouldn't modify
+ * its input ... FIXME someday.
*/
query = copyObject(stmt->query);
plan_list = pg_plan_queries(query_list, NULL, false);
/*
- * Save the results. We don't have the query string for this PREPARE,
- * but we do have the string we got from the client, so use that.
+ * Save the results. We don't have the query string for this PREPARE, but
+ * we do have the string we got from the client, so use that.
*/
StorePreparedStatement(stmt->name,
debug_query_string,
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it till
- * end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till end
+ * of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, stmt->params, entry->argtype_list);
portal = CreateNewPortal();
/*
- * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so
- * that we can modify its destination (yech, but this has always been
- * ugly). For regular EXECUTE we can just use the stored query where
- * it sits, since the executor is read-only.
+ * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so that
+ * we can modify its destination (yech, but this has always been ugly).
+ * For regular EXECUTE we can just use the stored query where it sits,
+ * since the executor is read-only.
*/
if (stmt->into)
{
bool isNull;
paramLI[i].value = ExecEvalExprSwitchContext(n,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
paramLI[i].kind = PARAM_NUM;
/*
* We need to copy the data so that it is stored in the correct memory
* context. Do this before making hashtable entry, so that an
- * out-of-memory failure only wastes memory and doesn't leave us with
- * an incomplete (ie corrupt) hashtable entry.
+ * out-of-memory failure only wastes memory and doesn't leave us with an
+ * incomplete (ie corrupt) hashtable entry.
*/
qstring = query_string ? pstrdup(query_string) : NULL;
query_list = (List *) copyObject(query_list);
if (prepared_queries)
{
/*
- * We can't just use the statement name as supplied by the user:
- * the hash package is picky enough that it needs to be
- * NULL-padded out to the appropriate length to work correctly.
+ * We can't just use the statement name as supplied by the user: the
+ * hash package is picky enough that it needs to be NULL-padded out to
+ * the appropriate length to work correctly.
*/
StrNCpy(key, stmt_name, sizeof(key));
/*
* Given a prepared statement that returns tuples, extract the query
- * targetlist. Returns NIL if the statement doesn't have a determinable
+ * targetlist. Returns NIL if the statement doesn't have a determinable
* targetlist.
*
* Note: do not modify the result.
return ((Query *) linitial(stmt->query_list))->targetList;
if (strategy == PORTAL_UTIL_SELECT)
{
- Node *utilityStmt;
+ Node *utilityStmt;
utilityStmt = ((Query *) linitial(stmt->query_list))->utilityStmt;
switch (nodeTag(utilityStmt))
{
case T_FetchStmt:
- {
- FetchStmt *substmt = (FetchStmt *) utilityStmt;
- Portal subportal;
+ {
+ FetchStmt *substmt = (FetchStmt *) utilityStmt;
+ Portal subportal;
- Assert(!substmt->ismove);
- subportal = GetPortalByName(substmt->portalname);
- Assert(PortalIsValid(subportal));
- return FetchPortalTargetList(subportal);
- }
+ Assert(!substmt->ismove);
+ subportal = GetPortalByName(substmt->portalname);
+ Assert(PortalIsValid(subportal));
+ return FetchPortalTargetList(subportal);
+ }
case T_ExecuteStmt:
- {
- ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt;
- PreparedStatement *entry;
+ {
+ ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt;
+ PreparedStatement *entry;
- Assert(!substmt->into);
- entry = FetchPreparedStatement(substmt->name, true);
- return FetchPreparedStatementTargetList(entry);
- }
+ Assert(!substmt->into);
+ entry = FetchPreparedStatement(substmt->name, true);
+ return FetchPreparedStatementTargetList(entry);
+ }
default:
break;
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it till
- * end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till end
+ * of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, execstmt->params,
if (query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("prepared statement is not a SELECT")));
+ errmsg("prepared statement is not a SELECT")));
/* Copy the query so we can modify it */
query = copyObject(query);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.62 2005/09/08 20:07:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.63 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} PLTemplate;
static void create_proc_lang(const char *languageName,
- Oid handlerOid, Oid valOid, bool trusted);
+ Oid handlerOid, Oid valOid, bool trusted);
static PLTemplate *find_language_template(const char *languageName);
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create procedural language")));
+ errmsg("must be superuser to create procedural language")));
/*
* Translate the language name and check that this language doesn't
*/
if ((pltemplate = find_language_template(languageName)) != NULL)
{
- List *funcname;
+ List *funcname;
/*
* Give a notice if we are ignoring supplied parameters.
(errmsg("using pg_pltemplate information instead of CREATE LANGUAGE parameters")));
/*
- * Find or create the handler function, which we force to be in
- * the pg_catalog schema. If already present, it must have the
- * correct return type.
+ * Find or create the handler function, which we force to be in the
+ * pg_catalog schema. If already present, it must have the correct
+ * return type.
*/
funcname = SystemFuncName(pltemplate->tmplhandler);
handlerOid = LookupFuncName(funcname, 0, funcargtypes, true);
if (funcrettype != LANGUAGE_HANDLEROID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type \"language_handler\"",
- NameListToString(funcname))));
+ errmsg("function %s must return type \"language_handler\"",
+ NameListToString(funcname))));
}
else
{
handlerOid = ProcedureCreate(pltemplate->tmplhandler,
PG_CATALOG_NAMESPACE,
- false, /* replace */
- false, /* returnsSet */
+ false, /* replace */
+ false, /* returnsSet */
LANGUAGE_HANDLEROID,
ClanguageId,
F_FMGR_C_VALIDATOR,
pltemplate->tmplhandler,
pltemplate->tmpllibrary,
- false, /* isAgg */
- false, /* security_definer */
- false, /* isStrict */
+ false, /* isAgg */
+ false, /* security_definer */
+ false, /* isStrict */
PROVOLATILE_VOLATILE,
buildoidvector(funcargtypes, 0),
PointerGetDatum(NULL),
{
valOid = ProcedureCreate(pltemplate->tmplvalidator,
PG_CATALOG_NAMESPACE,
- false, /* replace */
- false, /* returnsSet */
+ false, /* replace */
+ false, /* returnsSet */
VOIDOID,
ClanguageId,
F_FMGR_C_VALIDATOR,
pltemplate->tmplvalidator,
pltemplate->tmpllibrary,
- false, /* isAgg */
- false, /* security_definer */
- false, /* isStrict */
+ false, /* isAgg */
+ false, /* security_definer */
+ false, /* isStrict */
PROVOLATILE_VOLATILE,
buildoidvector(funcargtypes, 1),
PointerGetDatum(NULL),
else
{
/*
- * No template, so use the provided information. If there's
- * no handler clause, the user is trying to rely on a template
- * that we don't have, so complain accordingly.
+ * No template, so use the provided information. If there's no
+ * handler clause, the user is trying to rely on a template that we
+ * don't have, so complain accordingly.
*/
if (!stmt->plhandler)
ereport(ERROR,
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type \"language_handler\"",
- NameListToString(stmt->plhandler))));
+ errmsg("function %s must return type \"language_handler\"",
+ NameListToString(stmt->plhandler))));
}
/* validate the validator function */
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to drop procedural language")));
+ errmsg("must be superuser to drop procedural language")));
/*
* Translate the language name, check that the language exists
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to rename procedural language")));
+ errmsg("must be superuser to rename procedural language")));
/* rename */
namestrcpy(&(((Form_pg_language) GETSTRUCT(tup))->lanname), newname);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.34 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.35 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Oid namespaceId;
List *parsetree_list;
ListCell *parsetree_item;
- Oid owner_uid;
- Oid saved_uid;
+ Oid owner_uid;
+ Oid saved_uid;
AclResult aclresult;
saved_uid = GetUserId();
* To create a schema, must have schema-create privilege on the current
* database and must be able to become the target role (this does not
* imply that the target role itself must have create-schema privilege).
- * The latter provision guards against "giveaway" attacks. Note that
- * a superuser will always have both of these privileges a fortiori.
+ * The latter provision guards against "giveaway" attacks. Note that a
+ * superuser will always have both of these privileges a fortiori.
*/
aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
if (aclresult != ACLCHECK_OK)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", schemaName),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/*
* If the requested authorization is different from the current user,
- * temporarily set the current user so that the object(s) will be
- * created with the correct ownership.
+ * temporarily set the current user so that the object(s) will be created
+ * with the correct ownership.
*
- * (The setting will revert to session user on error or at the end of
- * this routine.)
+ * (The setting will revert to session user on error or at the end of this
+ * routine.)
*/
if (saved_uid != owner_uid)
SetUserId(owner_uid);
CommandCounterIncrement();
/*
- * Temporarily make the new namespace be the front of the search path,
- * as well as the default creation target namespace. This will be
- * undone at the end of this routine, or upon error.
+ * Temporarily make the new namespace be the front of the search path, as
+ * well as the default creation target namespace. This will be undone at
+ * the end of this routine, or upon error.
*/
PushSpecialNamespace(namespaceId);
/*
- * Examine the list of commands embedded in the CREATE SCHEMA command,
- * and reorganize them into a sequentially executable order with no
- * forward references. Note that the result is still a list of raw
- * parsetrees in need of parse analysis --- we cannot, in general, run
- * analyze.c on one statement until we have actually executed the
- * prior ones.
+ * Examine the list of commands embedded in the CREATE SCHEMA command, and
+ * reorganize them into a sequentially executable order with no forward
+ * references. Note that the result is still a list of raw parsetrees in
+ * need of parse analysis --- we cannot, in general, run analyze.c on one
+ * statement until we have actually executed the prior ones.
*/
parsetree_list = analyzeCreateSchemaStmt(stmt);
namespaceName);
/*
- * Do the deletion. Objects contained in the schema are removed by
- * means of their dependency links to the schema.
+ * Do the deletion. Objects contained in the schema are removed by means
+ * of their dependency links to the schema.
*/
object.classId = NamespaceRelationId;
object.objectId = namespaceId;
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* rename */
namestrcpy(&(((Form_pg_namespace) GETSTRUCT(tup))->nspname), newname);
AclResult aclresult;
/* Otherwise, must be owner of the existing object */
- if (!pg_namespace_ownercheck(HeapTupleGetOid(tup),GetUserId()))
+ if (!pg_namespace_ownercheck(HeapTupleGetOid(tup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE,
name);
/* Must be able to become new owner */
- check_is_member_of_role(GetUserId(),newOwnerId);
+ check_is_member_of_role(GetUserId(), newOwnerId);
/*
* must have create-schema rights
*
- * NOTE: This is different from other alter-owner checks in
- * that the current user is checked for create privileges
- * instead of the destination owner. This is consistent
- * with the CREATE case for schemas. Because superusers
- * will always have this right, we need no special case for them.
+ * NOTE: This is different from other alter-owner checks in that the
+ * current user is checked for create privileges instead of the
+ * destination owner. This is consistent with the CREATE case for
+ * schemas. Because superusers will always have this right, we need
+ * no special case for them.
*/
aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(),
ACL_CREATE);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.124 2005/10/02 23:50:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.125 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Two special hacks here:
*
- * 1. Since VACUUM does not process sequences, we have to force the tuple
- * to have xmin = FrozenTransactionId now. Otherwise it would become
+ * 1. Since VACUUM does not process sequences, we have to force the tuple to
+ * have xmin = FrozenTransactionId now. Otherwise it would become
* invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
*
- * 2. Even though heap_insert emitted a WAL log record, we have to emit
- * an XLOG_SEQ_LOG record too, since (a) the heap_insert record will
- * not have the right xmin, and (b) REDO of the heap_insert record
- * would re-init page and sequence magic number would be lost. This
- * means two log records instead of one :-(
+ * 2. Even though heap_insert emitted a WAL log record, we have to emit an
+ * XLOG_SEQ_LOG record too, since (a) the heap_insert record will not have
+ * the right xmin, and (b) REDO of the heap_insert record would re-init
+ * page and sequence magic number would be lost. This means two log
+ * records instead of one :-(
*/
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
{
/*
- * Note that the "tuple" structure is still just a local tuple
- * record created by heap_formtuple; its t_data pointer doesn't
- * point at the disk buffer. To scribble on the disk buffer we
- * need to fetch the item pointer. But do the same to the local
- * tuple, since that will be the source for the WAL log record,
- * below.
+ * Note that the "tuple" structure is still just a local tuple record
+ * created by heap_formtuple; its t_data pointer doesn't point at the
+ * disk buffer. To scribble on the disk buffer we need to fetch the
+ * item pointer. But do the same to the local tuple, since that will
+ * be the source for the WAL log record, below.
*/
ItemId itemId;
Item item;
/* Clear local cache so that we don't think we have cached numbers */
elm->last = new.last_value; /* last returned number */
- elm->cached = new.last_value; /* last cached number (forget
- * cached values) */
+ elm->cached = new.last_value; /* last cached number (forget cached
+ * values) */
START_CRIT_SECTION();
}
/*
- * Decide whether we should emit a WAL log record. If so, force up
- * the fetch count to grab SEQ_LOG_VALS more values than we actually
- * need to cache. (These will then be usable without logging.)
+ * Decide whether we should emit a WAL log record. If so, force up the
+ * fetch count to grab SEQ_LOG_VALS more values than we actually need to
+ * cache. (These will then be usable without logging.)
*
- * If this is the first nextval after a checkpoint, we must force a new
- * WAL record to be written anyway, else replay starting from the
- * checkpoint would fail to advance the sequence past the logged
- * values. In this case we may as well fetch extra values.
+ * If this is the first nextval after a checkpoint, we must force a new WAL
+ * record to be written anyway, else replay starting from the checkpoint
+ * would fail to advance the sequence past the logged values. In this
+ * case we may as well fetch extra values.
*/
if (log < fetch)
{
while (fetch) /* try to fetch cache [+ log ] numbers */
{
/*
- * Check MAXVALUE for ascending sequences and MINVALUE for
- * descending sequences
+ * Check MAXVALUE for ascending sequences and MINVALUE for descending
+ * sequences
*/
if (incby > 0)
{
snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
- RelationGetRelationName(seqrel), buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
+ RelationGetRelationName(seqrel), buf)));
}
next = minv;
}
snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
- RelationGetRelationName(seqrel), buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
+ RelationGetRelationName(seqrel), buf)));
}
next = maxv;
}
/* save info in local cache */
elm->last = next; /* last returned number */
- elm->cached = next; /* last cached number (forget cached
- * values) */
+ elm->cached = next; /* last cached number (forget cached values) */
START_CRIT_SECTION();
/*
* If we haven't touched the sequence already in this transaction,
- * we need to acquire AccessShareLock. We arrange for the lock to
+ * we need to acquire AccessShareLock. We arrange for the lock to
* be owned by the top transaction, so that we don't need to do it
* more than once per xact.
*/
/*
* Allocate new seqtable entry if we didn't find one.
*
- * NOTE: seqtable entries remain in the list for the life of a backend.
- * If the sequence itself is deleted then the entry becomes wasted
- * memory, but it's small enough that this should not matter.
+ * NOTE: seqtable entries remain in the list for the life of a backend. If
+ * the sequence itself is deleted then the entry becomes wasted memory,
+ * but it's small enough that this should not matter.
*/
if (elm == NULL)
{
/*
- * Time to make a new seqtable entry. These entries live as long
- * as the backend does, so we use plain malloc for them.
+ * Time to make a new seqtable entry. These entries live as long as
+ * the backend does, so we use plain malloc for them.
*/
elm = (SeqTable) malloc(sizeof(SeqTableData));
if (elm == NULL)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->min_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be less than MINVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be less than MINVALUE (%s)",
+ bufs, bufm)));
}
if (new->last_value > new->max_value)
{
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
+ bufs, bufm)));
}
/* CACHE */
buffer = XLogReadBuffer(true, reln, 0);
if (!BufferIsValid(buffer))
elog(PANIC, "seq_redo: can't read block 0 of rel %u/%u/%u",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
page = (Page) BufferGetPage(buffer);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.173 2005/10/03 02:45:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.174 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static bool needs_toast_table(Relation rel);
static void AlterIndexNamespaces(Relation classRel, Relation rel,
- Oid oldNspOid, Oid newNspOid);
+ Oid oldNspOid, Oid newNspOid);
static void AlterSeqNamespaces(Relation classRel, Relation rel,
- Oid oldNspOid, Oid newNspOid,
- const char *newNspName);
+ Oid oldNspOid, Oid newNspOid,
+ const char *newNspName);
static int transformColumnNameList(Oid relId, List *colList,
int16 *attnums, Oid *atttypids);
static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
static void ATPostAlterTypeParse(char *cmd, List **wqueue);
static void ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing);
static void change_owner_recurse_to_sequences(Oid relationOid,
- Oid newOwnerId);
+ Oid newOwnerId);
static void ATExecClusterOn(Relation rel, const char *indexName);
static void ATExecDropCluster(Relation rel);
static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel,
char *tablespacename);
static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace);
static void ATExecEnableDisableTrigger(Relation rel, char *trigname,
- bool enable, bool skip_system);
+ bool enable, bool skip_system);
static void copy_relation_data(Relation rel, SMgrRelation dst);
static void update_ri_trigger_args(Oid relid,
const char *oldname,
AttrNumber attnum;
/*
- * Truncate relname to appropriate length (probably a waste of time,
- * as parser should have done this already).
+ * Truncate relname to appropriate length (probably a waste of time, as
+ * parser should have done this already).
*/
StrNCpy(relname, stmt->relation->relname, NAMEDATALEN);
if (stmt->oncommit != ONCOMMIT_NOOP && !stmt->relation->istemp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("ON COMMIT can only be used on temporary tables")));
+ errmsg("ON COMMIT can only be used on temporary tables")));
/*
- * Look up the namespace in which we are supposed to create the
- * relation. Check we have permission to create there. Skip check if
- * bootstrapping, since permissions machinery may not be working yet.
+ * Look up the namespace in which we are supposed to create the relation.
+ * Check we have permission to create there. Skip check if bootstrapping,
+ * since permissions machinery may not be working yet.
*/
namespaceId = RangeVarGetCreationNamespace(stmt->relation);
}
/*
- * Look up inheritance ancestors and generate relation schema,
- * including inherited attributes.
+ * Look up inheritance ancestors and generate relation schema, including
+ * inherited attributes.
*/
schema = MergeAttributes(schema, stmt->inhRelations,
stmt->relation->istemp,
- &inheritOids, &old_constraints, &parentOidCount);
+ &inheritOids, &old_constraints, &parentOidCount);
/*
- * Create a relation descriptor from the relation schema and create
- * the relation. Note that in this stage only inherited (pre-cooked)
- * defaults and constraints will be included into the new relation.
- * (BuildDescForRelation takes care of the inherited defaults, but we
- * have to copy inherited constraints here.)
+ * Create a relation descriptor from the relation schema and create the
+ * relation. Note that in this stage only inherited (pre-cooked) defaults
+ * and constraints will be included into the new relation.
+ * (BuildDescForRelation takes care of the inherited defaults, but we have
+ * to copy inherited constraints here.)
*/
descriptor = BuildDescForRelation(schema);
Assert(cdef->raw_expr == NULL && cdef->cooked_expr != NULL);
/*
- * In multiple-inheritance situations, it's possible to
- * inherit the same grandparent constraint through multiple
- * parents. Hence, discard inherited constraints that match as
- * to both name and expression. Otherwise, gripe if the names
- * conflict.
+ * In multiple-inheritance situations, it's possible to inherit
+ * the same grandparent constraint through multiple parents.
+ * Hence, discard inherited constraints that match as to both name
+ * and expression. Otherwise, gripe if the names conflict.
*/
for (i = 0; i < ncheck; i++)
{
/*
* Open the new relation and acquire exclusive lock on it. This isn't
- * really necessary for locking out other backends (since they can't
- * see the new rel anyway until we commit), but it keeps the lock
- * manager from complaining about deadlock risks.
+ * really necessary for locking out other backends (since they can't see
+ * the new rel anyway until we commit), but it keeps the lock manager from
+ * complaining about deadlock risks.
*/
rel = relation_open(relationId, AccessExclusiveLock);
/*
- * Now add any newly specified column default values and CHECK
- * constraints to the new relation. These are passed to us in the
- * form of raw parsetrees; we need to transform them to executable
- * expression trees before they can be added. The most convenient way
- * to do that is to apply the parser's transformExpr routine, but
- * transformExpr doesn't work unless we have a pre-existing relation.
- * So, the transformation has to be postponed to this final step of
- * CREATE TABLE.
+ * Now add any newly specified column default values and CHECK constraints
+ * to the new relation. These are passed to us in the form of raw
+ * parsetrees; we need to transform them to executable expression trees
+ * before they can be added. The most convenient way to do that is to
+ * apply the parser's transformExpr routine, but transformExpr doesn't
+ * work unless we have a pre-existing relation. So, the transformation has
+ * to be postponed to this final step of CREATE TABLE.
*
- * Another task that's conveniently done at this step is to add
- * dependency links between columns and supporting relations (such as
- * SERIAL sequences).
+ * Another task that's conveniently done at this step is to add dependency
+ * links between columns and supporting relations (such as SERIAL
+ * sequences).
*
* First, scan schema to find new column defaults.
*/
/*
* ExecuteTruncate
- * Executes a TRUNCATE command.
+ * Executes a TRUNCATE command.
*
* This is a multi-relation truncate. It first opens and grabs exclusive
* locks on all relations involved, checking permissions and otherwise
void
ExecuteTruncate(List *relations)
{
- List *rels = NIL;
- ListCell *cell;
+ List *rels = NIL;
+ ListCell *cell;
foreach(cell, relations)
{
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a table",
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
/* Permissions checks */
if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
- RelationGetRelationName(rel));
+ RelationGetRelationName(rel));
if (!allowSystemTableMods && IsSystemRelation(rel))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied: \"%s\" is a system catalog",
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
/*
* We can never allow truncation of shared or nailed-in-cache
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot truncate system relation \"%s\"",
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
/*
* Don't allow truncate on temp tables of other backends ... their
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temporary tables of other sessions")));
+ errmsg("cannot truncate temporary tables of other sessions")));
/* Save it into the list of rels to truncate */
rels = lappend(rels, rel);
List *constraints = NIL;
int parentsWithOids = 0;
bool have_bogus_defaults = false;
- char *bogus_marker = "Bogus!"; /* marks conflicting
- * defaults */
+ char *bogus_marker = "Bogus!"; /* marks conflicting defaults */
int child_attno;
/*
- * Check for and reject tables with too many columns. We perform
- * this check relatively early for two reasons: (a) we don't run
- * the risk of overflowing an AttrNumber in subsequent code (b) an
- * O(n^2) algorithm is okay if we're processing <= 1600 columns,
- * but could take minutes to execute if the user attempts to
- * create a table with hundreds of thousands of columns.
+ * Check for and reject tables with too many columns. We perform this
+ * check relatively early for two reasons: (a) we don't run the risk of
+ * overflowing an AttrNumber in subsequent code (b) an O(n^2) algorithm is
+ * okay if we're processing <= 1600 columns, but could take minutes to
+ * execute if the user attempts to create a table with hundreds of
+ * thousands of columns.
*
- * Note that we also need to check that any we do not exceed this
- * figure after including columns from inherited relations.
+ * Note that we also need to check that we do not exceed this figure after
+ * including columns from inherited relations.
*/
if (list_length(schema) > MaxHeapAttributeNumber)
ereport(ERROR,
/*
* Check for duplicate names in the explicit list of attributes.
*
- * Although we might consider merging such entries in the same way that
- * we handle name conflicts for inherited attributes, it seems to make
- * more sense to assume such conflicts are errors.
+ * Although we might consider merging such entries in the same way that we
+ * handle name conflicts for inherited attributes, it seems to make more
+ * sense to assume such conflicts are errors.
*/
foreach(entry, schema)
{
}
/*
- * Scan the parents left-to-right, and merge their attributes to form
- * a list of inherited attributes (inhSchema). Also check to see if
- * we need to inherit an OID column.
+ * Scan the parents left-to-right, and merge their attributes to form a
+ * list of inherited attributes (inhSchema). Also check to see if we need
+ * to inherit an OID column.
*/
child_attno = 0;
foreach(entry, supers)
if (!istemp && isTempNamespace(RelationGetNamespace(relation)))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit from temporary relation \"%s\"",
- parent->relname)));
+ errmsg("cannot inherit from temporary relation \"%s\"",
+ parent->relname)));
/*
* We should have an UNDER permission flag for this, but for now,
constr = tupleDesc->constr;
/*
- * newattno[] will contain the child-table attribute numbers for
- * the attributes of this parent table. (They are not the same
- * for parents after the first one, nor if we have dropped
- * columns.)
+ * newattno[] will contain the child-table attribute numbers for the
+ * attributes of this parent table. (They are not the same for
+ * parents after the first one, nor if we have dropped columns.)
*/
newattno = (AttrNumber *)
palloc(tupleDesc->natts * sizeof(AttrNumber));
{
/*
* change_varattnos_of_a_node asserts that this is greater
- * than zero, so if anything tries to use it, we should
- * find out.
+ * than zero, so if anything tries to use it, we should find
+ * out.
*/
newattno[parent_attno - 1] = 0;
continue;
def->typename->typmod != attribute->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("inherited column \"%s\" has a type conflict",
- attributeName),
+ errmsg("inherited column \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- format_type_be(attribute->atttypid))));
+ format_type_be(attribute->atttypid))));
def->inhcount++;
/* Merge of NOT NULL constraints = OR 'em together */
def->is_not_null |= attribute->attnotnull;
Assert(this_default != NULL);
/*
- * If default expr could contain any vars, we'd need to
- * fix 'em, but it can't; so default is ready to apply to
- * child.
+ * If default expr could contain any vars, we'd need to fix
+ * 'em, but it can't; so default is ready to apply to child.
*
- * If we already had a default from some prior parent, check
- * to see if they are the same. If so, no problem; if
- * not, mark the column as having a bogus default. Below,
- * we will complain if the bogus default isn't overridden
- * by the child schema.
+ * If we already had a default from some prior parent, check to
+ * see if they are the same. If so, no problem; if not, mark
+ * the column as having a bogus default. Below, we will
+ * complain if the bogus default isn't overridden by the child
+ * schema.
*/
Assert(def->raw_default == NULL);
if (def->cooked_default == NULL)
}
/*
- * Now copy the constraints of this parent, adjusting attnos using
- * the completed newattno[] map
+ * Now copy the constraints of this parent, adjusting attnos using the
+ * completed newattno[] map
*/
if (constr && constr->num_check > 0)
{
pfree(newattno);
/*
- * Close the parent rel, but keep our AccessShareLock on it until
- * xact commit. That will prevent someone else from deleting or
- * ALTERing the parent before the child is committed.
+ * Close the parent rel, but keep our AccessShareLock on it until xact
+ * commit. That will prevent someone else from deleting or ALTERing
+ * the parent before the child is committed.
*/
heap_close(relation, NoLock);
}
/*
* If we had no inherited attributes, the result schema is just the
- * explicitly declared columns. Otherwise, we need to merge the
- * declared columns into the inherited schema list.
+ * explicitly declared columns. Otherwise, we need to merge the declared
+ * columns into the inherited schema list.
*/
if (inhSchema != NIL)
{
* have the same type and typmod.
*/
ereport(NOTICE,
- (errmsg("merging column \"%s\" with inherited definition",
- attributeName)));
+ (errmsg("merging column \"%s\" with inherited definition",
+ attributeName)));
def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1);
if (typenameTypeId(def->typename) != typenameTypeId(newdef->typename) ||
def->typename->typmod != newdef->typename->typmod)
attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- TypeNameToString(newdef->typename))));
+ TypeNameToString(newdef->typename))));
/* Mark the column as locally defined */
def->is_local = true;
/* Merge of NOT NULL constraints = OR 'em together */
schema = inhSchema;
/*
- * Check that we haven't exceeded the legal # of columns after
- * merging in inherited columns.
+ * Check that we haven't exceeded the legal # of columns after merging
+ * in inherited columns.
*/
if (list_length(schema) > MaxHeapAttributeNumber)
ereport(ERROR,
}
/*
- * If we found any conflicting parent default values, check to make
- * sure they were overridden by the child.
+ * If we found any conflicting parent default values, check to make sure
+ * they were overridden by the child.
*/
if (have_bogus_defaults)
{
if (def->cooked_default == bogus_marker)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_DEFINITION),
- errmsg("column \"%s\" inherits conflicting default values",
- def->colname),
+ errmsg("column \"%s\" inherits conflicting default values",
+ def->colname),
errhint("To resolve the conflict, specify a default explicitly.")));
}
}
var->varattno > 0)
{
/*
- * ??? the following may be a problem when the node is
- * multiply referenced though stringToNode() doesn't create
- * such a node currently.
+ * ??? the following may be a problem when the node is multiply
+ * referenced, though stringToNode() doesn't create such a node
+ * currently.
*/
Assert(newattno[var->varattno - 1] > 0);
var->varattno = newattno[var->varattno - 1];
return;
/*
- * Store INHERITS information in pg_inherits using direct ancestors
- * only. Also enter dependencies on the direct ancestors, and make
- * sure they are marked with relhassubclass = true.
+ * Store INHERITS information in pg_inherits using direct ancestors only.
+ * Also enter dependencies on the direct ancestors, and make sure they are
+ * marked with relhassubclass = true.
*
- * (Once upon a time, both direct and indirect ancestors were found here
- * and then entered into pg_ipl. Since that catalog doesn't exist
- * anymore, there's no need to look for indirect ancestors.)
+ * (Once upon a time, both direct and indirect ancestors were found here and
+ * then entered into pg_ipl. Since that catalog doesn't exist anymore,
+ * there's no need to look for indirect ancestors.)
*/
relation = heap_open(InheritsRelationId, RowExclusiveLock);
desc = RelationGetDescr(relation);
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
*
- * If the tuple already has the right relhassubclass setting, we don't
- * need to update it, but we still need to issue an SI inval message.
+ * If the tuple already has the right relhassubclass setting, we don't need
+ * to update it, but we still need to issue an SI inval message.
*/
relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
ListCell *indexoidscan;
/*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction.
+ * Grab an exclusive lock on the target table, which we will NOT release
+ * until end of transaction.
*/
targetrelation = relation_open(myrelid, AccessExclusiveLock);
/*
- * permissions checking. this would normally be done in utility.c,
- * but this particular routine is recursive.
+ * permissions checking. this would normally be done in utility.c, but
+ * this particular routine is recursive.
*
* normally, only the owner of a class can change its schema.
*/
* attribute in all classes that inherit from 'relname' (as well as in
* 'relname').
*
- * any permissions or problems with duplicate attributes will cause the
- * whole transaction to abort, which is what we want -- all or
- * nothing.
+ * any permissions or problems with duplicate attributes will cause the whole
+ * transaction to abort, which is what we want -- all or nothing.
*/
if (recurse)
{
children = find_all_inheritors(myrelid);
/*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
+ * find_all_inheritors does the recursive search of the inheritance
+ * hierarchy, so all we have to do is process all of the relids in the
+ * list that it returns.
*/
foreach(child, children)
{
else
{
/*
- * If we are told not to recurse, there had better not be any
- * child tables; else the rename would put them out of step.
+ * If we are told not to recurse, there had better not be any child
+ * tables; else the rename would put them out of step.
*/
if (!recursing &&
find_inheritance_children(myrelid) != NIL)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
errmsg("column \"%s\" of relation \"%s\" already exists",
- newattname, RelationGetRelationName(targetrelation))));
+ newattname, RelationGetRelationName(targetrelation))));
namestrcpy(&(attform->attname), newattname);
heap_freetuple(atttup);
/*
- * Update column names of indexes that refer to the column being
- * renamed.
+ * Update column names of indexes that refer to the column being renamed.
*/
indexoidlist = RelationGetIndexList(targetrelation);
bool relhastriggers;
/*
- * Grab an exclusive lock on the target table or index, which we will
- * NOT release until end of transaction.
+ * Grab an exclusive lock on the target table or index, which we will NOT
+ * release until end of transaction.
*/
targetrelation = relation_open(myrelid, AccessExclusiveLock);
relhastriggers = (targetrelation->rd_rel->reltriggers > 0);
/*
- * Find relation's pg_class tuple, and make sure newrelname isn't in
- * use.
+ * Find relation's pg_class tuple, and make sure newrelname isn't in use.
*/
relrelation = heap_open(RelationRelationId, RowExclusiveLock);
newrelname)));
/*
- * Update pg_class tuple with new relname. (Scribbling on reltup is
- * OK because it's a copy...)
+ * Update pg_class tuple with new relname. (Scribbling on reltup is OK
+ * because it's a copy...)
*/
namestrcpy(&(((Form_pg_class) GETSTRUCT(reltup))->relname), newrelname);
/*
* It is an RI trigger, so parse the tgargs bytea.
*
- * NB: we assume the field will never be compressed or moved out of
- * line; so does trigger.c ...
+ * NB: we assume the field will never be compressed or moved out of line;
+ * so does trigger.c ...
*/
tgnargs = pg_trigger->tgnargs;
val = (bytea *)
}
/*
- * Figure out which item(s) to look at. If the trigger is
- * primary-key type and attached to my rel, I should look at the
- * PK fields; if it is foreign-key type and attached to my rel, I
- * should look at the FK fields. But the opposite rule holds when
- * examining triggers found by tgconstrrel search.
+ * Figure out which item(s) to look at. If the trigger is primary-key
+ * type and attached to my rel, I should look at the PK fields; if it
+ * is foreign-key type and attached to my rel, I should look at the FK
+ * fields. But the opposite rule holds when examining triggers found
+ * by tgconstrrel search.
*/
examine_pk = (tg_type == RI_TRIGGER_PK) == (!fk_scan);
heap_close(tgrel, RowExclusiveLock);
/*
- * Increment cmd counter to make updates visible; this is needed in
- * case the same tuple has to be updated again by next pass (can
- * happen in case of a self-referential FK relationship).
+ * Increment cmd counter to make updates visible; this is needed in case
+ * the same tuple has to be updated again by next pass (can happen in case
+ * of a self-referential FK relationship).
*/
CommandCounterIncrement();
}
/*
* Copy the original subcommand for each table. This avoids conflicts
* when different child tables need to make different parse
- * transformations (for example, the same column may have different
- * column numbers in different children).
+ * transformations (for example, the same column may have different column
+ * numbers in different children).
*/
cmd = copyObject(cmd);
/*
- * Do permissions checking, recursion to child tables if needed, and
- * any additional phase-1 processing needed.
+ * Do permissions checking, recursion to child tables if needed, and any
+ * additional phase-1 processing needed.
*/
switch (cmd->subtype)
{
case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */
/*
- * We allow defaults on views so that INSERT into a view can
- * have default-ish behavior. This works because the rewriter
+ * We allow defaults on views so that INSERT into a view can have
+ * default-ish behavior. This works because the rewriter
* substitutes default values into INSERTs before it expands
* rules.
*/
/*
* Currently we recurse only for CHECK constraints, never for
- * foreign-key constraints. UNIQUE/PKEY constraints won't be
- * seen here.
+ * foreign-key constraints. UNIQUE/PKEY constraints won't be seen
+ * here.
*/
if (IsA(cmd->def, Constraint))
ATSimpleRecursion(wqueue, rel, cmd, recurse);
ListCell *ltab;
/*
- * We process all the tables "in parallel", one pass at a time. This
- * is needed because we may have to propagate work from one table to
- * another (specifically, ALTER TYPE on a foreign key's PK has to
- * dispatch the re-adding of the foreign key constraint to the other
- * table). Work can only be propagated into later passes, however.
+ * We process all the tables "in parallel", one pass at a time. This is
+ * needed because we may have to propagate work from one table to another
+ * (specifically, ALTER TYPE on a foreign key's PK has to dispatch the
+ * re-adding of the foreign key constraint to the other table). Work can
+ * only be propagated into later passes, however.
*/
for (pass = 0; pass < AT_NUM_PASSES; pass++)
{
continue;
/*
- * Exclusive lock was obtained by phase 1, needn't get it
- * again
+ * Exclusive lock was obtained by phase 1, needn't get it again
*/
rel = relation_open(tab->relid, NoLock);
ATExecCmd(tab, rel, (AlterTableCmd *) lfirst(lcmd));
/*
- * After the ALTER TYPE pass, do cleanup work (this is not
- * done in ATExecAlterColumnType since it should be done only
- * once if multiple columns of a table are altered).
+ * After the ALTER TYPE pass, do cleanup work (this is not done in
+ * ATExecAlterColumnType since it should be done only once if
+ * multiple columns of a table are altered).
*/
if (pass == AT_PASS_ALTER_TYPE)
ATPostAlterTypeCleanup(wqueue, tab);
}
/*
- * Do an implicit CREATE TOAST TABLE if we executed any subcommands
- * that might have added a column or changed column storage.
+ * Do an implicit CREATE TOAST TABLE if we executed any subcommands that
+ * might have added a column or changed column storage.
*/
foreach(ltab, *wqueue)
{
case AT_EnableTrigUser: /* ENABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL, true, true);
break;
- case AT_DisableTrigUser: /* DISABLE TRIGGER USER */
+ case AT_DisableTrigUser: /* DISABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL, false, true);
break;
default: /* oops */
}
/*
- * Bump the command counter to ensure the next subcommand in the
- * sequence can see the changes so far
+ * Bump the command counter to ensure the next subcommand in the sequence
+ * can see the changes so far
*/
CommandCounterIncrement();
}
AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab);
/*
- * We only need to rewrite the table if at least one column needs
- * to be recomputed.
+ * We only need to rewrite the table if at least one column needs to
+ * be recomputed.
*/
if (tab->newvals != NIL)
{
/*
* We can never allow rewriting of shared or nailed-in-cache
- * relations, because we can't support changing their
- * relfilenode values.
+ * relations, because we can't support changing their relfilenode
+ * values.
*/
if (OldHeap->rd_rel->relisshared || OldHeap->rd_isnailed)
ereport(ERROR,
RelationGetRelationName(OldHeap))));
/*
- * Don't allow rewrite on temp tables of other backends ...
- * their local buffer manager is not going to cope.
+ * Don't allow rewrite on temp tables of other backends ... their
+ * local buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot rewrite temporary tables of other sessions")));
+ errmsg("cannot rewrite temporary tables of other sessions")));
/*
* Select destination tablespace (same as original unless user
/*
* Create the new heap, using a temporary name in the same
- * namespace as the existing table. NOTE: there is some risk
- * of collision with user relnames. Working around this seems
- * more trouble than it's worth; in particular, we can't
- * create the new heap in a different namespace from the old,
- * or we will have problems with the TEMP status of temp
- * tables.
+ * namespace as the existing table. NOTE: there is some risk of
+ * collision with user relnames. Working around this seems more
+ * trouble than it's worth; in particular, we can't create the new
+ * heap in a different namespace from the old, or we will have
+ * problems with the TEMP status of temp tables.
*/
snprintf(NewHeapName, sizeof(NewHeapName),
"pg_temp_%u", tab->relid);
/* performDeletion does CommandCounterIncrement at end */
/*
- * Rebuild each index on the relation (but not the toast
- * table, which is all-new anyway). We do not need
+ * Rebuild each index on the relation (but not the toast table,
+ * which is all-new anyway). We do not need
* CommandCounterIncrement() because reindex_relation does it.
*/
reindex_relation(tab->relid, false);
else
{
/*
- * Test the current data within the table against new
- * constraints generated by ALTER TABLE commands, but don't
- * rebuild data.
+ * Test the current data within the table against new constraints
+ * generated by ALTER TABLE commands, but don't rebuild data.
*/
if (tab->constraints != NIL)
ATRewriteTable(tab, InvalidOid);
/*
- * If we had SET TABLESPACE but no reason to reconstruct
- * tuples, just do a block-by-block copy.
+ * If we had SET TABLESPACE but no reason to reconstruct tuples,
+ * just do a block-by-block copy.
*/
if (tab->newTableSpace)
ATExecSetTableSpace(tab->relid, tab->newTableSpace);
/*
* Foreign key constraints are checked in a final pass, since (a) it's
- * generally best to examine each one separately, and (b) it's at
- * least theoretically possible that we have changed both relations of
- * the foreign key, and we'd better have finished both rewrites before
- * we try to read the tables.
+ * generally best to examine each one separately, and (b) it's at least
+ * theoretically possible that we have changed both relations of the
+ * foreign key, and we'd better have finished both rewrites before we try
+ * to read the tables.
*/
foreach(ltab, *wqueue)
{
newrel = NULL;
/*
- * If we need to rewrite the table, the operation has to be propagated
- * to tables that use this table's rowtype as a column type.
+ * If we need to rewrite the table, the operation has to be propagated to
+ * tables that use this table's rowtype as a column type.
*
- * (Eventually this will probably become true for scans as well, but at
- * the moment a composite type does not enforce any constraints, so
- * it's not necessary/appropriate to enforce them just during ALTER.)
+ * (Eventually this will probably become true for scans as well, but at the
+ * moment a composite type does not enforce any constraints, so it's not
+ * necessary/appropriate to enforce them just during ALTER.)
*/
if (newrel)
find_composite_type_dependencies(oldrel->rd_rel->reltype,
HeapScanDesc scan;
HeapTuple tuple;
MemoryContext oldCxt;
- List *dropped_attrs = NIL;
- ListCell *lc;
+ List *dropped_attrs = NIL;
+ ListCell *lc;
econtext = GetPerTupleExprContext(estate);
/*
- * Make tuple slots for old and new tuples. Note that even when
- * the tuples are the same, the tupDescs might not be (consider
- * ADD COLUMN without a default).
+ * Make tuple slots for old and new tuples. Note that even when the
+ * tuples are the same, the tupDescs might not be (consider ADD COLUMN
+ * without a default).
*/
oldslot = MakeSingleTupleTableSlot(oldTupDesc);
newslot = MakeSingleTupleTableSlot(newTupDesc);
/*
* Any attributes that are dropped according to the new tuple
- * descriptor can be set to NULL. We precompute the list of
- * dropped attributes to avoid needing to do so in the
- * per-tuple loop.
+ * descriptor can be set to NULL. We precompute the list of dropped
+ * attributes to avoid needing to do so in the per-tuple loop.
*/
for (i = 0; i < newTupDesc->natts; i++)
{
scan = heap_beginscan(oldrel, SnapshotNow, 0, NULL);
/*
- * Switch to per-tuple memory context and reset it for each
- * tuple produced, so we don't leak memory.
+ * Switch to per-tuple memory context and reset it for each tuple
+ * produced, so we don't leak memory.
*/
oldCxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
{
if (newrel)
{
- Oid tupOid = InvalidOid;
+ Oid tupOid = InvalidOid;
/* Extract data from old tuple */
heap_deform_tuple(tuple, oldTupDesc, values, isnull);
tupOid = HeapTupleGetOid(tuple);
/* Set dropped attributes to null in new tuple */
- foreach (lc, dropped_attrs)
+ foreach(lc, dropped_attrs)
isnull[lfirst_int(lc)] = true;
/*
- * Process supplied expressions to replace selected
- * columns. Expression inputs come from the old tuple.
+ * Process supplied expressions to replace selected columns.
+ * Expression inputs come from the old tuple.
*/
ExecStoreTuple(tuple, oldslot, InvalidBuffer, false);
econtext->ecxt_scantuple = oldslot;
values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate,
econtext,
- &isnull[ex->attnum - 1],
+ &isnull[ex->attnum - 1],
NULL);
}
/*
- * Form the new tuple. Note that we don't explicitly
- * pfree it, since the per-tuple memory context will
- * be reset shortly.
+ * Form the new tuple. Note that we don't explicitly pfree it,
+ * since the per-tuple memory context will be reset shortly.
*/
tuple = heap_form_tuple(newTupDesc, values, isnull);
&isnull);
if (isnull)
ereport(ERROR,
- (errcode(ERRCODE_NOT_NULL_VIOLATION),
- errmsg("column \"%s\" contains null values",
- get_attname(tab->relid,
- con->attnum))));
+ (errcode(ERRCODE_NOT_NULL_VIOLATION),
+ errmsg("column \"%s\" contains null values",
+ get_attname(tab->relid,
+ con->attnum))));
}
break;
case CONSTR_FOREIGN:
children = find_all_inheritors(relid);
/*
- * find_all_inheritors does the recursive search of the
- * inheritance hierarchy, so all we have to do is process all of
- * the relids in the list that it returns.
+ * find_all_inheritors does the recursive search of the inheritance
+ * hierarchy, so all we have to do is process all of the relids in the
+ * list that it returns.
*/
foreach(child, children)
{
HeapTuple depTup;
/*
- * We scan pg_depend to find those things that depend on the rowtype.
- * (We assume we can ignore refobjsubid for a rowtype.)
+ * We scan pg_depend to find those things that depend on the rowtype. (We
+ * assume we can ignore refobjsubid for a rowtype.)
*/
depRel = heap_open(DependRelationId, AccessShareLock);
else if (OidIsValid(rel->rd_rel->reltype))
{
/*
- * A view or composite type itself isn't a problem, but we
- * must recursively check for indirect dependencies via its
- * rowtype.
+ * A view or composite type itself isn't a problem, but we must
+ * recursively check for indirect dependencies via its rowtype.
*/
find_composite_type_dependencies(rel->rd_rel->reltype,
origTblName);
/*
* Recurse to add the column to child classes, if requested.
*
- * We must recurse one level at a time, so that multiply-inheriting
- * children are visited the right number of times and end up with the
- * right attinhcount.
+ * We must recurse one level at a time, so that multiply-inheriting children
+ * are visited the right number of times and end up with the right
+ * attinhcount.
*/
if (recurse)
{
else
{
/*
- * If we are told not to recurse, there had better not be any
- * child tables; else the addition would put them out of step.
+ * If we are told not to recurse, there had better not be any child
+ * tables; else the addition would put them out of step.
*/
if (find_inheritance_children(RelationGetRelid(rel)) != NIL)
ereport(ERROR,
attrdesc = heap_open(AttributeRelationId, RowExclusiveLock);
/*
- * Are we adding the column to a recursion child? If so, check
- * whether to merge with an existing definition for the column.
+ * Are we adding the column to a recursion child? If so, check whether to
+ * merge with an existing definition for the column.
*/
if (colDef->inhcount > 0)
{
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- RelationGetRelationName(rel), colDef->colname)));
+ RelationGetRelationName(rel), colDef->colname)));
/* Bump the existing child att's inhcount */
childatt->attinhcount++;
/* Inform the user about the merge */
ereport(NOTICE,
- (errmsg("merging definition of column \"%s\" for child \"%s\"",
- colDef->colname, RelationGetRelationName(rel))));
+ (errmsg("merging definition of column \"%s\" for child \"%s\"",
+ colDef->colname, RelationGetRelationName(rel))));
heap_close(attrdesc, RowExclusiveLock);
return;
elog(ERROR, "cache lookup failed for relation %u", myrelid);
/*
- * this test is deliberately not attisdropped-aware, since if one
- * tries to add a column matching a dropped column name, it's gonna
- * fail anyway.
+ * this test is deliberately not attisdropped-aware, since if one tries to
+ * add a column matching a dropped column name, it's gonna fail anyway.
*/
if (SearchSysCacheExists(ATTNAME,
ObjectIdGetDatum(myrelid),
/*
* Tell Phase 3 to fill in the default expression, if there is one.
*
- * If there is no default, Phase 3 doesn't have to do anything, because
- * that effectively means that the default is NULL. The heap tuple
- * access routines always check for attnum > # of attributes in tuple,
- * and return NULL if so, so without any modification of the tuple
- * data we will get the effect of NULL values in the new column.
+ * If there is no default, Phase 3 doesn't have to do anything, because that
+ * effectively means that the default is NULL. The heap tuple access
+ * routines always check for attnum > # of attributes in tuple, and return
+ * NULL if so, so without any modification of the tuple data we will get
+ * the effect of NULL values in the new column.
*
- * An exception occurs when the new column is of a domain type: the
- * domain might have a NOT NULL constraint, or a check constraint that
- * indirectly rejects nulls. If there are any domain constraints then
- * we construct an explicit NULL default value that will be passed through
- * CoerceToDomain processing. (This is a tad inefficient, since it
- * causes rewriting the table which we really don't have to do, but
- * the present design of domain processing doesn't offer any simple way
- * of checking the constraints more directly.)
+ * An exception occurs when the new column is of a domain type: the domain
+ * might have a NOT NULL constraint, or a check constraint that indirectly
+ * rejects nulls. If there are any domain constraints then we construct
+ * an explicit NULL default value that will be passed through
+ * CoerceToDomain processing. (This is a tad inefficient, since it causes
+ * rewriting the table which we really don't have to do, but the present
+ * design of domain processing doesn't offer any simple way of checking
+ * the constraints more directly.)
*
* Note: we use build_column_default, and not just the cooked default
- * returned by AddRelationRawConstraints, so that the right thing
- * happens when a datatype's default applies.
+ * returned by AddRelationRawConstraints, so that the right thing happens
+ * when a datatype's default applies.
*/
defval = (Expr *) build_column_default(rel, attribute->attnum);
if (!defval && GetDomainConstraints(typeOid) != NIL)
{
- Oid basetype = getBaseType(typeOid);
+ Oid basetype = getBaseType(typeOid);
defval = (Expr *) makeNullConst(basetype);
defval = (Expr *) coerce_to_target_type(NULL,
{
/*
* We do our own permission checking because (a) we want to allow SET
- * STATISTICS on indexes (for expressional index columns), and (b) we
- * want to allow SET STATISTICS on system catalogs without requiring
+ * STATISTICS on indexes (for expressional index columns), and (b) we want
+ * to allow SET STATISTICS on system catalogs without requiring
* allowSystemTableMods to be turned on.
*/
if (rel->rd_rel->relkind != RELKIND_RELATION &&
colName)));
/*
- * safety check: do not allow toasted storage modes unless column
- * datatype is TOAST-aware.
+ * safety check: do not allow toasted storage modes unless column datatype
+ * is TOAST-aware.
*/
if (newstorage == 'p' || TypeIsToastable(attrtuple->atttypid))
attrtuple->attstorage = newstorage;
/*
* Propagate to children as appropriate. Unlike most other ALTER
- * routines, we have to do this one level of recursion at a time; we
- * can't use find_all_inheritors to do it in one pass.
+ * routines, we have to do this one level of recursion at a time; we can't
+ * use find_all_inheritors to do it in one pass.
*/
children = find_inheritance_children(RelationGetRelid(rel));
{
/*
* If the child column has other definition sources, just
- * decrement its inheritance count; if not, recurse to
- * delete it.
+ * decrement its inheritance count; if not, recurse to delete
+ * it.
*/
if (childatt->attinhcount == 1 && !childatt->attislocal)
{
else
{
/*
- * If we were told to drop ONLY in this table (no
- * recursion), we need to mark the inheritors' attribute
- * as locally defined rather than inherited.
+ * If we were told to drop ONLY in this table (no recursion),
+ * we need to mark the inheritors' attribute as locally
+ * defined rather than inherited.
*/
childatt->attinhcount--;
childatt->attislocal = true;
class_rel = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(rel)),
+ ObjectIdGetDatum(RelationGetRelid(rel)),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u",
/*
* Currently, we only expect to see CONSTR_CHECK nodes
* arriving here (see the preprocessing done in
- * parser/analyze.c). Use a switch anyway to make it
- * easier to add more code later.
+ * parser/analyze.c). Use a switch anyway to make it easier
+ * to add more code later.
*/
switch (constr->contype)
{
ListCell *lcon;
/*
- * Call AddRelationRawConstraints to do the
- * work. It returns a list of cooked
- * constraints.
+ * Call AddRelationRawConstraints to do the work.
+ * It returns a list of cooked constraints.
*/
newcons = AddRelationRawConstraints(rel, NIL,
- list_make1(constr));
+ list_make1(constr));
/* Add each constraint to Phase 3's queue */
foreach(lcon, newcons)
{
else
fkconstraint->constr_name =
ChooseConstraintName(RelationGetRelationName(rel),
- strVal(linitial(fkconstraint->fk_attrs)),
+ strVal(linitial(fkconstraint->fk_attrs)),
"fkey",
RelationGetNamespace(rel),
NIL);
Oid constrOid;
/*
- * Grab an exclusive lock on the pk table, so that someone doesn't
- * delete rows out from under us. (Although a lesser lock would do for
- * that purpose, we'll need exclusive lock anyway to add triggers to
- * the pk table; trying to start with a lesser lock will just create a
- * risk of deadlock.)
+ * Grab an exclusive lock on the pk table, so that someone doesn't delete
+ * rows out from under us. (Although a lesser lock would do for that
+ * purpose, we'll need exclusive lock anyway to add triggers to the pk
+ * table; trying to start with a lesser lock will just create a risk of
+ * deadlock.)
*/
pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock);
/*
* Validity and permissions checks
*
- * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER,
- * but we may as well error out sooner instead of later.
+ * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER, but
+ * we may as well error out sooner instead of later.
*/
if (pkrel->rd_rel->relkind != RELKIND_RELATION)
ereport(ERROR,
RelationGetRelationName(rel));
/*
- * Disallow reference from permanent table to temp table or vice
- * versa. (The ban on perm->temp is for fairly obvious reasons. The
- * ban on temp->perm is because other backends might need to run the
- * RI triggers on the perm table, but they can't reliably see tuples
- * the owning backend has created in the temp table, because
- * non-shared buffers are used for temp tables.)
+ * Disallow reference from permanent table to temp table or vice versa.
+ * (The ban on perm->temp is for fairly obvious reasons. The ban on
+ * temp->perm is because other backends might need to run the RI triggers
+ * on the perm table, but they can't reliably see tuples the owning
+ * backend has created in the temp table, because non-shared buffers are
+ * used for temp tables.)
*/
if (isTempNamespace(RelationGetNamespace(pkrel)))
{
}
/*
- * Look up the referencing attributes to make sure they exist, and
- * record their attnums and type OIDs.
+ * Look up the referencing attributes to make sure they exist, and record
+ * their attnums and type OIDs.
*/
MemSet(pkattnum, 0, sizeof(pkattnum));
MemSet(fkattnum, 0, sizeof(fkattnum));
fkattnum, fktypoid);
/*
- * If the attribute list for the referenced table was omitted, lookup
- * the definition of the primary key and use it. Otherwise, validate
- * the supplied attribute list. In either case, discover the index
- * OID and index opclasses, and the attnums and type OIDs of the
- * attributes.
+ * If the attribute list for the referenced table was omitted, lookup the
+ * definition of the primary key and use it. Otherwise, validate the
+ * supplied attribute list. In either case, discover the index OID and
+ * index opclasses, and the attnums and type OIDs of the attributes.
*/
if (fkconstraint->pk_attrs == NIL)
{
for (i = 0; i < numpks; i++)
{
/*
- * pktypoid[i] is the primary key table's i'th key's type
- * fktypoid[i] is the foreign key table's i'th key's type
+ * pktypoid[i] is the primary key table's i'th key's type fktypoid[i]
+ * is the foreign key table's i'th key's type
*
- * Note that we look for an operator with the PK type on the left;
- * when the types are different this is critical because the PK
- * index will need operators with the indexkey on the left.
- * (Ordinarily both commutator operators will exist if either
- * does, but we won't get the right answer from the test below on
- * opclass membership unless we select the proper operator.)
+ * Note that we look for an operator with the PK type on the left; when
+ * the types are different this is critical because the PK index will
+ * need operators with the indexkey on the left. (Ordinarily both
+ * commutator operators will exist if either does, but we won't get
+ * the right answer from the test below on opclass membership unless
+ * we select the proper operator.)
*/
Operator o = oper(list_make1(makeString("=")),
pktypoid[i], fktypoid[i], true);
fkconstraint->constr_name),
errdetail("Key columns \"%s\" and \"%s\" "
"are of incompatible types: %s and %s.",
- strVal(list_nth(fkconstraint->fk_attrs, i)),
- strVal(list_nth(fkconstraint->pk_attrs, i)),
+ strVal(list_nth(fkconstraint->fk_attrs, i)),
+ strVal(list_nth(fkconstraint->pk_attrs, i)),
format_type_be(fktypoid[i]),
format_type_be(pktypoid[i]))));
/*
- * Check that the found operator is compatible with the PK index,
- * and generate a warning if not, since otherwise costly seqscans
- * will be incurred to check FK validity.
+ * Check that the found operator is compatible with the PK index, and
+ * generate a warning if not, since otherwise costly seqscans will be
+ * incurred to check FK validity.
*/
if (!op_in_opclass(oprid(o), opclasses[i]))
ereport(WARNING,
fkconstraint->constr_name),
errdetail("Key columns \"%s\" and \"%s\" "
"are of different types: %s and %s.",
- strVal(list_nth(fkconstraint->fk_attrs, i)),
- strVal(list_nth(fkconstraint->pk_attrs, i)),
+ strVal(list_nth(fkconstraint->fk_attrs, i)),
+ strVal(list_nth(fkconstraint->pk_attrs, i)),
format_type_be(fktypoid[i]),
format_type_be(pktypoid[i]))));
}
/*
- * Tell Phase 3 to check that the constraint is satisfied by existing
- * rows (we can skip this during table creation).
+ * Tell Phase 3 to check that the constraint is satisfied by existing rows
+ * (we can skip this during table creation).
*/
if (!fkconstraint->skip_validation)
{
if (attnum >= INDEX_MAX_KEYS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
- errmsg("cannot have more than %d keys in a foreign key",
- INDEX_MAX_KEYS)));
+ errmsg("cannot have more than %d keys in a foreign key",
+ INDEX_MAX_KEYS)));
attnums[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->attnum;
atttypids[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->atttypid;
ReleaseSysCache(atttuple);
int i;
/*
- * Get the list of index OIDs for the table from the relcache, and
- * look up each one in the pg_index syscache until we find one marked
- * primary key (hopefully there isn't more than one such).
+ * Get the list of index OIDs for the table from the relcache, and look up
+ * each one in the pg_index syscache until we find one marked primary key
+ * (hopefully there isn't more than one such).
*/
*indexOid = InvalidOid;
if (!OidIsValid(*indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("there is no primary key for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ errmsg("there is no primary key for referenced table \"%s\"",
+ RelationGetRelationName(pkrel))));
/* Must get indclass the hard way */
indclassDatum = SysCacheGetAttr(INDEXRELID, indexTuple,
atttypids[i] = attnumTypeId(pkrel, pkattno);
opclasses[i] = indclass->values[i];
*attnamelist = lappend(*attnamelist,
- makeString(pstrdup(NameStr(*attnumAttName(pkrel, pkattno)))));
+ makeString(pstrdup(NameStr(*attnumAttName(pkrel, pkattno)))));
}
ReleaseSysCache(indexTuple);
ListCell *indexoidscan;
/*
- * Get the list of index OIDs for the table from the relcache, and
- * look up each one in the pg_index syscache, and match unique indexes
- * to the list of attnums we are given.
+ * Get the list of index OIDs for the table from the relcache, and look up
+ * each one in the pg_index syscache, and match unique indexes to the list
+ * of attnums we are given.
*/
indexoidlist = RelationGetIndexList(pkrel);
indclass = (oidvector *) DatumGetPointer(indclassDatum);
/*
- * The given attnum list may match the index columns in any
- * order. Check that each list is a subset of the other.
+ * The given attnum list may match the index columns in any order.
+ * Check that each list is a subset of the other.
*/
for (i = 0; i < numattrs; i++)
{
return;
/*
- * Scan through each tuple, calling RI_FKey_check_ins (insert trigger)
- * as if that tuple had just been inserted. If any of those fail, it
- * should ereport(ERROR) and that's that.
+ * Scan through each tuple, calling RI_FKey_check_ins (insert trigger) as
+ * if that tuple had just been inserted. If any of those fail, it should
+ * ereport(ERROR) and that's that.
*/
MemSet(&trig, 0, sizeof(trig));
trig.tgoid = InvalidOid;
trig.tginitdeferred = FALSE;
trig.tgargs = (char **) palloc(sizeof(char *) *
- (4 + list_length(fkconstraint->fk_attrs)
- + list_length(fkconstraint->pk_attrs)));
+ (4 + list_length(fkconstraint->fk_attrs)
+ + list_length(fkconstraint->pk_attrs)));
trig.tgargs[0] = trig.tgname;
trig.tgargs[1] = RelationGetRelationName(rel);
fk_trigger->args = lappend(fk_trigger->args,
makeString(myRel->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable->relname));
+ makeString(fkconstraint->pktable->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
+ makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
if (list_length(fkconstraint->fk_attrs) != list_length(fkconstraint->pk_attrs))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FOREIGN_KEY),
constrobj;
/*
- * Reconstruct a RangeVar for my relation (not passed in,
- * unfortunately).
+ * Reconstruct a RangeVar for my relation (not passed in, unfortunately).
*/
myRel = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)),
pstrdup(RelationGetRelationName(rel)));
CommandCounterIncrement();
/*
- * Build and execute a CREATE CONSTRAINT TRIGGER statement for the
- * CHECK action for both INSERTs and UPDATEs on the referencing table.
+ * Build and execute a CREATE CONSTRAINT TRIGGER statement for the CHECK
+ * action for both INSERTs and UPDATEs on the referencing table.
*/
CreateFKCheckTrigger(myRel, fkconstraint, &constrobj, &trigobj, true);
CreateFKCheckTrigger(myRel, fkconstraint, &constrobj, &trigobj, false);
fk_trigger->args = lappend(fk_trigger->args,
makeString(myRel->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable->relname));
+ makeString(fkconstraint->pktable->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
+ makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
forboth(fk_attr, fkconstraint->fk_attrs,
pk_attr, fkconstraint->pk_attrs)
{
fk_trigger->args = lappend(fk_trigger->args,
makeString(myRel->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable->relname));
+ makeString(fkconstraint->pktable->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
+ makeString(fkMatchTypeToString(fkconstraint->fk_matchtype)));
forboth(fk_attr, fkconstraint->fk_attrs,
pk_attr, fkconstraint->pk_attrs)
{
/* Otherwise if more than one constraint deleted, notify */
else if (deleted > 1)
ereport(NOTICE,
- (errmsg("multiple constraints named \"%s\" were dropped",
- constrName)));
+ (errmsg("multiple constraints named \"%s\" were dropped",
+ constrName)));
}
}
CheckAttributeType(colName, targettype);
/*
- * Set up an expression to transform the old data value to the new
- * type. If a USING option was given, transform and use that
- * expression, else just take the old value and try to coerce it. We
- * do this first so that type incompatibility can be detected before
- * we waste effort, and because we need the expression to be parsed
- * against the original table rowtype.
+ * Set up an expression to transform the old data value to the new type.
+ * If a USING option was given, transform and use that expression, else
+ * just take the old value and try to coerce it. We do this first so that
+ * type incompatibility can be detected before we waste effort, and
+ * because we need the expression to be parsed against the original table
+ * rowtype.
*/
if (cmd->transform)
{
if (expression_returns_set(transform))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("transform expression must not return a set")));
+ errmsg("transform expression must not return a set")));
/* No subplans or aggregates, either... */
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in transform expression")));
+ errmsg("cannot use subquery in transform expression")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in transform expression")));
+ errmsg("cannot use aggregate function in transform expression")));
}
else
{
ReleaseSysCache(tuple);
/*
- * The recursion case is handled by ATSimpleRecursion. However, if we
- * are told not to recurse, there had better not be any child tables;
- * else the alter would put them out of step.
+ * The recursion case is handled by ATSimpleRecursion. However, if we are
+ * told not to recurse, there had better not be any child tables; else the
+ * alter would put them out of step.
*/
if (recurse)
ATSimpleRecursion(wqueue, rel, cmd, recurse);
targettype = HeapTupleGetOid(typeTuple);
/*
- * If there is a default expression for the column, get it and ensure
- * we can coerce it to the new datatype. (We must do this before
- * changing the column type, because build_column_default itself will
- * try to coerce, and will not issue the error message we want if it
- * fails.)
+ * If there is a default expression for the column, get it and ensure we
+ * can coerce it to the new datatype. (We must do this before changing
+ * the column type, because build_column_default itself will try to
+ * coerce, and will not issue the error message we want if it fails.)
*
- * We remove any implicit coercion steps at the top level of the old
- * default expression; this has been agreed to satisfy the principle
- * of least surprise. (The conversion to the new column type should
- * act like it started from what the user sees as the stored expression,
- * and the implicit coercions aren't going to be shown.)
+ * We remove any implicit coercion steps at the top level of the old default
+ * expression; this has been agreed to satisfy the principle of least
+ * surprise. (The conversion to the new column type should act like it
+ * started from what the user sees as the stored expression, and the
+ * implicit coercions aren't going to be shown.)
*/
if (attTup->atthasdef)
{
Assert(defaultexpr);
defaultexpr = strip_implicit_coercions(defaultexpr);
defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */
- defaultexpr, exprType(defaultexpr),
+ defaultexpr, exprType(defaultexpr),
targettype, typename->typmod,
COERCION_ASSIGNMENT,
COERCE_IMPLICIT_CAST);
if (defaultexpr == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("default for column \"%s\" cannot be cast to type \"%s\"",
- colName, TypeNameToString(typename))));
+ errmsg("default for column \"%s\" cannot be cast to type \"%s\"",
+ colName, TypeNameToString(typename))));
}
else
defaultexpr = NULL;
/*
- * Find everything that depends on the column (constraints, indexes,
- * etc), and record enough information to let us recreate the objects.
+ * Find everything that depends on the column (constraints, indexes, etc),
+ * and record enough information to let us recreate the objects.
*
* The actual recreation does not happen here, but only after we have
- * performed all the individual ALTER TYPE operations. We have to
- * save the info before executing ALTER TYPE, though, else the
- * deparser will get confused.
+ * performed all the individual ALTER TYPE operations. We have to save
+ * the info before executing ALTER TYPE, though, else the deparser will
+ * get confused.
*
- * There could be multiple entries for the same object, so we must check
- * to ensure we process each one only once. Note: we assume that an
- * index that implements a constraint will not show a direct
- * dependency on the column.
+ * There could be multiple entries for the same object, so we must check to
+ * ensure we process each one only once. Note: we assume that an index
+ * that implements a constraint will not show a direct dependency on the
+ * column.
*/
depRel = heap_open(DependRelationId, RowExclusiveLock);
if (!list_member_oid(tab->changedIndexOids, foundObject.objectId))
{
tab->changedIndexOids = lappend_oid(tab->changedIndexOids,
- foundObject.objectId);
+ foundObject.objectId);
tab->changedIndexDefs = lappend(tab->changedIndexDefs,
- pg_get_indexdef_string(foundObject.objectId));
+ pg_get_indexdef_string(foundObject.objectId));
}
}
else if (relKind == RELKIND_SEQUENCE)
{
/*
- * This must be a SERIAL column's sequence. We
- * need not do anything to it.
+ * This must be a SERIAL column's sequence. We need
+ * not do anything to it.
*/
Assert(foundObject.objectSubId == 0);
}
if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId))
{
tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids,
- foundObject.objectId);
+ foundObject.objectId);
tab->changedConstraintDefs = lappend(tab->changedConstraintDefs,
- pg_get_constraintdef_string(foundObject.objectId));
+ pg_get_constraintdef_string(foundObject.objectId));
}
break;
case OCLASS_DEFAULT:
/*
- * Ignore the column's default expression, since we will
- * fix it below.
+ * Ignore the column's default expression, since we will fix
+ * it below.
*/
Assert(defaultexpr);
break;
case OCLASS_SCHEMA:
/*
- * We don't expect any of these sorts of objects to depend
- * on a column.
+ * We don't expect any of these sorts of objects to depend on
+ * a column.
*/
elog(ERROR, "unexpected object depending on column: %s",
getObjectDescription(&foundObject));
/*
* Now scan for dependencies of this column on other things. The only
- * thing we should find is the dependency on the column datatype,
- * which we want to remove.
+ * thing we should find is the dependency on the column datatype, which we
+ * want to remove.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
add_column_datatype_dependency(RelationGetRelid(rel), attnum, targettype);
/*
- * Drop any pg_statistic entry for the column, since it's now wrong
- * type
+ * Drop any pg_statistic entry for the column, since it's now wrong type
*/
RemoveStatistics(RelationGetRelid(rel), attnum);
/*
- * Update the default, if present, by brute force --- remove and
- * re-add the default. Probably unsafe to take shortcuts, since the
- * new version may well have additional dependencies. (It's okay to
- * do this now, rather than after other ALTER TYPE commands, since the
- * default won't depend on other column types.)
+ * Update the default, if present, by brute force --- remove and re-add
+ * the default. Probably unsafe to take shortcuts, since the new version
+ * may well have additional dependencies. (It's okay to do this now,
+ * rather than after other ALTER TYPE commands, since the default won't
+ * depend on other column types.)
*/
if (defaultexpr)
{
CommandCounterIncrement();
/*
- * We use RESTRICT here for safety, but at present we do not
- * expect anything to depend on the default.
+ * We use RESTRICT here for safety, but at present we do not expect
+ * anything to depend on the default.
*/
RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true);
ListCell *l;
/*
- * Re-parse the index and constraint definitions, and attach them to
- * the appropriate work queue entries. We do this before dropping
- * because in the case of a FOREIGN KEY constraint, we might not yet
- * have exclusive lock on the table the constraint is attached to, and
- * we need to get that before dropping. It's safe because the parser
- * won't actually look at the catalogs to detect the existing entry.
+ * Re-parse the index and constraint definitions, and attach them to the
+ * appropriate work queue entries. We do this before dropping because in
+ * the case of a FOREIGN KEY constraint, we might not yet have exclusive
+ * lock on the table the constraint is attached to, and we need to get
+ * that before dropping. It's safe because the parser won't actually look
+ * at the catalogs to detect the existing entry.
*/
foreach(l, tab->changedIndexDefs)
ATPostAlterTypeParse((char *) lfirst(l), wqueue);
ATPostAlterTypeParse((char *) lfirst(l), wqueue);
/*
- * Now we can drop the existing constraints and indexes ---
- * constraints first, since some of them might depend on the indexes.
- * It should be okay to use DROP_RESTRICT here, since nothing else
- * should be depending on these objects.
+ * Now we can drop the existing constraints and indexes --- constraints
+ * first, since some of them might depend on the indexes. It should be
+ * okay to use DROP_RESTRICT here, since nothing else should be depending
+ * on these objects.
*/
foreach(l, tab->changedConstraintOids)
{
}
/*
- * The objects will get recreated during subsequent passes over the
- * work queue.
+ * The objects will get recreated during subsequent passes over the work
+ * queue.
*/
}
ListCell *list_item;
/*
- * We expect that we only have to do raw parsing and parse analysis,
- * not any rule rewriting, since these will all be utility statements.
+ * We expect that we only have to do raw parsing and parse analysis, not
+ * any rule rewriting, since these will all be utility statements.
*/
raw_parsetree_list = raw_parser(cmd);
querytree_list = NIL;
}
/*
- * Attach each generated command to the proper place in the work
- * queue. Note this could result in creation of entirely new
- * work-queue entries.
+ * Attach each generated command to the proper place in the work queue.
+ * Note this could result in creation of entirely new work-queue entries.
*/
foreach(list_item, querytree_list)
{
Form_pg_class tuple_class;
/*
- * Get exclusive lock till end of transaction on the target table.
- * Use relation_open so that we can work on indexes and sequences.
+ * Get exclusive lock till end of transaction on the target table. Use
+ * relation_open so that we can work on indexes and sequences.
*/
target_rel = relation_open(relationOid, AccessExclusiveLock);
/* Superusers can always do it */
if (!superuser())
{
- Oid namespaceOid = tuple_class->relnamespace;
+ Oid namespaceOid = tuple_class->relnamespace;
AclResult aclresult;
/* Otherwise, must be owner of the existing object */
- if (!pg_class_ownercheck(relationOid,GetUserId()))
+ if (!pg_class_ownercheck(relationOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
RelationGetRelationName(target_rel));
AlterTypeOwnerInternal(tuple_class->reltype, newOwnerId);
/*
- * If we are operating on a table, also change the ownership of
- * any indexes and sequences that belong to the table, as well as
- * the table's toast table (if it has one)
+ * If we are operating on a table, also change the ownership of any
+ * indexes and sequences that belong to the table, as well as the
+ * table's toast table (if it has one)
*/
if (tuple_class->relkind == RELKIND_RELATION ||
tuple_class->relkind == RELKIND_TOASTVALUE)
{
Relation depRel;
SysScanDesc scan;
- ScanKeyData key[2];
+ ScanKeyData key[2];
HeapTuple tup;
/*
- * SERIAL sequences are those having an internal dependency on one
- * of the table's columns (we don't care *which* column, exactly).
+ * SERIAL sequences are those having an internal dependency on one of the
+ * table's columns (we don't care *which* column, exactly).
*/
depRel = heap_open(DependRelationId, AccessShareLock);
ScanKeyInit(&key[0],
- Anum_pg_depend_refclassid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(RelationRelationId));
+ Anum_pg_depend_refclassid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(RelationRelationId));
ScanKeyInit(&key[1],
- Anum_pg_depend_refobjid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(relationOid));
+ Anum_pg_depend_refobjid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(relationOid));
/* we leave refobjsubid unspecified */
scan = systable_beginscan(depRel, DependReferenceIndexId, true,
if (!OidIsValid(tablespaceId))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace \"%s\" does not exist", tablespacename)));
+ errmsg("tablespace \"%s\" does not exist", tablespacename)));
/* Check its permissions */
aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE);
if (OidIsValid(tab->newTableSpace))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot have multiple SET TABLESPACE subcommands")));
+ errmsg("cannot have multiple SET TABLESPACE subcommands")));
tab->newTableSpace = tablespaceId;
}
RelationGetRelationName(rel))));
/*
- * Don't allow moving temp tables of other backends ... their local
- * buffer manager is not going to cope.
+ * Don't allow moving temp tables of other backends ... their local buffer
+ * manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move temporary tables of other sessions")));
+ errmsg("cannot move temporary tables of other sessions")));
/*
* No work if no change in tablespace.
Page page = (Page) buf;
/*
- * Since we copy the file directly without looking at the shared
- * buffers, we'd better first flush out any pages of the source
- * relation that are in shared buffers. We assume no new changes
- * will be made while we are holding exclusive lock on the rel.
+ * Since we copy the file directly without looking at the shared buffers,
+ * we'd better first flush out any pages of the source relation that are
+ * in shared buffers. We assume no new changes will be made while we are
+ * holding exclusive lock on the rel.
*/
FlushRelationBuffers(rel);
/*
- * We need to log the copied data in WAL iff WAL archiving is enabled
- * AND it's not a temp rel.
+ * We need to log the copied data in WAL iff WAL archiving is enabled AND
+ * it's not a temp rel.
*/
use_wal = XLogArchivingActive() && !rel->rd_istemp;
}
/*
- * Now write the page. We say isTemp = true even if it's not a
- * temp rel, because there's no need for smgr to schedule an fsync
- * for this write; we'll do it ourselves below.
+ * Now write the page. We say isTemp = true even if it's not a temp
+ * rel, because there's no need for smgr to schedule an fsync for this
+ * write; we'll do it ourselves below.
*/
smgrwrite(dst, blkno, buf, true);
}
/*
- * If the rel isn't temp, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a temp rel we don't care
- * since the rel will be uninteresting after a crash anyway.)
+ * If the rel isn't temp, we must fsync it down to disk before it's safe
+ * to commit the transaction. (For a temp rel we don't care since the rel
+ * will be uninteresting after a crash anyway.)
*
- * It's obvious that we must do this when not WAL-logging the copy. It's
- * less obvious that we have to do it even if we did WAL-log the
- * copied pages. The reason is that since we're copying outside
- * shared buffers, a CHECKPOINT occurring during the copy has no way
- * to flush the previously written data to disk (indeed it won't know
- * the new rel even exists). A crash later on would replay WAL from
- * the checkpoint, therefore it wouldn't replay our earlier WAL
- * entries. If we do not fsync those pages here, they might still not
- * be on disk when the crash occurs.
+ * It's obvious that we must do this when not WAL-logging the copy. It's less
+ * obvious that we have to do it even if we did WAL-log the copied pages.
+ * The reason is that since we're copying outside shared buffers, a
+ * CHECKPOINT occurring during the copy has no way to flush the previously
+ * written data to disk (indeed it won't know the new rel even exists). A
+ * crash later on would replay WAL from the checkpoint, therefore it
+ * wouldn't replay our earlier WAL entries. If we do not fsync those pages
+ * here, they might still not be on disk when the crash occurs.
*/
if (!rel->rd_istemp)
smgrimmedsync(dst);
toastobject;
/*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction. (This is probably redundant in
- * all present uses...)
+ * Grab an exclusive lock on the target table, which we will NOT release
+ * until end of transaction. (This is probably redundant in all present
+ * uses...)
*/
rel = heap_open(relOid, AccessExclusiveLock);
/*
* Toast table is shared if and only if its parent is.
*
- * We cannot allow toasting a shared relation after initdb (because
- * there's no way to mark it toasted in other databases' pg_class).
- * Unfortunately we can't distinguish initdb from a manually started
- * standalone backend (toasting happens after the bootstrap phase, so
- * checking IsBootstrapProcessingMode() won't work). However, we can
- * at least prevent this mistake under normal multi-user operation.
+ * We cannot allow toasting a shared relation after initdb (because there's
+ * no way to mark it toasted in other databases' pg_class). Unfortunately
+ * we can't distinguish initdb from a manually started standalone backend
+ * (toasting happens after the bootstrap phase, so checking
+ * IsBootstrapProcessingMode() won't work). However, we can at least
+ * prevent this mistake under normal multi-user operation.
*/
shared_relation = rel->rd_rel->relisshared;
if (shared_relation && IsUnderPostmaster)
tupdesc->attrs[2]->attstorage = 'p';
/*
- * Note: the toast relation is placed in the regular pg_toast
- * namespace even if its master relation is a temp table. There
- * cannot be any naming collision, and the toast rel will be destroyed
- * when its master is, so there's no need to handle the toast rel as
- * temp.
+ * Note: the toast relation is placed in the regular pg_toast namespace
+ * even if its master relation is a temp table. There cannot be any
+ * naming collision, and the toast rel will be destroyed when its master
+ * is, so there's no need to handle the toast rel as temp.
*/
toast_relid = heap_create_with_catalog(toast_relname,
PG_TOAST_NAMESPACE,
*
* NOTE: the normal TOAST access routines could actually function with a
* single-column index on chunk_id only. However, the slice access
- * routines use both columns for faster access to an individual chunk.
- * In addition, we want it to be unique as a check against the
- * possibility of duplicate TOAST chunk OIDs. The index might also be
- * a little more efficient this way, since btree isn't all that happy
- * with large numbers of equal keys.
+ * routines use both columns for faster access to an individual chunk. In
+ * addition, we want it to be unique as a check against the possibility of
+ * duplicate TOAST chunk OIDs. The index might also be a little more
+ * efficient this way, since btree isn't all that happy with large numbers
+ * of equal keys.
*/
indexInfo = makeNode(IndexInfo);
/*
* Update toast rel's pg_class entry to show that it has an index. The
- * index OID is stored into the reltoastidxid field for easy access by
- * the tuple toaster.
+ * index OID is stored into the reltoastidxid field for easy access by the
+ * tuple toaster.
*/
setRelhasindex(toast_relid, true, true, toast_idxid);
if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move objects into or out of temporary schemas")));
+ errmsg("cannot move objects into or out of temporary schemas")));
/* same for TOAST schema */
if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE)
Oid oldNspOid, Oid newNspOid,
bool hasDependEntry)
{
- HeapTuple classTup;
+ HeapTuple classTup;
Form_pg_class classForm;
classTup = SearchSysCacheCopy(RELOID,
foreach(l, indexList)
{
- Oid indexOid = lfirst_oid(l);
+ Oid indexOid = lfirst_oid(l);
/*
- * Note: currently, the index will not have its own dependency
- * on the namespace, so we don't need to do changeDependencyFor().
- * There's no rowtype in pg_type, either.
+ * Note: currently, the index will not have its own dependency on the
+ * namespace, so we don't need to do changeDependencyFor(). There's no
+ * rowtype in pg_type, either.
*/
AlterRelationNamespaceInternal(classRel, indexOid,
oldNspOid, newNspOid,
{
Relation depRel;
SysScanDesc scan;
- ScanKeyData key[2];
+ ScanKeyData key[2];
HeapTuple tup;
/*
- * SERIAL sequences are those having an internal dependency on one
- * of the table's columns (we don't care *which* column, exactly).
+ * SERIAL sequences are those having an internal dependency on one of the
+ * table's columns (we don't care *which* column, exactly).
*/
depRel = heap_open(DependRelationId, AccessShareLock);
AlterRelationNamespaceInternal(classRel, depForm->objid,
oldNspOid, newNspOid,
true);
+
/*
- * Sequences have entries in pg_type. We need to be careful
- * to move them to the new namespace, too.
+ * Sequences have entries in pg_type. We need to be careful to move
+ * them to the new namespace, too.
*/
AlterTypeNamespaceInternal(RelationGetForm(seqRel)->reltype,
newNspOid, false);
MemoryContext oldcxt;
/*
- * We needn't bother registering the relation unless there is an ON
- * COMMIT action we need to take.
+ * We needn't bother registering the relation unless there is an ON COMMIT
+ * action we need to take.
*/
if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
return;
/*
* Note that table deletion will call
- * remove_on_commit_action, so the entry should get
- * marked as deleted.
+ * remove_on_commit_action, so the entry should get marked
+ * as deleted.
*/
Assert(oc->deleting_subid != InvalidSubTransactionId);
break;
if (oids_to_truncate != NIL)
{
heap_truncate(oids_to_truncate);
- CommandCounterIncrement(); /* XXX needed? */
+ CommandCounterIncrement(); /* XXX needed? */
}
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.27 2005/08/30 01:08:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.28 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* GUC variable */
-char *default_tablespace = NULL;
+char *default_tablespace = NULL;
static bool remove_tablespace_directories(Oid tablespaceoid, bool redo);
if (errno == ENOENT)
{
/*
- * Acquire ExclusiveLock on pg_tablespace to ensure that no
- * DROP TABLESPACE or TablespaceCreateDbspace is running
- * concurrently. Simple reads from pg_tablespace are OK.
+ * Acquire ExclusiveLock on pg_tablespace to ensure that no DROP
+ * TABLESPACE or TablespaceCreateDbspace is running concurrently.
+ * Simple reads from pg_tablespace are OK.
*/
Relation rel;
rel = NULL;
/*
- * Recheck to see if someone created the directory while we
- * were waiting for lock.
+ * Recheck to see if someone created the directory while we were
+ * waiting for lock.
*/
if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
{
if (errno != ENOENT || !isRedo)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m",
- dir)));
+ errmsg("could not create directory \"%s\": %m",
+ dir)));
/* Try to make parent directory too */
parentdir = pstrdup(dir);
get_parent_directory(parentdir);
if (mkdir(parentdir, S_IRWXU) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m",
- parentdir)));
+ errmsg("could not create directory \"%s\": %m",
+ parentdir)));
pfree(parentdir);
if (mkdir(dir, S_IRWXU) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m",
- dir)));
+ errmsg("could not create directory \"%s\": %m",
+ dir)));
}
}
Oid tablespaceoid;
char *location;
char *linkloc;
- Oid ownerId;
+ Oid ownerId;
/* validate */
if (strchr(location, '\''))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("tablespace location may not contain single quotes")));
+ errmsg("tablespace location may not contain single quotes")));
/*
* Allowing relative paths seems risky
errmsg("tablespace location must be an absolute path")));
/*
- * Check that location isn't too long. Remember that we're going to
- * append '//.' (XXX but do we ever form the whole
- * path explicitly? This may be overly conservative.)
+ * Check that location isn't too long. Remember that we're going to append
+ * '//.' (XXX but do we ever form the whole path
+ * explicitly? This may be overly conservative.)
*/
if (strlen(location) >= (MAXPGPATH - 1 - 10 - 1 - 10 - 1 - 10))
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"",
stmt->tablespacename),
- errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
+ errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
/*
* Check that there is no other tablespace by this name. (The unique
stmt->tablespacename)));
/*
- * Insert tuple into pg_tablespace. The purpose of doing this first
- * is to lock the proposed tablename against other would-be creators.
- * The insertion will roll back if we find problems below.
+ * Insert tuple into pg_tablespace. The purpose of doing this first is to
+ * lock the proposed tablename against other would-be creators. The
+ * insertion will roll back if we find problems below.
*/
rel = heap_open(TableSpaceRelationId, RowExclusiveLock);
recordDependencyOnOwner(TableSpaceRelationId, tablespaceoid, ownerId);
/*
- * Attempt to coerce target directory to safe permissions. If this
- * fails, it doesn't exist or has the wrong owner.
+ * Attempt to coerce target directory to safe permissions. If this fails,
+ * it doesn't exist or has the wrong owner.
*/
if (chmod(location, 0700) != 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not set permissions on directory \"%s\": %m",
- location)));
+ errmsg("could not set permissions on directory \"%s\": %m",
+ location)));
/*
* Check the target directory is empty.
location)));
/*
- * Create the PG_VERSION file in the target directory. This has
- * several purposes: to make sure we can write in the directory, to
- * prevent someone from creating another tablespace pointing at the
- * same directory (the emptiness check above will fail), and to label
- * tablespace directories by PG version.
+ * Create the PG_VERSION file in the target directory. This has several
+ * purposes: to make sure we can write in the directory, to prevent
+ * someone from creating another tablespace pointing at the same directory
+ * (the emptiness check above will fail), and to label tablespace
+ * directories by PG version.
*/
set_short_version(location);
/* We keep the lock on pg_tablespace until commit */
heap_close(rel, NoLock);
-
#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
PreventTransactionChain((void *) stmt, "DROP TABLESPACE");
/*
- * Acquire ExclusiveLock on pg_tablespace to ensure that no one else
- * is trying to do DROP TABLESPACE or TablespaceCreateDbspace
- * concurrently.
+ * Acquire ExclusiveLock on pg_tablespace to ensure that no one else is
+ * trying to do DROP TABLESPACE or TablespaceCreateDbspace concurrently.
*/
rel = heap_open(TableSpaceRelationId, ExclusiveLock);
tablespacename);
/*
- * Remove the pg_tablespace tuple (this will roll back if we fail
- * below)
+ * Remove the pg_tablespace tuple (this will roll back if we fail below)
*/
simple_heap_delete(rel, &tuple->t_self);
/* We keep the lock on pg_tablespace until commit */
heap_close(rel, NoLock);
-
#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
sprintf(location, "pg_tblspc/%u", tablespaceoid);
/*
- * Check if the tablespace still contains any files. We try to rmdir
- * each per-database directory we find in it. rmdir failure implies
- * there are still files in that subdirectory, so give up. (We do not
- * have to worry about undoing any already completed rmdirs, since the
- * next attempt to use the tablespace from that database will simply
- * recreate the subdirectory via TablespaceCreateDbspace.)
+ * Check if the tablespace still contains any files. We try to rmdir each
+ * per-database directory we find in it. rmdir failure implies there are
+ * still files in that subdirectory, so give up. (We do not have to worry
+ * about undoing any already completed rmdirs, since the next attempt to
+ * use the tablespace from that database will simply recreate the
+ * subdirectory via TablespaceCreateDbspace.)
*
* Since we hold exclusive lock, no one else should be creating any fresh
- * subdirectories in parallel. It is possible that new files are
- * being created within subdirectories, though, so the rmdir call
- * could fail. Worst consequence is a less friendly error message.
+ * subdirectories in parallel. It is possible that new files are being
+ * created within subdirectories, though, so the rmdir call could fail.
+ * Worst consequence is a less friendly error message.
*/
dirdesc = AllocateDir(location);
if (dirdesc == NULL)
FreeDir(dirdesc);
/*
- * Okay, try to unlink PG_VERSION (we allow it to not be there, even
- * in non-REDO case, for robustness).
+ * Okay, try to unlink PG_VERSION (we allow it to not be there, even in
+ * non-REDO case, for robustness).
*/
subfile = palloc(strlen(location) + 11 + 1);
sprintf(subfile, "%s/PG_VERSION", location);
/*
* Okay, try to remove the symlink. We must however deal with the
- * possibility that it's a directory instead of a symlink --- this
- * could happen during WAL replay (see TablespaceCreateDbspace), and
- * it is also the normal case on Windows.
+ * possibility that it's a directory instead of a symlink --- this could
+ * happen during WAL replay (see TablespaceCreateDbspace), and it is also
+ * the normal case on Windows.
*/
if (lstat(location, &st) == 0 && S_ISDIR(st.st_mode))
{
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
+ errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
/* Make sure the new name doesn't exist */
ScanKeyInit(&entry[0],
check_is_member_of_role(GetUserId(), newOwnerId);
/*
- * Normally we would also check for create permissions here,
- * but there are none for tablespaces so we follow what rename
- * tablespace does and omit the create permissions check.
+ * Normally we would also check for create permissions here, but there
+ * are none for tablespaces so we follow what rename tablespace does
+ * and omit the create permissions check.
*
- * NOTE: Only superusers may create tablespaces to begin with and
- * so initially only a superuser would be able to change its
- * ownership anyway.
+ * NOTE: Only superusers may create tablespaces to begin with and so
+ * initially only a superuser would be able to change its ownership
+ * anyway.
*/
memset(repl_null, ' ', sizeof(repl_null));
{
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the name. Must accept the value on faith.
+ * cannot verify the name. Must accept the value on faith.
*/
if (IsTransactionState())
{
/* Fast path for default_tablespace == "" */
if (default_tablespace == NULL || default_tablespace[0] == '\0')
return InvalidOid;
+
/*
* It is tempting to cache this lookup for more speed, but then we would
- * fail to detect the case where the tablespace was dropped since the
- * GUC variable was set. Note also that we don't complain if the value
- * fails to refer to an existing tablespace; we just silently return
- * InvalidOid, causing the new object to be created in the database's
- * tablespace.
+ * fail to detect the case where the tablespace was dropped since the GUC
+ * variable was set. Note also that we don't complain if the value fails
+ * to refer to an existing tablespace; we just silently return InvalidOid,
+ * causing the new object to be created in the database's tablespace.
*/
result = get_tablespace_oid(default_tablespace);
+
/*
* Allow explicit specification of database's default tablespace in
* default_tablespace without triggering permissions checks.
char *linkloc;
/*
- * Attempt to coerce target directory to safe permissions. If
- * this fails, it doesn't exist or has the wrong owner.
+ * Attempt to coerce target directory to safe permissions. If this
+ * fails, it doesn't exist or has the wrong owner.
*/
if (chmod(location, 0700) != 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not set permissions on directory \"%s\": %m",
- location)));
+ errmsg("could not set permissions on directory \"%s\": %m",
+ location)));
/* Create or re-create the PG_VERSION file in the target directory */
set_short_version(location);
if (errno != EEXIST)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create symbolic link \"%s\": %m",
- linkloc)));
+ errmsg("could not create symbolic link \"%s\": %m",
+ linkloc)));
}
pfree(linkloc);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.194 2005/08/24 17:38:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.195 2005/10/15 02:49:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Instrumentation *instr,
MemoryContext per_tuple_context);
static void AfterTriggerSaveEvent(ResultRelInfo *relinfo, int event,
- bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
+ bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
/*
{
/*
* If this trigger is a constraint (and a foreign key one) then we
- * really need a constrrelid. Since we don't have one, we'll try
- * to generate one from the argument information.
+ * really need a constrrelid. Since we don't have one, we'll try to
+ * generate one from the argument information.
*
* This is really just a workaround for a long-ago pg_dump bug that
* omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
- * commands. We don't want to bomb out completely here if we
- * can't determine the correct relation, because that would
- * prevent loading the dump file. Instead, NOTICE here and ERROR
- * in the trigger.
+ * commands. We don't want to bomb out completely here if we can't
+ * determine the correct relation, because that would prevent loading
+ * the dump file. Instead, NOTICE here and ERROR in the trigger.
*/
bool needconstrrelid = false;
void *elem = NULL;
}
/*
- * Generate the trigger's OID now, so that we can use it in the name
- * if needed.
+ * Generate the trigger's OID now, so that we can use it in the name if
+ * needed.
*/
tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
/*
* If trigger is an RI constraint, use specified trigger name as
- * constraint name and build a unique trigger name instead. This is
- * mainly for backwards compatibility with CREATE CONSTRAINT TRIGGER
- * commands.
+ * constraint name and build a unique trigger name instead. This is mainly
+ * for backwards compatibility with CREATE CONSTRAINT TRIGGER commands.
*/
if (stmt->isconstraint)
{
}
/*
- * Scan pg_trigger for existing triggers on relation. We do this
- * mainly because we must count them; a secondary benefit is to give a
- * nice error message if there's already a trigger of the same name.
- * (The unique index on tgrelid/tgname would complain anyway.)
+ * Scan pg_trigger for existing triggers on relation. We do this mainly
+ * because we must count them; a secondary benefit is to give a nice error
+ * message if there's already a trigger of the same name. (The unique
+ * index on tgrelid/tgname would complain anyway.)
*
* NOTE that this is cool only because we have AccessExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- trigname, stmt->relation->relname)));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ trigname, stmt->relation->relname)));
found++;
}
systable_endscan(tgscan);
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
- * see a trigger function declared OPAQUE, change it to TRIGGER.
+ * We allow OPAQUE just so we can load old dump files. When we see a
+ * trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
{
values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(trigname));
+ CStringGetDatum(trigname));
values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
values[Anum_pg_trigger_tgenabled - 1] = BoolGetDatum(true);
values[Anum_pg_trigger_tgisconstraint - 1] = BoolGetDatum(stmt->isconstraint);
values[Anum_pg_trigger_tgconstrname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(constrname));
+ CStringGetDatum(constrname));
values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
}
values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
- CStringGetDatum(args));
+ CStringGetDatum(args));
}
else
{
values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
- CStringGetDatum(""));
+ CStringGetDatum(""));
}
/* tgattr is currently always a zero-length array */
tgattr = buildint2vector(NULL, 0);
pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
/*
- * Update relation's pg_class entry. Crucial side-effect: other
- * backends (and this one too!) are sent SI message to make them
- * rebuild relcache entries.
+ * Update relation's pg_class entry. Crucial side-effect: other backends
+ * (and this one too!) are sent SI message to make them rebuild relcache
+ * entries.
*/
pgrel = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
/*
* We used to try to update the rel's relcache entry here, but that's
- * fairly pointless since it will happen as a byproduct of the
- * upcoming CommandCounterIncrement...
+ * fairly pointless since it will happen as a byproduct of the upcoming
+ * CommandCounterIncrement...
*/
/*
- * Record dependencies for trigger. Always place a normal dependency
- * on the function. If we are doing this in response to an explicit
- * CREATE TRIGGER command, also make trigger be auto-dropped if its
- * relation is dropped or if the FK relation is dropped. (Auto drop
- * is compatible with our pre-7.3 behavior.) If the trigger is being
- * made for a constraint, we can skip the relation links; the
- * dependency on the constraint will indirectly depend on the
- * relations.
+ * Record dependencies for trigger. Always place a normal dependency on
+ * the function. If we are doing this in response to an explicit CREATE
+ * TRIGGER command, also make trigger be auto-dropped if its relation is
+ * dropped or if the FK relation is dropped. (Auto drop is compatible
+ * with our pre-7.3 behavior.) If the trigger is being made for a
+ * constraint, we can skip the relation links; the dependency on the
+ * constraint will indirectly depend on the relations.
*/
referenced.classId = ProcedureRelationId;
referenced.objectId = funcoid;
heap_close(tgrel, RowExclusiveLock);
/*
- * Update relation's pg_class entry. Crucial side-effect: other
- * backends (and this one too!) are sent SI message to make them
- * rebuild relcache entries.
+ * Update relation's pg_class entry. Crucial side-effect: other backends
+ * (and this one too!) are sent SI message to make them rebuild relcache
+ * entries.
*
- * Note this is OK only because we have AccessExclusiveLock on the rel,
- * so no one else is creating/deleting triggers on this rel at the
- * same time.
+ * Note this is OK only because we have AccessExclusiveLock on the rel, so no
+ * one else is creating/deleting triggers on this rel at the same time.
*/
pgrel = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
ScanKeyData key[2];
/*
- * Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction.
+ * Grab an exclusive lock on the target table, which we will NOT release
+ * until end of transaction.
*/
targetrel = heap_open(relid, AccessExclusiveLock);
/*
- * Scan pg_trigger twice for existing triggers on relation. We do
- * this in order to ensure a trigger does not exist with newname (The
- * unique index on tgrelid/tgname would complain anyway) and to ensure
- * a trigger does exist with oldname.
+ * Scan pg_trigger twice for existing triggers on relation. We do this in
+ * order to ensure a trigger does not exist with newname (The unique index
+ * on tgrelid/tgname would complain anyway) and to ensure a trigger does
+ * exist with oldname.
*
* NOTE that this is cool only because we have AccessExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- newname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ newname, RelationGetRelationName(targetrel))));
systable_endscan(tgscan);
/*
CatalogUpdateIndexes(tgrel, tuple);
/*
- * Invalidate relation's relcache entry so that other backends
- * (and this one too!) are sent SI message to make them rebuild
- * relcache entries. (Ideally this should happen
- * automatically...)
+ * Invalidate relation's relcache entry so that other backends (and
+ * this one too!) are sent SI message to make them rebuild relcache
+ * entries. (Ideally this should happen automatically...)
*/
CacheInvalidateRelcache(targetrel);
}
EnableDisableTrigger(Relation rel, const char *tgname,
bool enable, bool skip_system)
{
- Relation tgrel;
- int nkeys;
+ Relation tgrel;
+ int nkeys;
ScanKeyData keys[2];
SysScanDesc tgscan;
- HeapTuple tuple;
- bool found;
- bool changed;
+ HeapTuple tuple;
+ bool found;
+ bool changed;
/* Scan the relevant entries in pg_triggers */
tgrel = heap_open(TriggerRelationId, RowExclusiveLock);
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied: \"%s\" is a system trigger",
- NameStr(oldtrig->tgname))));
+ errmsg("permission denied: \"%s\" is a system trigger",
+ NameStr(oldtrig->tgname))));
}
found = true;
if (oldtrig->tgenabled != enable)
{
/* need to change this one ... make a copy to scribble on */
- HeapTuple newtup = heap_copytuple(tuple);
+ HeapTuple newtup = heap_copytuple(tuple);
Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);
newtrig->tgenabled = enable;
triggers = (Trigger *) palloc(ntrigs * sizeof(Trigger));
/*
- * Note: since we scan the triggers using TriggerRelidNameIndexId, we
- * will be reading the triggers in name order, except possibly during
- * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This
- * in turn ensures that triggers will be fired in name order.
+ * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
+ * be reading the triggers in name order, except possibly during
+ * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This in
+ * turn ensures that triggers will be fired in name order.
*/
ScanKeyInit(&skey,
Anum_pg_trigger_tgrelid,
build->tgoid = HeapTupleGetOid(htup);
build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname)));
+ NameGetDatum(&pg_trigger->tgname)));
build->tgfoid = pg_trigger->tgfoid;
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
j;
/*
- * We need not examine the "index" data, just the trigger array
- * itself; if we have the same triggers with the same types, the
- * derived index data should match.
+ * We need not examine the "index" data, just the trigger array itself; if
+ * we have the same triggers with the same types, the derived index data
+ * should match.
*
- * As of 7.3 we assume trigger set ordering is significant in the
- * comparison; so we just compare corresponding slots of the two sets.
+ * As of 7.3 we assume trigger set ordering is significant in the comparison;
+ * so we just compare corresponding slots of the two sets.
*/
if (trigdesc1 != NULL)
{
/*
* Do the function evaluation in the per-tuple memory context, so that
- * leaked memory will be reclaimed once per tuple. Note in particular
- * that any new tuple created by the trigger function will live till
- * the end of the tuple cycle.
+ * leaked memory will be reclaimed once per tuple. Note in particular that
+ * any new tuple created by the trigger function will live till the end of
+ * the tuple cycle.
*/
oldContext = MemoryContextSwitchTo(per_tuple_context);
MemoryContextSwitchTo(oldContext);
/*
- * Trigger protocol allows function to return a null pointer, but NOT
- * to set the isnull result flag.
+ * Trigger protocol allows function to return a null pointer, but NOT to
+ * set the isnull result flag.
*/
if (fcinfo.isnull)
ereport(ERROR,
fcinfo.flinfo->fn_oid)));
/*
- * If doing EXPLAIN ANALYZE, stop charging time to this trigger,
- * and count one "tuple returned" (really the number of firings).
+ * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
+ * one "tuple returned" (really the number of firings).
*/
if (instr)
InstrStopNode(instr + tgindx, true);
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
if (newSlot != NULL)
{
- HTSU_Result test;
+ HTSU_Result test;
ItemPointerData update_ctid;
TransactionId update_xmax;
}
/*
- * if tuple was deleted or PlanQual failed for updated
- * tuple - we have not process this tuple!
+ * if tuple was deleted or PlanQual failed for updated tuple -
+ * we have not process this tuple!
*/
return NULL;
* they will easily go away during subtransaction abort.
*
* Because the list of pending events can grow large, we go to some effort
- * to minimize memory consumption. We do not use the generic List mechanism
+ * to minimize memory consumption. We do not use the generic List mechanism
* but thread the events manually.
*
* XXX We need to be able to save the per-event data in a file if it grows too
bool all_isdeferred;
int numstates; /* number of trigstates[] entries in use */
int numalloc; /* allocated size of trigstates[] */
- SetConstraintTriggerData trigstates[1]; /* VARIABLE LENGTH ARRAY */
+ SetConstraintTriggerData trigstates[1]; /* VARIABLE LENGTH ARRAY */
} SetConstraintStateData;
typedef SetConstraintStateData *SetConstraintState;
typedef struct AfterTriggerEventData
{
- AfterTriggerEvent ate_next; /* list link */
- TriggerEvent ate_event; /* event type and status bits */
- CommandId ate_firing_id; /* ID for firing cycle */
- Oid ate_tgoid; /* the trigger's ID */
- Oid ate_relid; /* the relation it's on */
- ItemPointerData ate_oldctid; /* specific tuple(s) involved */
+ AfterTriggerEvent ate_next; /* list link */
+ TriggerEvent ate_event; /* event type and status bits */
+ CommandId ate_firing_id; /* ID for firing cycle */
+ Oid ate_tgoid; /* the trigger's ID */
+ Oid ate_relid; /* the relation it's on */
+ ItemPointerData ate_oldctid; /* specific tuple(s) involved */
ItemPointerData ate_newctid;
} AfterTriggerEventData;
*
* firing_counter is incremented for each call of afterTriggerInvokeEvents.
* We mark firable events with the current firing cycle's ID so that we can
- * tell which ones to work on. This ensures sane behavior if a trigger
+ * tell which ones to work on. This ensures sane behavior if a trigger
* function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
* only fire those events that weren't already scheduled for firing.
*
* This is saved and restored across failed subtransactions.
*
* events is the current list of deferred events. This is global across
- * all subtransactions of the current transaction. In a subtransaction
+ * all subtransactions of the current transaction. In a subtransaction
* abort, we know that the events added by the subtransaction are at the
* end of the list, so it is relatively easy to discard them.
*
* which we similarly use to clean up at subtransaction abort.
*
* firing_stack is a stack of copies of subtransaction-start-time
- * firing_counter. We use this to recognize which deferred triggers were
+ * firing_counter. We use this to recognize which deferred triggers were
* fired (or marked for firing) within an aborted subtransaction.
*
* We use GetCurrentTransactionNestLevel() to determine the correct array
* index in these stacks. maxtransdepth is the number of allocated entries in
- * each stack. (By not keeping our own stack pointer, we can avoid trouble
+ * each stack. (By not keeping our own stack pointer, we can avoid trouble
* in cases where errors during subxact abort cause multiple invocations
* of AfterTriggerEndSubXact() at the same nesting depth.)
*/
typedef struct AfterTriggersData
{
- CommandId firing_counter; /* next firing ID to assign */
- SetConstraintState state; /* the active S C state */
+ CommandId firing_counter; /* next firing ID to assign */
+ SetConstraintState state; /* the active S C state */
AfterTriggerEventList events; /* deferred-event list */
- int query_depth; /* current query list index */
- AfterTriggerEventList *query_stack; /* events pending from each query */
- int maxquerydepth; /* allocated len of above array */
+ int query_depth; /* current query list index */
+ AfterTriggerEventList *query_stack; /* events pending from each query */
+ int maxquerydepth; /* allocated len of above array */
/* these fields are just for resetting at subtrans abort: */
SetConstraintState *state_stack; /* stacked S C states */
- AfterTriggerEventList *events_stack; /* stacked list pointers */
- int *depth_stack; /* stacked query_depths */
- CommandId *firing_stack; /* stacked firing_counters */
- int maxtransdepth; /* allocated len of above arrays */
+ AfterTriggerEventList *events_stack; /* stacked list pointers */
+ int *depth_stack; /* stacked query_depths */
+ CommandId *firing_stack; /* stacked firing_counters */
+ int maxtransdepth; /* allocated len of above arrays */
} AfterTriggersData;
typedef AfterTriggersData *AfterTriggers;
static void AfterTriggerExecute(AfterTriggerEvent event,
- Relation rel, TriggerDesc *trigdesc,
- FmgrInfo *finfo,
- Instrumentation *instr,
- MemoryContext per_tuple_context);
+ Relation rel, TriggerDesc *trigdesc,
+ FmgrInfo *finfo,
+ Instrumentation *instr,
+ MemoryContext per_tuple_context);
static SetConstraintState SetConstraintStateCreate(int numalloc);
static SetConstraintState SetConstraintStateCopy(SetConstraintState state);
static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
- Oid tgoid, bool tgisdeferred);
+ Oid tgoid, bool tgisdeferred);
/* ----------
elog(ERROR, "could not find trigger %u", tgoid);
/*
- * If doing EXPLAIN ANALYZE, start charging time to this trigger.
- * We want to include time spent re-fetching tuples in the trigger cost.
+ * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
+ * to include time spent re-fetching tuples in the trigger cost.
*/
if (instr)
InstrStartNode(instr + tgindx);
MemoryContextReset(per_tuple_context);
/*
- * Call the trigger and throw away any possibly returned updated
- * tuple. (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
+ * Call the trigger and throw away any possibly returned updated tuple.
+ * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
*/
rettuple = ExecCallTriggerFunc(&LocTriggerData,
tgindx,
ReleaseBuffer(newbuffer);
/*
- * If doing EXPLAIN ANALYZE, stop charging time to this trigger,
- * and count one "tuple returned" (really the number of firings).
+ * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
+ * one "tuple returned" (really the number of firings).
*/
if (instr)
InstrStopNode(instr + tgindx, true);
*
* If estate isn't NULL, then we expect that all the firable events are
* for triggers of the relations included in the estate's result relation
- * array. This allows us to re-use the estate's open relations and
+ * array. This allows us to re-use the estate's open relations and
* trigger cache info. When estate is NULL, we have to find the relations
* the hard way.
*
event->ate_firing_id == firing_id)
{
/*
- * So let's fire it... but first, open the correct
- * relation if this is not the same relation as before.
+ * So let's fire it... but first, open the correct relation if
+ * this is not the same relation as before.
*/
if (rel == NULL || rel->rd_id != event->ate_relid)
{
{
/* Find target relation among estate's result rels */
ResultRelInfo *rInfo;
- int nr;
+ int nr;
rInfo = estate->es_result_relations;
nr = estate->es_num_result_relations;
rInfo++;
nr--;
}
- if (nr <= 0) /* should not happen */
+ if (nr <= 0) /* should not happen */
elog(ERROR, "could not find relation %u among query result relations",
event->ate_relid);
rel = rInfo->ri_RelationDesc;
FreeTriggerDesc(trigdesc);
if (finfo)
pfree(finfo);
- Assert(instr == NULL); /* never used in this case */
+ Assert(instr == NULL); /* never used in this case */
/*
- * We assume that an appropriate lock is still held by
- * the executor, so grab no new lock here.
+ * We assume that an appropriate lock is still held by the
+ * executor, so grab no new lock here.
*/
rel = heap_open(event->ate_relid, NoLock);
/*
- * Copy relation's trigger info so that we have a
- * stable copy no matter what the called triggers do.
+ * Copy relation's trigger info so that we have a stable
+ * copy no matter what the called triggers do.
*/
trigdesc = CopyTriggerDesc(rel->trigdesc);
event->ate_relid);
/*
- * Allocate space to cache fmgr lookup info for
- * triggers.
+ * Allocate space to cache fmgr lookup info for triggers.
*/
finfo = (FmgrInfo *)
palloc0(trigdesc->numtriggers * sizeof(FmgrInfo));
/*
* Fire it. Note that the AFTER_TRIGGER_IN_PROGRESS flag is still
- * set, so recursive examinations of the event list won't try
- * to re-fire it.
+ * set, so recursive examinations of the event list won't try to
+ * re-fire it.
*/
AfterTriggerExecute(event, rel, trigdesc, finfo, instr,
per_tuple_context);
* If it's now done, throw it away, if allowed.
*
* NB: it's possible the trigger call above added more events to the
- * queue, or that calls we will do later will want to add more, so
- * we have to be careful about maintaining list validity at all
- * points here.
+ * queue, or that calls we will do later will want to add more, so we
+ * have to be careful about maintaining list validity at all points
+ * here.
*/
next_event = event->ate_next;
if (afterTriggers->query_depth >= afterTriggers->maxquerydepth)
{
/* repalloc will keep the stack in the same context */
- int new_alloc = afterTriggers->maxquerydepth * 2;
+ int new_alloc = afterTriggers->maxquerydepth * 2;
afterTriggers->query_stack = (AfterTriggerEventList *)
repalloc(afterTriggers->query_stack,
Assert(afterTriggers->query_depth >= 0);
/*
- * Process all immediate-mode triggers queued by the query, and move
- * the deferred ones to the main list of deferred events.
+ * Process all immediate-mode triggers queued by the query, and move the
+ * deferred ones to the main list of deferred events.
*
- * Notice that we decide which ones will be fired, and put the deferred
- * ones on the main list, before anything is actually fired. This
- * ensures reasonably sane behavior if a trigger function does
- * SET CONSTRAINTS ... IMMEDIATE: all events we have decided to defer
- * will be available for it to fire.
+ * Notice that we decide which ones will be fired, and put the deferred ones
+ * on the main list, before anything is actually fired. This ensures
+ * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
+ * IMMEDIATE: all events we have decided to defer will be available for it
+ * to fire.
*
* If we find no firable events, we don't have to increment firing_counter.
*/
events = &afterTriggers->query_stack[afterTriggers->query_depth];
if (afterTriggerMarkEvents(events, &afterTriggers->events, true))
{
- CommandId firing_id = afterTriggers->firing_counter++;
+ CommandId firing_id = afterTriggers->firing_counter++;
/* OK to delete the immediate events after processing them */
afterTriggerInvokeEvents(events, firing_id, estate, true);
Assert(afterTriggers->query_depth == -1);
/*
- * If there are any triggers to fire, make sure we have set a snapshot
- * for them to use. (Since PortalRunUtility doesn't set a snap for
- * COMMIT, we can't assume ActiveSnapshot is valid on entry.)
+ * If there are any triggers to fire, make sure we have set a snapshot for
+ * them to use. (Since PortalRunUtility doesn't set a snap for COMMIT, we
+ * can't assume ActiveSnapshot is valid on entry.)
*/
events = &afterTriggers->events;
if (events->head != NULL)
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
/*
- * Run all the remaining triggers. Loop until they are all gone,
- * just in case some trigger queues more for us to do.
+ * Run all the remaining triggers. Loop until they are all gone, just in
+ * case some trigger queues more for us to do.
*/
while (afterTriggerMarkEvents(events, NULL, false))
{
- CommandId firing_id = afterTriggers->firing_counter++;
+ CommandId firing_id = afterTriggers->firing_counter++;
afterTriggerInvokeEvents(events, firing_id, NULL, true);
}
int my_level = GetCurrentTransactionNestLevel();
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state. (Probably
* shouldn't happen?)
*/
if (afterTriggers == NULL)
else
{
/* repalloc will keep the stacks in the same context */
- int new_alloc = afterTriggers->maxtransdepth * 2;
+ int new_alloc = afterTriggers->maxtransdepth * 2;
afterTriggers->state_stack = (SetConstraintState *)
repalloc(afterTriggers->state_stack,
}
/*
- * Push the current information into the stack. The SET CONSTRAINTS
- * state is not saved until/unless changed.
+ * Push the current information into the stack. The SET CONSTRAINTS state
+ * is not saved until/unless changed.
*/
afterTriggers->state_stack[my_level] = NULL;
afterTriggers->events_stack[my_level] = afterTriggers->events;
CommandId subxact_firing_id;
/*
- * Ignore call if the transaction is in aborted state. (Probably unneeded)
+ * Ignore call if the transaction is in aborted state. (Probably
+ * unneeded)
*/
if (afterTriggers == NULL)
return;
*/
/*
- * Restore the trigger state. If the saved state is NULL, then
- * this subxact didn't save it, so it doesn't need restoring.
+ * Restore the trigger state. If the saved state is NULL, then this
+ * subxact didn't save it, so it doesn't need restoring.
*/
state = afterTriggers->state_stack[my_level];
if (state != NULL)
afterTriggers->state_stack[my_level] = NULL;
/*
- * Scan for any remaining deferred events that were marked DONE
- * or IN PROGRESS by this subxact or a child, and un-mark them.
- * We can recognize such events because they have a firing ID
- * greater than or equal to the firing_counter value we saved at
- * subtransaction start. (This essentially assumes that the
- * current subxact includes all subxacts started after it.)
+ * Scan for any remaining deferred events that were marked DONE or IN
+ * PROGRESS by this subxact or a child, and un-mark them. We can
+ * recognize such events because they have a firing ID greater than or
+ * equal to the firing_counter value we saved at subtransaction start.
+ * (This essentially assumes that the current subxact includes all
+ * subxacts started after it.)
*/
subxact_firing_id = afterTriggers->firing_stack[my_level];
for (event = afterTriggers->events.head;
state = (SetConstraintState)
MemoryContextAllocZero(TopTransactionContext,
sizeof(SetConstraintStateData) +
- (numalloc - 1) *sizeof(SetConstraintTriggerData));
+ (numalloc - 1) *sizeof(SetConstraintTriggerData));
state->numalloc = numalloc;
}
/*
- * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
+ * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
* pointer to the state object (it will change if we have to repalloc).
*/
static SetConstraintState
return;
/*
- * If in a subtransaction, and we didn't save the current state
- * already, save it so it can be restored if the subtransaction
- * aborts.
+ * If in a subtransaction, and we didn't save the current state already,
+ * save it so it can be restored if the subtransaction aborts.
*/
if (my_level > 1 &&
afterTriggers->state_stack[my_level] == NULL)
if (strlen(cname) == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("unnamed constraints cannot be set explicitly")));
+ errmsg("unnamed constraints cannot be set explicitly")));
/*
* Setup to scan pg_trigger by tgconstrname ...
Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
/*
- * If we found some, check that they fit the deferrability
- * but skip referential action ones, since they are
- * silently never deferrable.
+ * If we found some, check that they fit the deferrability but
+ * skip referential action ones, since they are silently never
+ * deferrable.
*/
if (pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_DEL &&
}
/*
- * SQL99 requires that when a constraint is set to IMMEDIATE, any
- * deferred checks against that constraint must be made when the SET
- * CONSTRAINTS command is executed -- i.e. the effects of the SET
- * CONSTRAINTS command apply retroactively. We've updated the
- * constraints state, so scan the list of previously deferred events
- * to fire any that have now become immediate.
+ * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
+ * checks against that constraint must be made when the SET CONSTRAINTS
+ * command is executed -- i.e. the effects of the SET CONSTRAINTS command
+ * apply retroactively. We've updated the constraints state, so scan the
+ * list of previously deferred events to fire any that have now become
+ * immediate.
*
- * Obviously, if this was SET ... DEFERRED then it can't have converted
- * any unfired events to immediate, so we need do nothing in that case.
+ * Obviously, if this was SET ... DEFERRED then it can't have converted any
+ * unfired events to immediate, so we need do nothing in that case.
*/
if (!stmt->deferred)
{
if (afterTriggerMarkEvents(events, NULL, true))
{
- CommandId firing_id = afterTriggers->firing_counter++;
+ CommandId firing_id = afterTriggers->firing_counter++;
/*
- * We can delete fired events if we are at top transaction
- * level, but we'd better not if inside a subtransaction, since
- * the subtransaction could later get rolled back.
+ * We can delete fired events if we are at top transaction level,
+ * but we'd better not if inside a subtransaction, since the
+ * subtransaction could later get rolled back.
*/
afterTriggerInvokeEvents(events, firing_id, NULL,
!IsSubTransaction());
continue;
/*
- * If this is an UPDATE of a PK table or FK table that does
- * not change the PK or FK respectively, we can skip queuing
- * the event: there is no need to fire the trigger.
+ * If this is an UPDATE of a PK table or FK table that does not change
+ * the PK or FK respectively, we can skip queuing the event: there is
+ * no need to fire the trigger.
*/
if ((event & TRIGGER_EVENT_OPMASK) == TRIGGER_EVENT_UPDATE)
{
break;
case RI_TRIGGER_FK:
+
/*
* Update on FK table
*
- * There is one exception when updating FK tables:
- * if the updated row was inserted by our own
- * transaction and the FK is deferred, we still
- * need to fire the trigger. This is because our
- * UPDATE will invalidate the INSERT so the
- * end-of-transaction INSERT RI trigger will not
- * do anything, so we have to do the check for the
- * UPDATE anyway.
+ * There is one exception when updating FK tables: if the
+ * updated row was inserted by our own transaction and the
+ * FK is deferred, we still need to fire the trigger. This
+ * is because our UPDATE will invalidate the INSERT so the
+ * end-of-transaction INSERT RI trigger will not do
+ * anything, so we have to do the check for the UPDATE
+ * anyway.
*/
if (HeapTupleHeaderGetXmin(oldtup->t_data) !=
GetCurrentTransactionId() &&
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.80 2005/08/22 17:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.81 2005/10/15 02:49:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
/*
* Type names must be one character shorter than other names, allowing
- * room to create the corresponding array type name with prepended
- * "_".
+ * room to create the corresponding array type name with prepended "_".
*/
if (strlen(typeName) > (NAMEDATALEN - 2))
ereport(ERROR,
char *a = defGetString(defel);
/*
- * Note: if argument was an unquoted identifier, parser will
- * have applied translations to it, so be prepared to
- * recognize translated type names as well as the nominal
- * form.
+ * Note: if argument was an unquoted identifier, parser will have
+ * applied translations to it, so be prepared to recognize
+ * translated type names as well as the nominal form.
*/
if (pg_strcasecmp(a, "double") == 0 ||
pg_strcasecmp(a, "float8") == 0 ||
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type output function %s must return type \"cstring\"",
- NameListToString(outputName))));
+ errmsg("type output function %s must return type \"cstring\"",
+ NameListToString(outputName))));
}
if (receiveOid)
{
if (resulttype != typoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type receive function %s must return type %s",
- NameListToString(receiveName), typeName)));
+ errmsg("type receive function %s must return type %s",
+ NameListToString(receiveName), typeName)));
}
if (sendOid)
{
if (resulttype != BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type send function %s must return type \"bytea\"",
- NameListToString(sendName))));
+ errmsg("type send function %s must return type \"bytea\"",
+ NameListToString(sendName))));
}
/*
- * Convert analysis function proc name to an OID. If no analysis
- * function is specified, we'll use zero to select the built-in
- * default algorithm.
+ * Convert analysis function proc name to an OID. If no analysis function
+ * is specified, we'll use zero to select the built-in default algorithm.
*/
if (analyzeName)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
false); /* Type NOT NULL */
/*
- * When we create a base type (as opposed to a complex type) we need
- * to have an array entry for it in pg_type as well.
+ * When we create a base type (as opposed to a complex type) we need to
+ * have an array entry for it in pg_type as well.
*/
shadow_type = makeArrayTypeName(typeName);
/* Permission check: must own type or its namespace */
if (!pg_type_ownercheck(typeoid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
- GetUserId()))
+ !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
+ GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE,
TypeNameToString(typename));
get_namespace_name(domainNamespace));
/*
- * Domainnames, unlike typenames don't need to account for the '_'
- * prefix. So they can be one character longer. (This test is
- * presently useless since the parser will have truncated the name to
- * fit. But leave it here since we may someday support arrays of
- * domains, in which case we'll be back to needing to enforce
- * NAMEDATALEN-2.)
+ * Domainnames, unlike typenames don't need to account for the '_' prefix.
+ * So they can be one character longer. (This test is presently useless
+ * since the parser will have truncated the name to fit. But leave it
+ * here since we may someday support arrays of domains, in which case
+ * we'll be back to needing to enforce NAMEDATALEN-2.)
*/
if (strlen(domainName) > (NAMEDATALEN - 1))
ereport(ERROR,
basetypeoid = HeapTupleGetOid(typeTup);
/*
- * Base type must be a plain base type. Domains over pseudo types
- * would create a security hole. Domains of domains might be made to
- * work in the future, but not today. Ditto for domains over complex
- * types.
+ * Base type must be a plain base type. Domains over pseudo types would
+ * create a security hole. Domains of domains might be made to work in
+ * the future, but not today. Ditto for domains over complex types.
*/
typtype = baseType->typtype;
if (typtype != 'b')
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("foreign key constraints not possible for domains")));
+ errmsg("foreign key constraints not possible for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
case CONSTR_DEFAULT:
/*
- * The inherited default value may be overridden by the
- * user with the DEFAULT statement.
+ * The inherited default value may be overridden by the user
+ * with the DEFAULT statement.
*/
if (defaultExpr)
ereport(ERROR,
pstate = make_parsestate(NULL);
/*
- * Cook the constr->raw_expr into an expression. Note:
- * Name is strictly for error message
+ * Cook the constr->raw_expr into an expression. Note: Name is
+ * strictly for error message
*/
defaultExpr = cookDefault(pstate, constr->raw_expr,
basetypeoid,
domainName);
/*
- * Expression must be stored as a nodeToString result, but
- * we also require a valid textual representation (mainly
- * to make life easier for pg_dump).
+ * Expression must be stored as a nodeToString result, but we
+ * also require a valid textual representation (mainly to make
+ * life easier for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
- deparse_context_for(domainName,
- InvalidOid),
+ deparse_context_for(domainName,
+ InvalidOid),
false, false);
defaultValueBin = nodeToString(defaultExpr);
break;
if (nullDefined && !typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = true;
nullDefined = true;
break;
if (nullDefined && typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = false;
nullDefined = true;
break;
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unique constraints not possible for domains")));
+ errmsg("unique constraints not possible for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("primary key constraints not possible for domains")));
+ errmsg("primary key constraints not possible for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
typNotNull); /* Type NOT NULL */
/*
- * Process constraints which refer to the domain ID returned by
- * TypeCreate
+ * Process constraints which refer to the domain ID returned by TypeCreate
*/
foreach(listptr, schema)
{
/* Permission check: must own type or its namespace */
if (!pg_type_ownercheck(typeoid, GetUserId()) &&
- !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
- GetUserId()))
+ !pg_namespace_ownercheck(((Form_pg_type) GETSTRUCT(tup))->typnamespace,
+ GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE,
TypeNameToString(typename));
Oid procOid;
/*
- * Input functions can take a single argument of type CSTRING, or
- * three arguments (string, typioparam OID, typmod).
+ * Input functions can take a single argument of type CSTRING, or three
+ * arguments (string, typioparam OID, typmod).
*
- * For backwards compatibility we allow OPAQUE in place of CSTRING; if we
- * see this, we issue a warning and fix up the pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of CSTRING; if we see
+ * this, we issue a warning and fix up the pg_proc entry.
*/
argList[0] = CSTRINGOID;
SetFunctionArgType(procOid, 0, CSTRINGOID);
/*
- * Need CommandCounterIncrement since DefineType will likely try
- * to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try to
+ * alter the pg_proc tuple again.
*/
CommandCounterIncrement();
/*
* Output functions can take a single argument of the type.
*
- * For backwards compatibility we allow OPAQUE in place of the actual
- * type name; if we see this, we issue a warning and fix up the
- * pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of the actual type
+ * name; if we see this, we issue a warning and fix up the pg_proc entry.
*/
argList[0] = typeOid;
{
/* Found, but must complain and fix the pg_proc entry */
ereport(WARNING,
- (errmsg("changing argument type of function %s from \"opaque\" to %s",
- NameListToString(procname), format_type_be(typeOid))));
+ (errmsg("changing argument type of function %s from \"opaque\" to %s",
+ NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);
/*
- * Need CommandCounterIncrement since DefineType will likely try
- * to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try to
+ * alter the pg_proc tuple again.
*/
CommandCounterIncrement();
Oid procOid;
/*
- * Receive functions can take a single argument of type INTERNAL, or
- * three arguments (internal, typioparam OID, typmod).
+ * Receive functions can take a single argument of type INTERNAL, or three
+ * arguments (internal, typioparam OID, typmod).
*/
argList[0] = INTERNALOID;
Oid procOid;
/*
- * Analyze functions always take one INTERNAL argument and return
- * bool.
+ * Analyze functions always take one INTERNAL argument and return bool.
*/
argList[0] = INTERNALOID;
if (get_func_rettype(procOid) != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type analyze function %s must return type \"boolean\"",
- NameListToString(procname))));
+ errmsg("type analyze function %s must return type \"boolean\"",
+ NameListToString(procname))));
return procOid;
}
if (coldeflist == NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("composite type must have at least one attribute")));
+ errmsg("composite type must have at least one attribute")));
/*
* now set the parameters for keys/inheritance etc. All of these are
/*
* Expression must be stored as a nodeToString result, but we also
- * require a valid textual representation (mainly to make life
- * easier for pg_dump).
+ * require a valid textual representation (mainly to make life easier
+ * for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
- deparse_context_for(NameStr(typTup->typname),
- InvalidOid),
+ deparse_context_for(NameStr(typTup->typname),
+ InvalidOid),
false, false);
/*
* Form an updated tuple with the new default and write it back.
*/
new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(
- nodeToString(defaultExpr)));
+ CStringGetDatum(
+ nodeToString(defaultExpr)));
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(defaultValue));
+ CStringGetDatum(defaultValue));
new_record_repl[Anum_pg_type_typdefault - 1] = 'r';
}
else
- /* Default is NULL, drop it */
+ /* Default is NULL, drop it */
{
new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n';
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains null values",
- NameStr(tupdesc->attrs[attnum - 1]->attname),
- RelationGetRelationName(testrel))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname),
+ RelationGetRelationName(testrel))));
}
}
heap_endscan(scan);
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's
- * a copy.
+ * Okay to update pg_type row. We can scribble on typTup because it's a
+ * copy.
*/
typTup->typnotnull = notNull;
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("foreign key constraints not possible for domains")));
+ errmsg("foreign key constraints not possible for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unique constraints not possible for domains")));
+ errmsg("unique constraints not possible for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("primary key constraints not possible for domains")));
+ errmsg("primary key constraints not possible for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
/*
* Since all other constraint types throw errors, this must be a check
- * constraint. First, process the constraint expression and add an
- * entry to pg_constraint.
+ * constraint. First, process the constraint expression and add an entry
+ * to pg_constraint.
*/
ccbin = domainAddConstraint(HeapTupleGetOid(tup), typTup->typnamespace,
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains values that violate the new constraint",
- NameStr(tupdesc->attrs[attnum - 1]->attname),
+ NameStr(tupdesc->attrs[attnum - 1]->attname),
RelationGetRelationName(testrel))));
}
HeapTuple depTup;
/*
- * We scan pg_depend to find those things that depend on the domain.
- * (We assume we can ignore refobjsubid for a domain.)
+ * We scan pg_depend to find those things that depend on the domain. (We
+ * assume we can ignore refobjsubid for a domain.)
*/
depRel = heap_open(DependRelationId, AccessShareLock);
}
/*
- * Confirm column has not been dropped, and is of the expected
- * type. This defends against an ALTER DROP COLUMN occuring just
- * before we acquired lock ... but if the whole table were
- * dropped, we'd still have a problem.
+ * Confirm column has not been dropped, and is of the expected type.
+ * This defends against an ALTER DROP COLUMN occuring just before we
+ * acquired lock ... but if the whole table were dropped, we'd still
+ * have a problem.
*/
if (pg_depend->objsubid > RelationGetNumberOfAttributes(rtc->rel))
continue;
continue;
/*
- * Okay, add column to result. We store the columns in
- * column-number order; this is just a hack to improve
- * predictability of regression test output ...
+ * Okay, add column to result. We store the columns in column-number
+ * order; this is just a hack to improve predictability of regression
+ * test output ...
*/
Assert(rtc->natts < RelationGetNumberOfAttributes(rtc->rel));
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("constraint \"%s\" for domain \"%s\" already exists",
- constr->name, domainName)));
+ errmsg("constraint \"%s\" for domain \"%s\" already exists",
+ constr->name, domainName)));
}
else
constr->name = ChooseConstraintName(domainName,
pstate = make_parsestate(NULL);
/*
- * Set up a CoerceToDomainValue to represent the occurrence of VALUE
- * in the expression. Note that it will appear to have the type of
- * the base type, not the domain. This seems correct since within the
- * check expression, we should not assume the input value can be
- * considered a member of the domain.
+ * Set up a CoerceToDomainValue to represent the occurrence of VALUE in
+ * the expression. Note that it will appear to have the type of the base
+ * type, not the domain. This seems correct since within the check
+ * expression, we should not assume the input value can be considered a
+ * member of the domain.
*/
domVal = makeNode(CoerceToDomainValue);
domVal->typeId = baseTypeOid;
if (list_length(pstate->p_rtable) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("cannot use table references in domain check constraint")));
+ errmsg("cannot use table references in domain check constraint")));
/*
* Domains don't allow var clauses (this should be redundant with the
if (contain_var_clause(expr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("cannot use table references in domain check constraint")));
+ errmsg("cannot use table references in domain check constraint")));
/*
* No subplans or aggregates, either...
/*
* Deparse it to produce text for consrc.
*
- * Since VARNOs aren't allowed in domain constraints, relation context
- * isn't required as anything other than a shell.
+ * Since VARNOs aren't allowed in domain constraints, relation context isn't
+ * required as anything other than a shell.
*/
ccsrc = deparse_expression(expr,
deparse_context_for(domainName,
ccsrc); /* Source form check constraint */
/*
- * Return the compiled constraint expression so the calling routine
- * can perform any additional required tests.
+ * Return the compiled constraint expression so the calling routine can
+ * perform any additional required tests.
*/
return ccbin;
}
continue;
/*
- * Not expecting conbin to be NULL, but we'll test for it
- * anyway
+ * Not expecting conbin to be NULL, but we'll test for it anyway
*/
val = fastgetattr(conTup, Anum_pg_constraint_conbin,
conRel->rd_att, &isNull);
r->check_expr = ExecInitExpr(check_expr, NULL);
/*
- * use lcons() here because constraints of lower domains
- * should be applied earlier.
+ * use lcons() here because constraints of lower domains should be
+ * applied earlier.
*/
result = lcons(r, result);
}
heap_close(conRel, AccessShareLock);
/*
- * Only need to add one NOT NULL check regardless of how many domains
- * in the stack request it.
+ * Only need to add one NOT NULL check regardless of how many domains in
+ * the stack request it.
*/
if (notNull)
{
if (!superuser())
{
/* Otherwise, must be owner of the existing object */
- if (!pg_type_ownercheck(HeapTupleGetOid(tup),GetUserId()))
+ if (!pg_type_ownercheck(HeapTupleGetOid(tup), GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE,
TypeNameToString(typename));
}
/*
- * Modify the owner --- okay to scribble on typTup because it's a
- * copy
+ * Modify the owner --- okay to scribble on typTup because it's a copy
*/
typTup->typowner = newOwnerId;
typTup = (Form_pg_type) GETSTRUCT(tup);
/*
- * Modify the owner --- okay to scribble on typTup because it's a
- * copy
+ * Modify the owner --- okay to scribble on typTup because it's a copy
*/
typTup->typowner = newOwnerId;
void
AlterTypeNamespace(List *names, const char *newschema)
{
- TypeName *typename;
- Oid typeOid;
- Oid nspOid;
+ TypeName *typename;
+ Oid typeOid;
+ Oid nspOid;
/* get type OID */
typename = makeNode(TypeName);
if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move objects into or out of temporary schemas")));
+ errmsg("cannot move objects into or out of temporary schemas")));
/* same for TOAST schema */
if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE)
/*
* Composite types have pg_class entries.
*
- * We need to modify the pg_class tuple as well to
- * reflect the change of schema.
+ * We need to modify the pg_class tuple as well to reflect the change of
+ * schema.
*/
if (isCompositeType)
{
- Relation classRel;
+ Relation classRel;
classRel = heap_open(RelationRelationId, RowExclusiveLock);
/*
- * The dependency on the schema is listed under the pg_class entry,
- * so tell AlterRelationNamespaceInternal to fix it.
+ * The dependency on the schema is listed under the pg_class entry, so
+ * tell AlterRelationNamespaceInternal to fix it.
*/
AlterRelationNamespaceInternal(classRel, typform->typrelid,
oldNspOid, nspOid,
heap_close(classRel, RowExclusiveLock);
/*
- * Check for constraints associated with the composite type
- * (we don't currently support this, but probably will someday).
+ * Check for constraints associated with the composite type (we don't
+ * currently support this, but probably will someday).
*/
AlterConstraintNamespaces(typform->typrelid, oldNspOid,
nspOid, false);
AlterConstraintNamespaces(typeOid, oldNspOid, nspOid, true);
/*
- * Update dependency on schema, if any --- a table rowtype has not
- * got one.
+ * Update dependency on schema, if any --- a table rowtype has not got
+ * one.
*/
if (typform->typtype != 'c')
if (changeDependencyFor(TypeRelationId, typeOid,
- NamespaceRelationId, oldNspOid, nspOid) != 1)
+ NamespaceRelationId, oldNspOid, nspOid) != 1)
elog(ERROR, "failed to change schema dependency for type %s",
format_type_be(typeOid));
}
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.160 2005/07/31 17:19:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.161 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static List *roleNamesToIds(List *memberNames);
static void AddRoleMems(const char *rolename, Oid roleid,
- List *memberNames, List *memberIds,
- Oid grantorId, bool admin_opt);
+ List *memberNames, List *memberIds,
+ Oid grantorId, bool admin_opt);
static void DelRoleMems(const char *rolename, Oid roleid,
- List *memberNames, List *memberIds,
- bool admin_opt);
+ List *memberNames, List *memberIds,
+ bool admin_opt);
/* Check if current user has createrole privileges */
Oid roleid;
ListCell *item;
ListCell *option;
- char *password = NULL; /* user password */
+ char *password = NULL; /* user password */
bool encrypt_password = Password_encryption; /* encrypt password? */
char encrypted_password[MD5_PASSWD_LEN + 1];
- bool issuper = false; /* Make the user a superuser? */
- bool inherit = true; /* Auto inherit privileges? */
+ bool issuper = false; /* Make the user a superuser? */
+ bool inherit = true; /* Auto inherit privileges? */
bool createrole = false; /* Can this user create roles? */
bool createdb = false; /* Can the user create databases? */
bool canlogin = false; /* Can this user login? */
- int connlimit = -1; /* maximum connections allowed */
- List *addroleto = NIL; /* roles to make this a member of */
+ int connlimit = -1; /* maximum connections allowed */
+ List *addroleto = NIL; /* roles to make this a member of */
List *rolemembers = NIL; /* roles to be members of this role */
List *adminmembers = NIL; /* roles to be admins of this role */
char *validUntil = NULL; /* time the login is valid until */
stmt->role)));
/*
- * Check the pg_authid relation to be certain the role doesn't
- * already exist. Note we secure exclusive lock because
- * we need to protect our eventual update of the flat auth file.
+ * Check the pg_authid relation to be certain the role doesn't already
+ * exist. Note we secure exclusive lock because we need to protect our
+ * eventual update of the flat auth file.
*/
pg_authid_rel = heap_open(AuthIdRelationId, ExclusiveLock);
pg_authid_dsc = RelationGetDescr(pg_authid_rel);
CatalogUpdateIndexes(pg_authid_rel, tuple);
/*
- * Advance command counter so we can see new record; else tests
- * in AddRoleMems may fail.
+ * Advance command counter so we can see new record; else tests in
+ * AddRoleMems may fail.
*/
if (addroleto || adminmembers || rolemembers)
CommandCounterIncrement();
*/
foreach(item, addroleto)
{
- char *oldrolename = strVal(lfirst(item));
- Oid oldroleid = get_roleid_checked(oldrolename);
+ char *oldrolename = strVal(lfirst(item));
+ Oid oldroleid = get_roleid_checked(oldrolename);
AddRoleMems(oldrolename, oldroleid,
list_make1(makeString(stmt->role)),
}
/*
- * Add the specified members to this new role. adminmembers get the
- * admin option, rolemembers don't.
+ * Add the specified members to this new role. adminmembers get the admin
+ * option, rolemembers don't.
*/
AddRoleMems(stmt->role, roleid,
adminmembers, roleNamesToIds(adminmembers),
HeapTuple tuple,
new_tuple;
ListCell *option;
- char *password = NULL; /* user password */
+ char *password = NULL; /* user password */
bool encrypt_password = Password_encryption; /* encrypt password? */
char encrypted_password[MD5_PASSWD_LEN + 1];
- int issuper = -1; /* Make the user a superuser? */
- int inherit = -1; /* Auto inherit privileges? */
- int createrole = -1; /* Can this user create roles? */
- int createdb = -1; /* Can the user create databases? */
- int canlogin = -1; /* Can this user login? */
- int connlimit = -1; /* maximum connections allowed */
+ int issuper = -1; /* Make the user a superuser? */
+ int inherit = -1; /* Auto inherit privileges? */
+ int createrole = -1; /* Can this user create roles? */
+ int createdb = -1; /* Can the user create databases? */
+ int canlogin = -1; /* Can this user login? */
+ int connlimit = -1; /* maximum connections allowed */
List *rolemembers = NIL; /* roles to be added/removed */
char *validUntil = NULL; /* time the login is valid until */
DefElem *dpassword = NULL;
* issuper/createrole/catupdate/etc
*
* XXX It's rather unclear how to handle catupdate. It's probably best to
- * keep it equal to the superuser status, otherwise you could end up
- * with a situation where no existing superuser can alter the
- * catalogs, including pg_authid!
+ * keep it equal to the superuser status, otherwise you could end up with
+ * a situation where no existing superuser can alter the catalogs,
+ * including pg_authid!
*/
if (issuper >= 0)
{
heap_freetuple(new_tuple);
/*
- * Advance command counter so we can see new record; else tests
- * in AddRoleMems may fail.
+ * Advance command counter so we can see new record; else tests in
+ * AddRoleMems may fail.
*/
if (rolemembers)
CommandCounterIncrement();
void
DropRole(DropRoleStmt *stmt)
{
- Relation pg_authid_rel, pg_auth_members_rel;
+ Relation pg_authid_rel,
+ pg_auth_members_rel;
ListCell *item;
if (!have_createrole_privilege())
/*
* Scan the pg_authid relation to find the Oid of the role(s) to be
- * deleted. Note we secure exclusive lock on pg_authid, because we
- * need to protect our update of the flat auth file. A regular
- * writer's lock on pg_auth_members is sufficient though.
+ * deleted. Note we secure exclusive lock on pg_authid, because we need
+ * to protect our update of the flat auth file. A regular writer's lock
+ * on pg_auth_members is sufficient though.
*/
pg_authid_rel = heap_open(AuthIdRelationId, ExclusiveLock);
pg_auth_members_rel = heap_open(AuthMemRelationId, RowExclusiveLock);
const char *role = strVal(lfirst(item));
HeapTuple tuple,
tmp_tuple;
- ScanKeyData scankey;
+ ScanKeyData scankey;
char *detail;
SysScanDesc sscan;
Oid roleid;
/*
* Lock the role, so nobody can add dependencies to her while we drop
* her. We keep the lock until the end of transaction.
- */
+ */
LockSharedObject(AuthIdRelationId, roleid, 0, AccessExclusiveLock);
/* Check for pg_shdepend entries depending on this role */
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("role \"%s\" cannot be dropped because some objects depend on it",
- role),
+ role),
errdetail("%s", detail)));
/*
ReleaseSysCache(tuple);
/*
- * Remove role from the pg_auth_members table. We have to remove
- * all tuples that show it as either a role or a member.
+ * Remove role from the pg_auth_members table. We have to remove all
+ * tuples that show it as either a role or a member.
*
- * XXX what about grantor entries? Maybe we should do one heap scan.
+ * XXX what about grantor entries? Maybe we should do one heap scan.
*/
ScanKeyInit(&scankey,
Anum_pg_auth_members_roleid,
systable_endscan(sscan);
/*
- * Advance command counter so that later iterations of this loop
- * will see the changes already made. This is essential if, for
- * example, we are trying to drop both a role and one of its
- * direct members --- we'll get an error if we try to delete the
- * linking pg_auth_members tuple twice. (We do not need a CCI
- * between the two delete loops above, because it's not allowed
- * for a role to directly contain itself.)
+ * Advance command counter so that later iterations of this loop will
+ * see the changes already made. This is essential if, for example,
+ * we are trying to drop both a role and one of its direct members ---
+ * we'll get an error if we try to delete the linking pg_auth_members
+ * tuple twice. (We do not need a CCI between the two delete loops
+ * above, because it's not allowed for a role to directly contain
+ * itself.)
*/
CommandCounterIncrement();
}
errmsg("role \"%s\" does not exist", oldname)));
/*
- * XXX Client applications probably store the session user somewhere,
- * so renaming it could cause confusion. On the other hand, there may
- * not be an actual problem besides a little confusion, so think about
- * this and decide. Same for SET ROLE ... we don't restrict renaming
- * the current effective userid, though.
+ * XXX Client applications probably store the session user somewhere, so
+ * renaming it could cause confusion. On the other hand, there may not be
+ * an actual problem besides a little confusion, so think about this and
+ * decide. Same for SET ROLE ... we don't restrict renaming the current
+ * effective userid, though.
*/
roleid = HeapTupleGetOid(oldtuple);
repl_repl[Anum_pg_authid_rolname - 1] = 'r';
repl_val[Anum_pg_authid_rolname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(newname));
+ CStringGetDatum(newname));
repl_null[Anum_pg_authid_rolname - 1] = ' ';
datum = heap_getattr(oldtuple, Anum_pg_authid_rolpassword, dsc, &isnull);
grantee_ids = roleNamesToIds(stmt->grantee_roles);
/*
- * Even though this operation doesn't change pg_authid, we must
- * secure exclusive lock on it to protect our update of the flat
- * auth file.
+ * Even though this operation doesn't change pg_authid, we must secure
+ * exclusive lock on it to protect our update of the flat auth file.
*/
pg_authid_rel = heap_open(AuthIdRelationId, ExclusiveLock);
/*
- * Step through all of the granted roles and add/remove
- * entries for the grantees, or, if admin_opt is set, then
- * just add/remove the admin option.
+ * Step through all of the granted roles and add/remove entries for the
+ * grantees, or, if admin_opt is set, then just add/remove the admin
+ * option.
*
* Note: Permissions checking is done by AddRoleMems/DelRoleMems
*/
foreach(item, stmt->granted_roles)
{
- char *rolename = strVal(lfirst(item));
- Oid roleid = get_roleid_checked(rolename);
+ char *rolename = strVal(lfirst(item));
+ Oid roleid = get_roleid_checked(rolename);
if (stmt->is_grant)
AddRoleMems(rolename, roleid,
foreach(l, memberNames)
{
- char *rolename = strVal(lfirst(l));
- Oid roleid = get_roleid_checked(rolename);
+ char *rolename = strVal(lfirst(l));
+ Oid roleid = get_roleid_checked(rolename);
result = lappend_oid(result, roleid);
}
{
Relation pg_authmem_rel;
TupleDesc pg_authmem_dsc;
- ListCell *nameitem;
- ListCell *iditem;
+ ListCell *nameitem;
+ ListCell *iditem;
Assert(list_length(memberNames) == list_length(memberIds));
return;
/*
- * Check permissions: must have createrole or admin option on the
- * role to be changed. To mess with a superuser role, you gotta
- * be superuser.
+ * Check permissions: must have createrole or admin option on the role to
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
Oid memberid = lfirst_oid(iditem);
HeapTuple authmem_tuple;
HeapTuple tuple;
- Datum new_record[Natts_pg_auth_members];
- char new_record_nulls[Natts_pg_auth_members];
- char new_record_repl[Natts_pg_auth_members];
+ Datum new_record[Natts_pg_auth_members];
+ char new_record_nulls[Natts_pg_auth_members];
+ char new_record_repl[Natts_pg_auth_members];
/*
* Refuse creation of membership loops, including the trivial case
- * where a role is made a member of itself. We do this by checking
- * to see if the target role is already a member of the proposed
- * member role.
+ * where a role is made a member of itself. We do this by checking to
+ * see if the target role is already a member of the proposed member
+ * role.
*/
if (is_member_of_role(roleid, memberid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- (errmsg("role \"%s\" is a member of role \"%s\"",
- rolename, membername))));
+ (errmsg("role \"%s\" is a member of role \"%s\"",
+ rolename, membername))));
/*
- * Check if entry for this role/member already exists;
- * if so, give warning unless we are adding admin option.
+ * Check if entry for this role/member already exists; if so, give
+ * warning unless we are adding admin option.
*/
authmem_tuple = SearchSysCache(AUTHMEMROLEMEM,
ObjectIdGetDatum(roleid),
ObjectIdGetDatum(memberid),
0, 0);
if (HeapTupleIsValid(authmem_tuple) &&
- (!admin_opt ||
+ (!admin_opt ||
((Form_pg_auth_members) GETSTRUCT(authmem_tuple))->admin_option))
{
ereport(NOTICE,
{
Relation pg_authmem_rel;
TupleDesc pg_authmem_dsc;
- ListCell *nameitem;
- ListCell *iditem;
+ ListCell *nameitem;
+ ListCell *iditem;
Assert(list_length(memberNames) == list_length(memberIds));
return;
/*
- * Check permissions: must have createrole or admin option on the
- * role to be changed. To mess with a superuser role, you gotta
- * be superuser.
+ * Check permissions: must have createrole or admin option on the role to
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
{
/* Just turn off the admin option */
HeapTuple tuple;
- Datum new_record[Natts_pg_auth_members];
- char new_record_nulls[Natts_pg_auth_members];
- char new_record_repl[Natts_pg_auth_members];
+ Datum new_record[Natts_pg_auth_members];
+ char new_record_nulls[Natts_pg_auth_members];
+ char new_record_repl[Natts_pg_auth_members];
/* Build a tuple to update with */
MemSet(new_record, 0, sizeof(new_record));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.316 2005/10/03 22:52:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.317 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* non-export function prototypes */
static List *get_rel_oids(List *relids, const RangeVar *vacrel,
- const char *stmttype);
+ const char *stmttype);
static void vac_update_dbstats(Oid dbid,
TransactionId vacuumXID,
TransactionId frozenXID);
elevel = DEBUG2;
/*
- * We cannot run VACUUM inside a user transaction block; if we were
- * inside a transaction, then our commit- and
- * start-transaction-command calls would not have the intended effect!
- * Furthermore, the forced commit that occurs before truncating the
- * relation's file would have the effect of committing the rest of the
- * user's transaction too, which would certainly not be the desired
- * behavior. (This only applies to VACUUM FULL, though. We could in
- * theory run lazy VACUUM inside a transaction block, but we choose to
- * disallow that case because we'd rather commit as soon as possible
- * after finishing the vacuum. This is mainly so that we can let go
- * the AccessExclusiveLock that we may be holding.)
+ * We cannot run VACUUM inside a user transaction block; if we were inside
+ * a transaction, then our commit- and start-transaction-command calls
+ * would not have the intended effect! Furthermore, the forced commit that
+ * occurs before truncating the relation's file would have the effect of
+ * committing the rest of the user's transaction too, which would
+ * certainly not be the desired behavior. (This only applies to VACUUM
+ * FULL, though. We could in theory run lazy VACUUM inside a transaction
+ * block, but we choose to disallow that case because we'd rather commit
+ * as soon as possible after finishing the vacuum. This is mainly so that
+ * we can let go the AccessExclusiveLock that we may be holding.)
*
* ANALYZE (without VACUUM) can run either way.
*/
/*
* Disallow the combination VACUUM FULL FREEZE; although it would mostly
* work, VACUUM FULL's ability to move tuples around means that it is
- * injecting its own XID into tuple visibility checks. We'd have to
+ * injecting its own XID into tuple visibility checks. We'd have to
* guarantee that every moved tuple is properly marked XMIN_COMMITTED or
* XMIN_INVALID before the end of the operation. There are corner cases
- * where this does not happen, and getting rid of them all seems hard
- * (not to mention fragile to maintain). On the whole it's not worth it
+ * where this does not happen, and getting rid of them all seems hard (not
+ * to mention fragile to maintain). On the whole it's not worth it
* compared to telling people to use two operations. See pgsql-hackers
* discussion of 27-Nov-2004, and comments below for update_hint_bits().
*
- * Note: this is enforced here, and not in the grammar, since (a) we can
- * give a better error message, and (b) we might want to allow it again
+ * Note: this is enforced here, and not in the grammar, since (a) we can give
+ * a better error message, and (b) we might want to allow it again
* someday.
*/
if (vacstmt->vacuum && vacstmt->full && vacstmt->freeze)
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of PortalContext, it will go away eventually even
- * if we suffer an error; there's no need for special abort cleanup
- * logic.
+ * Since it is a child of PortalContext, it will go away eventually even if
+ * we suffer an error; there's no need for special abort cleanup logic.
*/
vac_context = AllocSetContextCreate(PortalContext,
"Vacuum",
all_rels = (relids == NIL && vacstmt->relation == NULL);
/*
- * Build list of relations to process, unless caller gave us one.
- * (If we build one, we put it in vac_context for safekeeping.)
+ * Build list of relations to process, unless caller gave us one. (If we
+ * build one, we put it in vac_context for safekeeping.)
*/
relations = get_rel_oids(relids, vacstmt->relation, stmttype);
/*
* It's a database-wide VACUUM.
*
- * Compute the initially applicable OldestXmin and FreezeLimit XIDs,
- * so that we can record these values at the end of the VACUUM.
- * Note that individual tables may well be processed with newer
- * values, but we can guarantee that no (non-shared) relations are
- * processed with older ones.
+ * Compute the initially applicable OldestXmin and FreezeLimit XIDs, so
+ * that we can record these values at the end of the VACUUM. Note that
+ * individual tables may well be processed with newer values, but we
+ * can guarantee that no (non-shared) relations are processed with
+ * older ones.
*
- * It is okay to record non-shared values in pg_database, even though
- * we may vacuum shared relations with older cutoffs, because only
- * the minimum of the values present in pg_database matters. We
- * can be sure that shared relations have at some time been
- * vacuumed with cutoffs no worse than the global minimum; for, if
- * there is a backend in some other DB with xmin = OLDXMIN that's
- * determining the cutoff with which we vacuum shared relations,
- * it is not possible for that database to have a cutoff newer
- * than OLDXMIN recorded in pg_database.
+ * It is okay to record non-shared values in pg_database, even though we
+ * may vacuum shared relations with older cutoffs, because only the
+ * minimum of the values present in pg_database matters. We can be
+ * sure that shared relations have at some time been vacuumed with
+ * cutoffs no worse than the global minimum; for, if there is a
+ * backend in some other DB with xmin = OLDXMIN that's determining the
+ * cutoff with which we vacuum shared relations, it is not possible
+ * for that database to have a cutoff newer than OLDXMIN recorded in
+ * pg_database.
*/
vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin,
/*
* Decide whether we need to start/commit our own transactions.
*
- * For VACUUM (with or without ANALYZE): always do so, so that we can
- * release locks as soon as possible. (We could possibly use the
- * outer transaction for a one-table VACUUM, but handling TOAST tables
- * would be problematic.)
+ * For VACUUM (with or without ANALYZE): always do so, so that we can release
+ * locks as soon as possible. (We could possibly use the outer
+ * transaction for a one-table VACUUM, but handling TOAST tables would be
+ * problematic.)
*
* For ANALYZE (no VACUUM): if inside a transaction block, we cannot
- * start/commit our own transactions. Also, there's no need to do so
- * if only processing one relation. For multiple relations when not
- * within a transaction block, use own transactions so we can release
- * locks sooner.
+ * start/commit our own transactions. Also, there's no need to do so if
+ * only processing one relation. For multiple relations when not within a
+ * transaction block, use own transactions so we can release locks sooner.
*/
if (vacstmt->vacuum)
use_own_xacts = true;
}
/*
- * If we are running ANALYZE without per-table transactions, we'll
- * need a memory context with table lifetime.
+ * If we are running ANALYZE without per-table transactions, we'll need a
+ * memory context with table lifetime.
*/
if (!use_own_xacts)
anl_context = AllocSetContextCreate(PortalContext,
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * vacuum_rel expects to be entered with no transaction active; it
- * will start and commit its own transaction. But we are called by an
- * SQL command, and so we are executing inside a transaction already.
- * We commit the transaction started in PostgresMain() here, and start
- * another one before exiting to match the commit waiting for us back
- * in PostgresMain().
+ * vacuum_rel expects to be entered with no transaction active; it will
+ * start and commit its own transaction. But we are called by an SQL
+ * command, and so we are executing inside a transaction already. We
+ * commit the transaction started in PostgresMain() here, and start
+ * another one before exiting to match the commit waiting for us back in
+ * PostgresMain().
*/
if (use_own_xacts)
{
MemoryContext old_context = NULL;
/*
- * If using separate xacts, start one for analyze.
- * Otherwise, we can use the outer transaction, but we
- * still need to call analyze_rel in a memory context that
- * will be cleaned up on return (else we leak memory while
- * processing multiple tables).
+ * If using separate xacts, start one for analyze. Otherwise,
+ * we can use the outer transaction, but we still need to call
+ * analyze_rel in a memory context that will be cleaned up on
+ * return (else we leak memory while processing multiple
+ * tables).
*/
if (use_own_xacts)
{
old_context = MemoryContextSwitchTo(anl_context);
/*
- * Tell the buffer replacement strategy that vacuum is
- * causing the IO
+ * Tell the buffer replacement strategy that vacuum is causing
+ * the IO
*/
StrategyHintVacuum(true);
if (vacstmt->vacuum)
{
/*
- * If it was a database-wide VACUUM, print FSM usage statistics
- * (we don't make you be superuser to see these).
+ * If it was a database-wide VACUUM, print FSM usage statistics (we
+ * don't make you be superuser to see these).
*/
if (all_rels)
PrintFreeSpaceMapStatistics(elevel);
/*
* If we completed a database-wide VACUUM without skipping any
- * relations, update the database's pg_database row with info
- * about the transaction IDs used, and try to truncate pg_clog.
+ * relations, update the database's pg_database row with info about
+ * the transaction IDs used, and try to truncate pg_clog.
*/
if (all_rels)
{
/*
* Clean up working storage --- note we must do this after
- * StartTransactionCommand, else we might be trying to delete the
- * active context!
+ * StartTransactionCommand, else we might be trying to delete the active
+ * context!
*/
MemoryContextDelete(vac_context);
vac_context = NULL;
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/*
- * Invalidate the tuple in the catcaches; this also arranges to flush
- * the relation's relcache entry. (If we fail to commit for some
- * reason, no flush will occur, but no great harm is done since there
- * are no noncritical state updates here.)
+ * Invalidate the tuple in the catcaches; this also arranges to flush the
+ * relation's relcache entry. (If we fail to commit for some reason, no
+ * flush will occur, but no great harm is done since there are no
+ * noncritical state updates here.)
*/
CacheInvalidateHeapTuple(rd, &rtup);
heap_close(relation, AccessShareLock);
/*
- * Do not truncate CLOG if we seem to have suffered wraparound
- * already; the computed minimum XID might be bogus.
+ * Do not truncate CLOG if we seem to have suffered wraparound already;
+ * the computed minimum XID might be bogus.
*/
if (vacuumAlreadyWrapped)
{
TruncateCLOG(vacuumXID);
/*
- * Do not update varsup.c if we seem to have suffered wraparound
- * already; the computed XID might be bogus.
+ * Do not update varsup.c if we seem to have suffered wraparound already;
+ * the computed XID might be bogus.
*/
if (frozenAlreadyWrapped)
{
age = (int32) (myXID - frozenXID);
if (age > (int32) ((MaxTransactionId >> 3) * 3))
ereport(WARNING,
- (errmsg("database \"%s\" must be vacuumed within %u transactions",
- NameStr(oldest_datname),
- (MaxTransactionId >> 1) - age),
- errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".",
- NameStr(oldest_datname))));
+ (errmsg("database \"%s\" must be vacuumed within %u transactions",
+ NameStr(oldest_datname),
+ (MaxTransactionId >> 1) - age),
+ errhint("To avoid a database shutdown, execute a full-database VACUUM in \"%s\".",
+ NameStr(oldest_datname))));
}
CHECK_FOR_INTERRUPTS();
/*
- * Race condition -- if the pg_class tuple has gone away since the
- * last time we saw it, we don't need to vacuum it.
+ * Race condition -- if the pg_class tuple has gone away since the last
+ * time we saw it, we don't need to vacuum it.
*/
if (!SearchSysCacheExists(RELOID,
ObjectIdGetDatum(relid),
}
/*
- * Determine the type of lock we want --- hard exclusive lock for a
- * FULL vacuum, but just ShareUpdateExclusiveLock for concurrent
- * vacuum. Either way, we can be sure that no other backend is
- * vacuuming the same table.
+ * Determine the type of lock we want --- hard exclusive lock for a FULL
+ * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum.
+ * Either way, we can be sure that no other backend is vacuuming the same
+ * table.
*/
lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;
/*
- * Open the class, get an appropriate lock on it, and check
- * permissions.
+ * Open the class, get an appropriate lock on it, and check permissions.
*
- * We allow the user to vacuum a table if he is superuser, the table
- * owner, or the database owner (but in the latter case, only if it's
- * not a shared relation). pg_class_ownercheck includes the superuser
- * case.
+ * We allow the user to vacuum a table if he is superuser, the table owner,
+ * or the database owner (but in the latter case, only if it's not a
+ * shared relation). pg_class_ownercheck includes the superuser case.
*
- * Note we choose to treat permissions failure as a WARNING and keep
- * trying to vacuum the rest of the DB --- is this appropriate?
+ * Note we choose to treat permissions failure as a WARNING and keep trying
+ * to vacuum the rest of the DB --- is this appropriate?
*/
onerel = relation_open(relid, lmode);
}
/*
- * Check that it's a plain table; we used to do this in get_rel_oids()
- * but seems safer to check after we've locked the relation.
+ * Check that it's a plain table; we used to do this in get_rel_oids() but
+ * seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != expected_relkind)
{
relation_close(onerel, lmode);
StrategyHintVacuum(false);
CommitTransactionCommand();
- return true; /* assume no long-lived data in temp
- * tables */
+ return true; /* assume no long-lived data in temp tables */
}
/*
* Get a session-level lock too. This will protect our access to the
* relation across multiple transactions, so that we can vacuum the
- * relation's TOAST table (if any) secure in the knowledge that no one
- * is deleting the parent relation.
+ * relation's TOAST table (if any) secure in the knowledge that no one is
+ * deleting the parent relation.
*
* NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the
/*
* If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that
- * "analyze" will not get done on the toast table. This is good,
- * because the toaster always uses hardcoded index access and
- * statistics are totally unimportant for toast relations.
+ * "analyze" will not get done on the toast table. This is good, because
+ * the toaster always uses hardcoded index access and statistics are
+ * totally unimportant for toast relations.
*/
if (toast_relid != InvalidOid)
{
{
VacPageListData vacuum_pages; /* List of pages to vacuum and/or
* clean indexes */
- VacPageListData fraged_pages; /* List of pages with space enough
- * for re-using */
+ VacPageListData fraged_pages; /* List of pages with space enough for
+ * re-using */
Relation *Irel;
int nindexes,
i;
/* report results to the stats collector, too */
pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared,
- vacstmt->analyze, vacrelstats->rel_tuples);
+ vacstmt->analyze, vacrelstats->rel_tuples);
}
/*
* Since we are holding exclusive lock on the relation, no other
- * backend can be accessing the page; however it is possible that
- * the background writer will try to write the page if it's already
- * marked dirty. To ensure that invalid data doesn't get written to
- * disk, we must take exclusive buffer lock wherever we potentially
- * modify pages.
+ * backend can be accessing the page; however it is possible that the
+ * background writer will try to write the page if it's already marked
+ * dirty. To ensure that invalid data doesn't get written to disk, we
+ * must take exclusive buffer lock wherever we potentially modify
+ * pages.
*/
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
VacPage vacpagecopy;
ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
case HEAPTUPLE_LIVE:
/*
- * Tuple is good. Consider whether to replace its
- * xmin value with FrozenTransactionId.
+ * Tuple is good. Consider whether to replace its xmin
+ * value with FrozenTransactionId.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
case HEAPTUPLE_RECENTLY_DEAD:
/*
- * If tuple is recently deleted then we must not
- * remove it from relation.
+ * If tuple is recently deleted then we must not remove it
+ * from relation.
*/
nkeep += 1;
/*
- * If we do shrinking and this tuple is updated one
- * then remember it to construct updated tuple
- * dependencies.
+ * If we do shrinking and this tuple is updated one then
+ * remember it to construct updated tuple dependencies.
*/
if (do_shrinking &&
!(ItemPointerEquals(&(tuple.t_self),
{
free_vtlinks = 1000;
vtlinks = (VTupleLink) repalloc(vtlinks,
- (free_vtlinks + num_vtlinks) *
- sizeof(VTupleLinkData));
+ (free_vtlinks + num_vtlinks) *
+ sizeof(VTupleLinkData));
}
vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid;
vtlinks[num_vtlinks].this_tid = tuple.t_self;
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
- * This should not happen, since we hold exclusive
- * lock on the relation; shouldn't we raise an error?
- * (Actually, it can happen in system catalogs, since
- * we tend to release write lock before commit there.)
+ * This should not happen, since we hold exclusive lock on
+ * the relation; shouldn't we raise an error? (Actually,
+ * it can happen in system catalogs, since we tend to
+ * release write lock before commit there.)
*/
ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation",
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
- * This should not happen, since we hold exclusive
- * lock on the relation; shouldn't we raise an error?
- * (Actually, it can happen in system catalogs, since
- * we tend to release write lock before commit there.)
+ * This should not happen, since we hold exclusive lock on
+ * the relation; shouldn't we raise an error? (Actually,
+ * it can happen in system catalogs, since we tend to
+ * release write lock before commit there.)
*/
ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation",
ItemId lpp;
/*
- * Here we are building a temporary copy of the page with
- * dead tuples removed. Below we will apply
+ * Here we are building a temporary copy of the page with dead
+ * tuples removed. Below we will apply
* PageRepairFragmentation to the copy, so that we can
- * determine how much space will be available after
- * removal of dead tuples. But note we are NOT changing
- * the real page yet...
+ * determine how much space will be available after removal of
+ * dead tuples. But note we are NOT changing the real page
+ * yet...
*/
if (tempPage == NULL)
{
/*
* Add the page to fraged_pages if it has a useful amount of free
* space. "Useful" means enough for a minimal-sized tuple. But we
- * don't know that accurately near the start of the relation, so
- * add pages unconditionally if they have >= BLCKSZ/10 free space.
+ * don't know that accurately near the start of the relation, so add
+ * pages unconditionally if they have >= BLCKSZ/10 free space.
*/
do_frag = (vacpage->free >= min_tlen || vacpage->free >= BLCKSZ / 10);
/*
* Include the page in empty_end_pages if it will be empty after
- * vacuuming; this is to keep us from using it as a move
- * destination.
+ * vacuuming; this is to keep us from using it as a move destination.
*/
if (notup)
{
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead row versions cannot be removed yet.\n"
- "Nonremovable row versions range from %lu to %lu bytes long.\n"
+ "Nonremovable row versions range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
- "Total free space (including removable row versions) is %.0f bytes.\n"
+ "Total free space (including removable row versions) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
- "%u pages containing %.0f free bytes are potential move destinations.\n"
+ "%u pages containing %.0f free bytes are potential move destinations.\n"
"%s.",
nkeep,
(unsigned long) min_tlen, (unsigned long) max_tlen,
vacpage->offsets_used = vacpage->offsets_free = 0;
/*
- * Scan pages backwards from the last nonempty page, trying to move
- * tuples down to lower pages. Quit when we reach a page that we have
- * moved any tuples onto, or the first page if we haven't moved
- * anything, or when we find a page we cannot completely empty (this
- * last condition is handled by "break" statements within the loop).
+ * Scan pages backwards from the last nonempty page, trying to move tuples
+ * down to lower pages. Quit when we reach a page that we have moved any
+ * tuples onto, or the first page if we haven't moved anything, or when we
+ * find a page we cannot completely empty (this last condition is handled
+ * by "break" statements within the loop).
*
- * NB: this code depends on the vacuum_pages and fraged_pages lists being
- * in order by blkno.
+ * NB: this code depends on the vacuum_pages and fraged_pages lists being in
+ * order by blkno.
*/
nblocks = vacrelstats->rel_pages;
for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
vacuum_delay_point();
/*
- * Forget fraged_pages pages at or after this one; they're no
- * longer useful as move targets, since we only want to move down.
- * Note that since we stop the outer loop at last_move_dest_block,
- * pages removed here cannot have had anything moved onto them
- * already.
+ * Forget fraged_pages pages at or after this one; they're no longer
+ * useful as move targets, since we only want to move down. Note that
+ * since we stop the outer loop at last_move_dest_block, pages removed
+ * here cannot have had anything moved onto them already.
*
- * Also note that we don't change the stored fraged_pages list, only
- * our local variable num_fraged_pages; so the forgotten pages are
- * still available to be loaded into the free space map later.
+ * Also note that we don't change the stored fraged_pages list, only our
+ * local variable num_fraged_pages; so the forgotten pages are still
+ * available to be loaded into the free space map later.
*/
while (num_fraged_pages > 0 &&
- fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
+ fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
{
Assert(fraged_pages->pagedesc[num_fraged_pages - 1]->offsets_used == 0);
--num_fraged_pages;
else
Assert(!isempty);
- chain_tuple_moved = false; /* no one chain-tuple was moved
- * off this page, yet */
+ chain_tuple_moved = false; /* no one chain-tuple was moved off
+ * this page, yet */
vacpage->blkno = blkno;
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
elog(ERROR, "invalid XVAC in tuple header");
/*
- * If this (chain) tuple is moved by me already then I
- * have to check is it in vacpage or not - i.e. is it
- * moved while cleaning this page or some previous one.
+ * If this (chain) tuple is moved by me already then I have to
+ * check is it in vacpage or not - i.e. is it moved while
+ * cleaning this page or some previous one.
*/
/* Can't we Assert(keep_tuples > 0) here? */
}
/*
- * If this tuple is in a chain of tuples created in updates
- * by "recent" transactions then we have to move the whole chain
- * of tuples to other places, so that we can write new t_ctid
- * links that preserve the chain relationship.
+ * If this tuple is in a chain of tuples created in updates by
+ * "recent" transactions then we have to move the whole chain of
+ * tuples to other places, so that we can write new t_ctid links
+ * that preserve the chain relationship.
*
* This test is complicated. Read it as "if tuple is a recently
- * created updated version, OR if it is an obsoleted version".
- * (In the second half of the test, we needn't make any check
- * on XMAX --- it must be recently obsoleted, else scan_heap
- * would have deemed it removable.)
+ * created updated version, OR if it is an obsoleted version". (In
+ * the second half of the test, we needn't make any check on XMAX
+ * --- it must be recently obsoleted, else scan_heap would have
+ * deemed it removable.)
*
- * NOTE: this test is not 100% accurate: it is possible for a
- * tuple to be an updated one with recent xmin, and yet not
- * match any new_tid entry in the vtlinks list. Presumably
- * there was once a parent tuple with xmax matching the xmin,
- * but it's possible that that tuple has been removed --- for
- * example, if it had xmin = xmax and wasn't itself an updated
- * version, then HeapTupleSatisfiesVacuum would deem it removable
- * as soon as the xmin xact completes.
+ * NOTE: this test is not 100% accurate: it is possible for a tuple
+ * to be an updated one with recent xmin, and yet not match any
+ * new_tid entry in the vtlinks list. Presumably there was once a
+ * parent tuple with xmax matching the xmin, but it's possible
+ * that that tuple has been removed --- for example, if it had
+ * xmin = xmax and wasn't itself an updated version, then
+ * HeapTupleSatisfiesVacuum would deem it removable as soon as the
+ * xmin xact completes.
*
- * To be on the safe side, we abandon the repair_frag process if
- * we cannot find the parent tuple in vtlinks. This may be
- * overly conservative; AFAICS it would be safe to move the
- * chain.
+ * To be on the safe side, we abandon the repair_frag process if we
+ * cannot find the parent tuple in vtlinks. This may be overly
+ * conservative; AFAICS it would be safe to move the chain.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
- !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
- OldestXmin)) ||
+ !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
+ OldestXmin)) ||
(!(tuple.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_IS_LOCKED)) &&
!(ItemPointerEquals(&(tuple.t_self),
}
/*
- * If this tuple is in the begin/middle of the chain then
- * we have to move to the end of chain. As with any
- * t_ctid chase, we have to verify that each new tuple
- * is really the descendant of the tuple we came from.
+ * If this tuple is in the begin/middle of the chain then we
+ * have to move to the end of chain. As with any t_ctid
+ * chase, we have to verify that each new tuple is really the
+ * descendant of the tuple we came from.
*/
while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_IS_LOCKED)) &&
free_vtmove = 100;
/*
- * Now, walk backwards up the chain (towards older tuples)
- * and check if all items in chain can be moved. We record
- * all the moves that need to be made in the vtmove array.
+ * Now, walk backwards up the chain (towards older tuples) and
+ * check if all items in chain can be moved. We record all
+ * the moves that need to be made in the vtmove array.
*/
for (;;)
{
/* Done if at beginning of chain */
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
- TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
- OldestXmin))
- break; /* out of check-all-items loop */
+ TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
+ OldestXmin))
+ break; /* out of check-all-items loop */
/* Move to tuple with prior row version */
vtld.new_tid = tp.t_self;
}
tp.t_self = vtlp->this_tid;
Pbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&(tp.t_self)));
+ ItemPointerGetBlockNumber(&(tp.t_self)));
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
- ItemPointerGetOffsetNumber(&(tp.t_self)));
+ ItemPointerGetOffsetNumber(&(tp.t_self)));
/* this can't happen since we saw tuple earlier: */
if (!ItemIdIsUsed(Pitemid))
elog(ERROR, "parent itemid marked as unused");
/*
* Read above about cases when !ItemIdIsUsed(nextItemid)
- * (child item is removed)... Due to the fact that at
- * the moment we don't remove unuseful part of
- * update-chain, it's possible to get non-matching parent
- * row here. Like as in the case which caused this
- * problem, we stop shrinking here. I could try to
- * find real parent row but want not to do it because
- * of real solution will be implemented anyway, later,
- * and we are too close to 6.5 release. - vadim
- * 06/11/99
+ * (child item is removed)... Due to the fact that at the
+ * moment we don't remove unuseful part of update-chain,
+ * it's possible to get non-matching parent row here. Like
+ * as in the case which caused this problem, we stop
+ * shrinking here. I could try to find real parent row but
+ * want not to do it because of real solution will be
+ * implemented anyway, later, and we are too close to 6.5
+ * release. - vadim 06/11/99
*/
if ((PTdata->t_infomask & HEAP_XMAX_IS_MULTI) ||
!(TransactionIdEquals(HeapTupleHeaderGetXmax(PTdata),
- HeapTupleHeaderGetXmin(tp.t_data))))
+ HeapTupleHeaderGetXmin(tp.t_data))))
{
ReleaseBuffer(Pbuf);
elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag");
if (chain_move_failed)
{
/*
- * Undo changes to offsets_used state. We don't
- * bother cleaning up the amount-free state, since
- * we're not going to do any further tuple motion.
+ * Undo changes to offsets_used state. We don't bother
+ * cleaning up the amount-free state, since we're not
+ * going to do any further tuple motion.
*/
for (i = 0; i < num_vtmove; i++)
{
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
Cbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&(tuple.t_self)));
+ ItemPointerGetBlockNumber(&(tuple.t_self)));
/* Get page to move to */
dst_buffer = ReadBuffer(onerel, destvacpage->blkno);
Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage,
- ItemPointerGetOffsetNumber(&(tuple.t_self)));
+ ItemPointerGetOffsetNumber(&(tuple.t_self)));
tuple.t_datamcxt = NULL;
tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
} /* walk along page */
/*
- * If we broke out of the walk-along-page loop early (ie, still
- * have offnum <= maxoff), then we failed to move some tuple off
- * this page. No point in shrinking any more, so clean up and
- * exit the per-page loop.
+ * If we broke out of the walk-along-page loop early (ie, still have
+ * offnum <= maxoff), then we failed to move some tuple off this page.
+ * No point in shrinking any more, so clean up and exit the per-page
+ * loop.
*/
if (offnum < maxoff && keep_tuples > 0)
{
OffsetNumber off;
/*
- * Fix vacpage state for any unvisited tuples remaining on
- * page
+ * Fix vacpage state for any unvisited tuples remaining on page
*/
for (off = OffsetNumberNext(offnum);
off <= maxoff;
continue;
/*
- * See comments in the walk-along-page loop above about
- * why only MOVED_OFF tuples should be found here.
+ * See comments in the walk-along-page loop above about why
+ * only MOVED_OFF tuples should be found here.
*/
if (htup->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected");
* We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our
- * exclusive access to the relation. However, that would require
- * a lot of extra code to close and re-open the relation, indexes,
- * etc. For now, a quick hack: record status of current
- * transaction as committed, and continue.
+ * exclusive access to the relation. However, that would require a
+ * lot of extra code to close and re-open the relation, indexes, etc.
+ * For now, a quick hack: record status of current transaction as
+ * committed, and continue.
*/
RecordTransactionCommit();
}
/*
* We are not going to move any more tuples across pages, but we still
- * need to apply vacuum_page to compact free space in the remaining
- * pages in vacuum_pages list. Note that some of these pages may also
- * be in the fraged_pages list, and may have had tuples moved onto
- * them; if so, we already did vacuum_page and needn't do it again.
+ * need to apply vacuum_page to compact free space in the remaining pages
+ * in vacuum_pages list. Note that some of these pages may also be in the
+ * fraged_pages list, and may have had tuples moved onto them; if so, we
+ * already did vacuum_page and needn't do it again.
*/
for (i = 0, curpage = vacuum_pages->pagedesc;
i < vacuumed_pages;
last_move_dest_block, num_moved);
/*
- * It'd be cleaner to make this report at the bottom of this routine,
- * but then the rusage would double-count the second pass of index
- * vacuuming. So do it here and ignore the relatively small amount of
- * processing that occurs below.
+ * It'd be cleaner to make this report at the bottom of this routine, but
+ * then the rusage would double-count the second pass of index vacuuming.
+ * So do it here and ignore the relatively small amount of processing that
+ * occurs below.
*/
ereport(elevel,
- (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
- RelationGetRelationName(onerel),
- num_moved, nblocks, blkno),
- errdetail("%s.",
- pg_rusage_show(&ru0))));
+ (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
+ RelationGetRelationName(onerel),
+ num_moved, nblocks, blkno),
+ errdetail("%s.",
+ pg_rusage_show(&ru0))));
/*
* Reflect the motion of system tuples to catalog cache here.
/* re-sort Nvacpagelist.pagedesc */
for (vpleft = Nvacpagelist.pagedesc,
- vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
+ vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
vpleft < vpright; vpleft++, vpright--)
{
vpsave = *vpleft;
}
/*
- * keep_tuples is the number of tuples that have been moved
- * off a page during chain moves but not been scanned over
- * subsequently. The tuple ids of these tuples are not
- * recorded as free offsets for any VacPage, so they will not
- * be cleared from the indexes.
+ * keep_tuples is the number of tuples that have been moved off a
+ * page during chain moves but not been scanned over subsequently.
+ * The tuple ids of these tuples are not recorded as free offsets
+ * for any VacPage, so they will not be cleared from the indexes.
*/
Assert(keep_tuples >= 0);
for (i = 0; i < nindexes; i++)
/*
* Clean moved-off tuples from last page in Nvacpagelist list.
*
- * We need only do this in this one page, because higher-numbered
- * pages are going to be truncated from the relation entirely.
- * But see comments for update_hint_bits().
+ * We need only do this in this one page, because higher-numbered pages
+ * are going to be truncated from the relation entirely. But see
+ * comments for update_hint_bits().
*/
if (vacpage->blkno == (blkno - 1) &&
vacpage->offsets_free > 0)
continue;
/*
- * See comments in the walk-along-page loop above about
- * why only MOVED_OFF tuples should be found here.
+ * See comments in the walk-along-page loop above about why
+ * only MOVED_OFF tuples should be found here.
*/
if (htup->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected");
else
{
/*
- * No XLOG record, but still need to flag that XID exists
- * on disk
+ * No XLOG record, but still need to flag that XID exists on
+ * disk
*/
MyXactMadeTempRelUpdate = true;
}
/*
* If this page was not used before - clean it.
*
- * NOTE: a nasty bug used to lurk here. It is possible for the source
- * and destination pages to be the same (since this tuple-chain member
- * can be on a page lower than the one we're currently processing in
- * the outer loop). If that's true, then after vacuum_page() the
- * source tuple will have been moved, and tuple.t_data will be
- * pointing at garbage. Therefore we must do everything that uses
- * old_tup->t_data BEFORE this step!!
+ * NOTE: a nasty bug used to lurk here. It is possible for the source and
+ * destination pages to be the same (since this tuple-chain member can be
+ * on a page lower than the one we're currently processing in the outer
+ * loop). If that's true, then after vacuum_page() the source tuple will
+ * have been moved, and tuple.t_data will be pointing at garbage.
+ * Therefore we must do everything that uses old_tup->t_data BEFORE this
+ * step!!
*
- * This path is different from the other callers of vacuum_page, because
- * we have already incremented the vacpage's offsets_used field to
- * account for the tuple(s) we expect to move onto the page. Therefore
- * vacuum_page's check for offsets_used == 0 is wrong. But since
- * that's a good debugging check for all other callers, we work around
- * it here rather than remove it.
+ * This path is different from the other callers of vacuum_page, because we
+ * have already incremented the vacpage's offsets_used field to account
+ * for the tuple(s) we expect to move onto the page. Therefore
+ * vacuum_page's check for offsets_used == 0 is wrong. But since that's a
+ * good debugging check for all other callers, we work around it here
+ * rather than remove it.
*/
if (!PageIsEmpty(dst_page) && cleanVpd)
{
}
/*
- * Update the state of the copied tuple, and store it on the
- * destination page.
+ * Update the state of the copied tuple, and store it on the destination
+ * page.
*/
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
ItemPointerSet(&(newtup.t_self), dst_vacpage->blkno, newoff);
/*
- * Set new tuple's t_ctid pointing to itself if last tuple in chain,
- * and to next tuple in chain otherwise. (Since we move the chain
- * in reverse order, this is actually the previously processed tuple.)
+ * Set new tuple's t_ctid pointing to itself if last tuple in chain, and
+ * to next tuple in chain otherwise. (Since we move the chain in reverse
+ * order, this is actually the previously processed tuple.)
*/
if (!ItemPointerIsValid(ctid))
newtup.t_data->t_ctid = newtup.t_self;
* register invalidation of source tuple in catcaches.
*
* (Note: we do not need to register the copied tuple, because we are not
- * changing the tuple contents and so there cannot be any need to
- * flush negative catcache entries.)
+ * changing the tuple contents and so there cannot be any need to flush
+ * negative catcache entries.)
*/
CacheInvalidateHeapTuple(rel, old_tup);
/*
* Even though we're not planning to delete anything, we use the
- * ambulkdelete call, because (a) the scan happens within the index AM
- * for more speed, and (b) it may want to pass private statistics to
- * the amvacuumcleanup call.
+ * ambulkdelete call, because (a) the scan happens within the index AM for
+ * more speed, and (b) it may want to pass private statistics to the
+ * amvacuumcleanup call.
*/
stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%u index pages have been deleted, %u are currently reusable.\n"
- "%s.",
- stats->pages_deleted, stats->pages_free,
- pg_rusage_show(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s.",
+ stats->pages_deleted, stats->pages_free,
+ pg_rusage_show(&ru0))));
/*
- * Check for tuple count mismatch. If the index is partial, then it's
- * OK for it to have fewer tuples than the heap; else we got trouble.
+ * Check for tuple count mismatch. If the index is partial, then it's OK
+ * for it to have fewer tuples than the heap; else we got trouble.
*/
if (stats->num_index_tuples != num_tuples)
{
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%.0f index row versions were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
- "%s.",
- stats->tuples_removed,
- stats->pages_deleted, stats->pages_free,
- pg_rusage_show(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
+ "%s.",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ pg_rusage_show(&ru0))));
/*
- * Check for tuple count mismatch. If the index is partial, then it's
- * OK for it to have fewer tuples than the heap; else we got trouble.
+ * Check for tuple count mismatch. If the index is partial, then it's OK
+ * for it to have fewer tuples than the heap; else we got trouble.
*/
if (stats->num_index_tuples != num_tuples + keep_tuples)
{
ereport(WARNING,
(errmsg("index \"%s\" contains %.0f row versions, but table contains %.0f row versions",
RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples + keep_tuples),
+ stats->num_index_tuples, num_tuples + keep_tuples),
errhint("Rebuild the index with REINDEX.")));
}
/*
* We only report pages with free space at least equal to the average
- * request size --- this avoids cluttering FSM with uselessly-small
- * bits of space. Although FSM would discard pages with little free
- * space anyway, it's important to do this prefiltering because (a) it
- * reduces the time spent holding the FSM lock in
- * RecordRelationFreeSpace, and (b) FSM uses the number of pages
- * reported as a statistic for guiding space management. If we didn't
- * threshold our reports the same way vacuumlazy.c does, we'd be
- * skewing that statistic.
+ * request size --- this avoids cluttering FSM with uselessly-small bits
+ * of space. Although FSM would discard pages with little free space
+ * anyway, it's important to do this prefiltering because (a) it reduces
+ * the time spent holding the FSM lock in RecordRelationFreeSpace, and (b)
+ * FSM uses the number of pages reported as a statistic for guiding space
+ * management. If we didn't threshold our reports the same way
+ * vacuumlazy.c does, we'd be skewing that statistic.
*/
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
for (i = 0; i < nPages; i++)
{
/*
- * fraged_pages may contain entries for pages that we later
- * decided to truncate from the relation; don't enter them into
- * the free space map!
+ * fraged_pages may contain entries for pages that we later decided to
+ * truncate from the relation; don't enter them into the free space
+ * map!
*/
if (pagedesc[i]->blkno >= rel_pages)
break;
/* allocate a VacPageData entry */
newvacpage = (VacPage) palloc(sizeof(VacPageData) +
- vacpage->offsets_free * sizeof(OffsetNumber));
+ vacpage->offsets_free * sizeof(OffsetNumber));
/* fill it in */
if (vacpage->offsets_free > 0)
}
/*
- * Release the resources acquired by vac_open_indexes. Optionally release
+ * Release the resources acquired by vac_open_indexes. Optionally release
* the locks (say NoLock to keep 'em).
*/
void
vac_is_partial_index(Relation indrel)
{
/*
- * If the index's AM doesn't support nulls, it's partial for our
- * purposes
+ * If the index's AM doesn't support nulls, it's partial for our purposes
*/
if (!indrel->rd_am->amindexnulls)
return true;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.60 2005/10/03 22:52:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.61 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Overall statistics about rel */
BlockNumber rel_pages;
double rel_tuples;
- BlockNumber pages_removed;
+ BlockNumber pages_removed;
double tuples_deleted;
BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
Size threshold; /* minimum interesting free space */
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
static void lazy_scan_index(Relation indrel, LVRelStats *vacrelstats);
static void lazy_vacuum_index(Relation indrel,
- double *index_tups_vacuumed,
- BlockNumber *index_pages_removed,
- LVRelStats *vacrelstats);
+ double *index_tups_vacuumed,
+ BlockNumber *index_pages_removed,
+ LVRelStats *vacrelstats);
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
*/
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
- possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
+ possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
lazy_truncate_heap(onerel, vacrelstats);
/* Update shared free space map with final free space info */
/* report results to the stats collector, too */
pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared,
- vacstmt->analyze, vacrelstats->rel_tuples);
+ vacstmt->analyze, vacrelstats->rel_tuples);
}
* track of the total number of rows and pages removed from each index.
* index_tups_vacuumed[i] is the number removed so far from the i'th
* index. (For partial indexes this could well be different from
- * tups_vacuumed.) Likewise for index_pages_removed[i].
+ * tups_vacuumed.) Likewise for index_pages_removed[i].
*/
index_tups_vacuumed = (double *) palloc0(nindexes * sizeof(double));
index_pages_removed = (BlockNumber *) palloc0(nindexes * sizeof(BlockNumber));
vacuum_delay_point();
/*
- * If we are close to overrunning the available space for
- * dead-tuple TIDs, pause and do a cycle of vacuuming before we
- * tackle this page.
+ * If we are close to overrunning the available space for dead-tuple
+ * TIDs, pause and do a cycle of vacuuming before we tackle this page.
*/
if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
vacrelstats->num_dead_tuples > 0)
if (PageIsNew(page))
{
/*
- * An all-zeroes page could be left over if a backend extends
- * the relation but crashes before initializing the page.
- * Reclaim such pages for use.
+ * An all-zeroes page could be left over if a backend extends the
+ * relation but crashes before initializing the page. Reclaim such
+ * pages for use.
*
- * We have to be careful here because we could be looking at
- * a page that someone has just added to the relation and not
- * yet been able to initialize (see RelationGetBufferForTuple).
- * To interlock against that, release the buffer read lock
- * (which we must do anyway) and grab the relation extension
- * lock before re-locking in exclusive mode. If the page is
- * still uninitialized by then, it must be left over from a
- * crashed backend, and we can initialize it.
+ * We have to be careful here because we could be looking at a page
+ * that someone has just added to the relation and not yet been
+ * able to initialize (see RelationGetBufferForTuple). To
+ * interlock against that, release the buffer read lock (which we
+ * must do anyway) and grab the relation extension lock before
+ * re-locking in exclusive mode. If the page is still
+ * uninitialized by then, it must be left over from a crashed
+ * backend, and we can initialize it.
*
- * We don't really need the relation lock when this is a new
- * or temp relation, but it's probably not worth the code space
- * to check that, since this surely isn't a critical path.
+ * We don't really need the relation lock when this is a new or temp
+ * relation, but it's probably not worth the code space to check
+ * that, since this surely isn't a critical path.
*
- * Note: the comparable code in vacuum.c need not worry
- * because it's got exclusive lock on the whole relation.
+ * Note: the comparable code in vacuum.c need not worry because it's
+ * got exclusive lock on the whole relation.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockRelationForExtension(onerel, ExclusiveLock);
if (PageIsNew(page))
{
ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
empty_pages++;
lazy_record_free_space(vacrelstats, blkno,
case HEAPTUPLE_LIVE:
/*
- * Tuple is good. Consider whether to replace its
- * xmin value with FrozenTransactionId.
+ * Tuple is good. Consider whether to replace its xmin
+ * value with FrozenTransactionId.
*
- * NB: Since we hold only a shared buffer lock here, we
- * are assuming that TransactionId read/write is
- * atomic. This is not the only place that makes such
- * an assumption. It'd be possible to avoid the
- * assumption by momentarily acquiring exclusive lock,
- * but for the moment I see no need to.
+ * NB: Since we hold only a shared buffer lock here, we are
+ * assuming that TransactionId read/write is atomic. This
+ * is not the only place that makes such an assumption.
+ * It'd be possible to avoid the assumption by momentarily
+ * acquiring exclusive lock, but for the moment I see no
+ * need to.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
case HEAPTUPLE_RECENTLY_DEAD:
/*
- * If tuple is recently deleted then we must not
- * remove it from relation.
+ * If tuple is recently deleted then we must not remove it
+ * from relation.
*/
nkeep += 1;
break;
/*
* If we remembered any tuples for deletion, then the page will be
- * visited again by lazy_vacuum_heap, which will compute and
- * record its post-compaction free space. If not, then we're done
- * with this page, so remember its free space as-is.
+ * visited again by lazy_vacuum_heap, which will compute and record
+ * its post-compaction free space. If not, then we're done with this
+ * page, so remember its free space as-is.
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
{
pg_rusage_init(&ru0);
/*
- * Acquire appropriate type of lock on index: must be exclusive if
- * index AM isn't concurrent-safe.
+ * Acquire appropriate type of lock on index: must be exclusive if index
+ * AM isn't concurrent-safe.
*/
if (indrel->rd_am->amconcurrent)
LockRelation(indrel, RowExclusiveLock);
/*
* Even though we're not planning to delete anything, we use the
- * ambulkdelete call, because (a) the scan happens within the index AM
- * for more speed, and (b) it may want to pass private statistics to
- * the amvacuumcleanup call.
+ * ambulkdelete call, because (a) the scan happens within the index AM for
+ * more speed, and (b) it may want to pass private statistics to the
+ * amvacuumcleanup call.
*/
stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%u index pages have been deleted, %u are currently reusable.\n"
- "%s.",
- stats->pages_deleted, stats->pages_free,
- pg_rusage_show(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s.",
+ stats->pages_deleted, stats->pages_free,
+ pg_rusage_show(&ru0))));
pfree(stats);
}
pg_rusage_init(&ru0);
/*
- * Acquire appropriate type of lock on index: must be exclusive if
- * index AM isn't concurrent-safe.
+ * Acquire appropriate type of lock on index: must be exclusive if index
+ * AM isn't concurrent-safe.
*/
if (indrel->rd_am->amconcurrent)
LockRelation(indrel, RowExclusiveLock);
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%.0f index row versions were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
- "%s.",
- stats->tuples_removed,
- stats->pages_deleted, stats->pages_free,
- pg_rusage_show(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
+ "%s.",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ pg_rusage_show(&ru0))));
pfree(stats);
}
pg_rusage_init(&ru0);
/*
- * We need full exclusive lock on the relation in order to do
- * truncation. If we can't get it, give up rather than waiting --- we
- * don't want to block other backends, and we don't want to deadlock
- * (which is quite possible considering we already hold a lower-grade
- * lock).
+ * We need full exclusive lock on the relation in order to do truncation.
+ * If we can't get it, give up rather than waiting --- we don't want to
+ * block other backends, and we don't want to deadlock (which is quite
+ * possible considering we already hold a lower-grade lock).
*/
if (!ConditionalLockRelation(onerel, AccessExclusiveLock))
return;
/*
* Now that we have exclusive lock, look to see if the rel has grown
- * whilst we were vacuuming with non-exclusive lock. If so, give up;
- * the newly added pages presumably contain non-deletable tuples.
+ * whilst we were vacuuming with non-exclusive lock. If so, give up; the
+ * newly added pages presumably contain non-deletable tuples.
*/
new_rel_pages = RelationGetNumberOfBlocks(onerel);
if (new_rel_pages != old_rel_pages)
/*
* Scan backwards from the end to verify that the end pages actually
- * contain nothing we need to keep. This is *necessary*, not
- * optional, because other backends could have added tuples to these
- * pages whilst we were vacuuming.
+ * contain nothing we need to keep. This is *necessary*, not optional,
+ * because other backends could have added tuples to these pages whilst we
+ * were vacuuming.
*/
new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
case HEAPTUPLE_RECENTLY_DEAD:
/*
- * If tuple is recently deleted then we must not
- * remove it from relation.
+ * If tuple is recently deleted then we must not remove it
+ * from relation.
*/
break;
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
* If we fall out of the loop, all the previously-thought-to-be-empty
- * pages really are; we need not bother to look at the last
- * known-nonempty page.
+ * pages really are; we need not bother to look at the last known-nonempty
+ * page.
*/
return vacrelstats->nonempty_pages;
}
/*
* A page with less than stats->threshold free space will be forgotten
* immediately, and never passed to the free space map. Removing the
- * uselessly small entries early saves cycles, and in particular
- * reduces the amount of time we spend holding the FSM lock when we
- * finally call RecordRelationFreeSpace. Since the FSM will probably
- * drop pages with little free space anyway, there's no point in
- * making this really small.
+ * uselessly small entries early saves cycles, and in particular reduces
+ * the amount of time we spend holding the FSM lock when we finally call
+ * RecordRelationFreeSpace. Since the FSM will probably drop pages with
+ * little free space anyway, there's no point in making this really small.
*
- * XXX Is it worth trying to measure average tuple size, and using that
- * to adjust the threshold? Would be worthwhile if FSM has no stats
- * yet for this relation. But changing the threshold as we scan the
- * rel might lead to bizarre behavior, too. Also, it's probably
- * better if vacuum.c has the same thresholding behavior as we do
- * here.
+ * XXX Is it worth trying to measure average tuple size, and using that to
+ * adjust the threshold? Would be worthwhile if FSM has no stats yet for
+ * this relation. But changing the threshold as we scan the rel might
+ * lead to bizarre behavior, too. Also, it's probably better if vacuum.c
+ * has the same thresholding behavior as we do here.
*/
if (avail < vacrelstats->threshold)
return;
{
/*
* Scan backwards through the array, "sift-up" each value into its
- * correct position. We can start the scan at n/2-1 since each
- * entry above that position has no children to worry about.
+ * correct position. We can start the scan at n/2-1 since each entry
+ * above that position has no children to worry about.
*/
int l = n / 2;
{
/*
* Notionally, we replace the zero'th entry with the new data, and
- * then sift-up to maintain the heap property. Physically, the
- * new data doesn't get stored into the arrays until we find the
- * right location for it.
+ * then sift-up to maintain the heap property. Physically, the new
+ * data doesn't get stored into the arrays until we find the right
+ * location for it.
*/
int i = 0; /* i is where the "hole" is */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.113 2005/08/08 23:39:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.114 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for parameter \"datestyle\"")));
+ errmsg("invalid list syntax for parameter \"datestyle\"")));
return NULL;
}
else if (pg_strcasecmp(tok, "DEFAULT") == 0)
{
/*
- * Easiest way to get the current DEFAULT state is to fetch
- * the DEFAULT string from guc.c and recursively parse it.
+ * Easiest way to get the current DEFAULT state is to fetch the
+ * DEFAULT string from guc.c and recursively parse it.
*
- * We can't simply "return assign_datestyle(...)" because we need
- * to handle constructs like "DEFAULT, ISO".
+ * We can't simply "return assign_datestyle(...)" because we need to
+ * handle constructs like "DEFAULT, ISO".
*/
int saveDateStyle = DateStyle;
int saveDateOrder = DateOrder;
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized \"datestyle\" key word: \"%s\"",
- tok)));
+ errmsg("unrecognized \"datestyle\" key word: \"%s\"",
+ tok)));
ok = false;
break;
}
}
/*
- * Finally, it's safe to assign to the global variables; the
- * assignment cannot fail now.
+ * Finally, it's safe to assign to the global variables; the assignment
+ * cannot fail now.
*/
DateStyle = newDateStyle;
DateOrder = newDateOrder;
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport, which is not desirable for GUC. We did what we could
- * to guard against this in flatten_set_variable_args, but a
- * string coming in from postgresql.conf might contain anything.
+ * ereport, which is not desirable for GUC. We did what we could to
+ * guard against this in flatten_set_variable_args, but a string
+ * coming in from postgresql.conf might contain anything.
*/
interval = DatumGetIntervalP(DirectFunctionCall3(interval_in,
- CStringGetDatum(val),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(-1)));
+ CStringGetDatum(val),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(-1)));
pfree(val);
if (interval->month != 0)
* UNKNOWN is the value shown as the "default" for TimeZone in
* guc.c. We interpret it as being a complete no-op; we don't
* change the timezone setting. Note that if there is a known
- * timezone setting, we will return that name rather than
- * UNKNOWN as the canonical spelling.
+ * timezone setting, we will return that name rather than UNKNOWN
+ * as the canonical spelling.
*
- * During GUC initialization, since the timezone library isn't
- * set up yet, pg_get_timezone_name will return NULL and we
- * will leave the setting as UNKNOWN. If this isn't
- * overridden from the config file then
- * pg_timezone_initialize() will eventually select a default
- * value from the environment.
+ * During GUC initialization, since the timezone library isn't set up
+ * yet, pg_get_timezone_name will return NULL and we will leave
+ * the setting as UNKNOWN. If this isn't overridden from the
+ * config file then pg_timezone_initialize() will eventually
+ * select a default value from the environment.
*/
if (doit)
{
/*
* Otherwise assume it is a timezone name, and try to load it.
*/
- pg_tz *new_tz;
+ pg_tz *new_tz;
new_tz = pg_tzset(value);
{
ereport((source >= PGC_S_INTERACTIVE) ? ERROR : LOG,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("time zone \"%s\" appears to use leap seconds",
- value),
- errdetail("PostgreSQL does not support leap seconds.")));
+ errmsg("time zone \"%s\" appears to use leap seconds",
+ value),
+ errdetail("PostgreSQL does not support leap seconds.")));
return NULL;
}
if (!result)
return NULL;
snprintf(result, 64, "%.5f",
- (double) (-CTimeZone) / (double)SECS_PER_HOUR);
+ (double) (-CTimeZone) / (double) SECS_PER_HOUR);
}
else
result = strdup(value);
if (HasCTZSet)
{
- Interval interval;
+ Interval interval;
interval.month = 0;
interval.day = 0;
#endif
tzn = DatumGetCString(DirectFunctionCall1(interval_out,
- IntervalPGetDatum(&interval)));
+ IntervalPGetDatum(&interval)));
}
else
tzn = pg_get_timezone_name(global_timezone);
return NULL;
/*
- * Note: if we are in startup phase then SetClientEncoding may not be
- * able to really set the encoding. In this case we will assume that
- * the encoding is okay, and InitializeClientEncoding() will fix
- * things once initialization is complete.
+ * Note: if we are in startup phase then SetClientEncoding may not be able
+ * to really set the encoding. In this case we will assume that the
+ * encoding is okay, and InitializeClientEncoding() will fix things once
+ * initialization is complete.
*/
if (SetClientEncoding(encoding, doit) < 0)
{
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("conversion between %s and %s is not supported",
- value, GetDatabaseEncodingName())));
+ errmsg("conversion between %s and %s is not supported",
+ value, GetDatabaseEncodingName())));
return NULL;
}
return value;
const char *
assign_session_authorization(const char *value, bool doit, GucSource source)
{
- Oid roleid = InvalidOid;
+ Oid roleid = InvalidOid;
bool is_superuser = false;
const char *actual_rolename = NULL;
char *result;
(value[NAMEDATALEN] == 'T' || value[NAMEDATALEN] == 'F'))
{
/* might be a saved userid string */
- Oid savedoid;
+ Oid savedoid;
char *endptr;
savedoid = (Oid) strtoul(value + NAMEDATALEN + 1, &endptr, 10);
if (!IsTransactionState())
{
/*
- * Can't do catalog lookups, so fail. The upshot of this is
- * that session_authorization cannot be set in
- * postgresql.conf, which seems like a good thing anyway.
+ * Can't do catalog lookups, so fail. The upshot of this is that
+ * session_authorization cannot be set in postgresql.conf, which
+ * seems like a good thing anyway.
*/
return NULL;
}
* assign_session_authorization
*/
const char *value = session_authorization_string;
- Oid savedoid;
+ Oid savedoid;
char *endptr;
Assert(strspn(value, "x") == NAMEDATALEN &&
const char *
assign_role(const char *value, bool doit, GucSource source)
{
- Oid roleid = InvalidOid;
+ Oid roleid = InvalidOid;
bool is_superuser = false;
const char *actual_rolename = value;
char *result;
(value[NAMEDATALEN] == 'T' || value[NAMEDATALEN] == 'F'))
{
/* might be a saved userid string */
- Oid savedoid;
+ Oid savedoid;
char *endptr;
savedoid = (Oid) strtoul(value + NAMEDATALEN + 1, &endptr, 10);
if (!IsTransactionState())
{
/*
- * Can't do catalog lookups, so fail. The upshot of this is
- * that role cannot be set in postgresql.conf, which seems
- * like a good thing anyway.
+ * Can't do catalog lookups, so fail. The upshot of this is that
+ * role cannot be set in postgresql.conf, which seems like a good
+ * thing anyway.
*/
return NULL;
}
show_role(void)
{
/*
- * Extract the role name from the stored string; see
- * assign_role
+ * Extract the role name from the stored string; see assign_role
*/
const char *value = role_string;
- Oid savedoid;
+ Oid savedoid;
char *endptr;
/* This special case only applies if no SET ROLE has been done */
Assert(endptr != value + NAMEDATALEN + 1 && *endptr == ',');
/*
- * Check that the stored string still matches the effective setting,
- * else return "none". This is a kluge to deal with the fact that
- * SET SESSION AUTHORIZATION logically resets SET ROLE to NONE, but
- * we cannot set the GUC role variable from assign_session_authorization
- * (because we haven't got enough info to call set_config_option).
+ * Check that the stored string still matches the effective setting, else
+ * return "none". This is a kluge to deal with the fact that SET SESSION
+ * AUTHORIZATION logically resets SET ROLE to NONE, but we cannot set the
+ * GUC role variable from assign_session_authorization (because we haven't
+ * got enough info to call set_config_option).
*/
if (savedoid != GetCurrentRoleId())
return "none";
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.90 2005/04/14 01:38:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.91 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (IsA(node, Query))
{
- Query *query = (Query *) node;
- ListCell *rtable;
+ Query *query = (Query *) node;
+ ListCell *rtable;
- foreach (rtable, query->rtable)
+ foreach(rtable, query->rtable)
{
RangeTblEntry *rte = lfirst(rtable);
+
if (rte->rtekind == RTE_RELATION)
{
- Relation rel = heap_open(rte->relid, AccessShareLock);
- bool istemp = rel->rd_istemp;
+ Relation rel = heap_open(rte->relid, AccessShareLock);
+ bool istemp = rel->rd_istemp;
+
heap_close(rel, AccessShareLock);
if (istemp)
return true;
ListCell *t;
/*
- * create a list of ColumnDef nodes based on the names and types of
- * the (non-junk) targetlist items from the view's SELECT list.
+ * create a list of ColumnDef nodes based on the names and types of the
+ * (non-junk) targetlist items from the view's SELECT list.
*/
attrList = NIL;
foreach(t, tlist)
RelationGetRelationName(rel));
/*
- * Due to the namespace visibility rules for temporary
- * objects, we should only end up replacing a temporary view
- * with another temporary view, and vice versa.
+ * Due to the namespace visibility rules for temporary objects, we
+ * should only end up replacing a temporary view with another
+ * temporary view, and vice versa.
*/
Assert(relation->istemp == rel->rd_istemp);
/*
- * Create a tuple descriptor to compare against the existing view,
- * and verify it matches.
+ * Create a tuple descriptor to compare against the existing view, and
+ * verify it matches.
*/
descriptor = BuildDescForRelation(attrList);
checkViewTupleDesc(descriptor, rel->rd_att);
else
{
/*
- * now set the parameters for keys/inheritance etc. All of these
- * are uninteresting for views...
+ * now set the parameters for keys/inheritance etc. All of these are
+ * uninteresting for views...
*/
createStmt->relation = (RangeVar *) relation;
createStmt->tableElts = attrList;
/*
* finally create the relation (this will error out if there's an
- * existing view, so we don't need more code to complain if
- * "replace" is false).
+ * existing view, so we don't need more code to complain if "replace"
+ * is false).
*/
return DefineRelation(createStmt, RELKIND_VIEW);
}
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("cannot change data type of view column \"%s\"",
- NameStr(oldattr->attname))));
+ errmsg("cannot change data type of view column \"%s\"",
+ NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}
RuleStmt *rule;
/*
- * Create a RuleStmt that corresponds to the suitable rewrite rule
- * args for DefineQueryRewrite();
+ * Create a RuleStmt that corresponds to the suitable rewrite rule args
+ * for DefineQueryRewrite();
*/
rule = makeNode(RuleStmt);
rule->relation = copyObject((RangeVar *) view);
/*
* Make a copy of the given parsetree. It's not so much that we don't
- * want to scribble on our input, it's that the parser has a bad habit
- * of outputting multiple links to the same subtree for constructs
- * like BETWEEN, and we mustn't have OffsetVarNodes increment the
- * varno of a Var node twice. copyObject will expand any
- * multiply-referenced subtree into multiple copies.
+ * want to scribble on our input, it's that the parser has a bad habit of
+ * outputting multiple links to the same subtree for constructs like
+ * BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a
+ * Var node twice. copyObject will expand any multiply-referenced subtree
+ * into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
viewRel = relation_open(viewOid, AccessShareLock);
/*
- * Create the 2 new range table entries and form the new range
- * table... OLD first, then NEW....
+ * Create the 2 new range table entries and form the new range table...
+ * OLD first, then NEW....
*/
rt_entry1 = addRangeTableEntryForRelation(NULL, viewRel,
makeAlias("*OLD*", NIL),
Oid viewOid;
/*
- * If the user didn't explicitly ask for a temporary view, check
- * whether we need one implicitly.
+ * If the user didn't explicitly ask for a temporary view, check whether
+ * we need one implicitly.
*/
if (!view->istemp)
{
(errmsg("view \"%s\" will be a temporary view",
view->relname)));
}
-
+
/*
* Create the view relation
*
- * NOTE: if it already exists and replace is false, the xact will be
- * aborted.
+ * NOTE: if it already exists and replace is false, the xact will be aborted.
*/
viewOid = DefineVirtualRelation(view, viewParse->targetList, replace);
/*
- * The relation we have just created is not visible to any other
- * commands running with the same transaction & command id. So,
- * increment the command id counter (but do NOT pfree any memory!!!!)
+ * The relation we have just created is not visible to any other commands
+ * running with the same transaction & command id. So, increment the
+ * command id counter (but do NOT pfree any memory!!!!)
*/
CommandCounterIncrement();
/*
- * The range table of 'viewParse' does not contain entries for the
- * "OLD" and "NEW" relations. So... add them!
+ * The range table of 'viewParse' does not contain entries for the "OLD"
+ * and "NEW" relations. So... add them!
*/
viewParse = UpdateRangeTableOfViewParse(viewOid, viewParse);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.84 2005/05/15 21:19:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.85 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* NOTE: the semantics of this are that the first ExecProcNode following
* the restore operation will yield the same tuple as the first one following
- * the mark operation. It is unspecified what happens to the plan node's
+ * the mark operation. It is unspecified what happens to the plan node's
* result TupleTableSlot. (In most cases the result slot is unchanged by
* a restore, but the node may choose to clear it or to load it with the
- * restored-to tuple.) Hence the caller should discard any previously
+ * restored-to tuple.) Hence the caller should discard any previously
* returned TupleTableSlot after doing a restore.
*/
void
{
/*
* At a table scan node, we check whether ExecAssignScanProjectionInfo
- * decided to do projection or not. Most non-scan nodes always
- * project and so we can return "false" immediately. For nodes that
- * don't project but just pass up input tuples, we have to recursively
- * examine the input plan node.
+ * decided to do projection or not. Most non-scan nodes always project
+ * and so we can return "false" immediately. For nodes that don't project
+ * but just pass up input tuples, we have to recursively examine the input
+ * plan node.
*
- * Note: Hash and Material are listed here because they sometimes return
- * an original input tuple, not a copy. But Sort and SetOp never
- * return an original tuple, so they can be treated like projecting
- * nodes.
+ * Note: Hash and Material are listed here because they sometimes return an
+ * original input tuple, not a copy. But Sort and SetOp never return an
+ * original tuple, so they can be treated like projecting nodes.
*/
switch (nodeTag(node))
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.15 2005/05/29 04:23:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.16 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
oldContext = MemoryContextSwitchTo(evalContext);
/*
- * We cannot report a match without checking all the fields, but we
- * can report a non-match as soon as we find unequal fields. So,
- * start comparing at the last field (least significant sort key).
- * That's the most likely to be different if we are dealing with
- * sorted input.
+ * We cannot report a match without checking all the fields, but we can
+ * report a non-match as soon as we find unequal fields. So, start
+ * comparing at the last field (least significant sort key). That's the
+ * most likely to be different if we are dealing with sorted input.
*/
result = true;
oldContext = MemoryContextSwitchTo(evalContext);
/*
- * We cannot report a match without checking all the fields, but we
- * can report a non-match as soon as we find unequal fields. So,
- * start comparing at the last field (least significant sort key).
- * That's the most likely to be different if we are dealing with
- * sorted input.
+ * We cannot report a match without checking all the fields, but we can
+ * report a non-match as soon as we find unequal fields. So, start
+ * comparing at the last field (least significant sort key). That's the
+ * most likely to be different if we are dealing with sorted input.
*/
result = false;
Assert(entrysize >= sizeof(TupleHashEntryData));
hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt,
- sizeof(TupleHashTableData));
+ sizeof(TupleHashTableData));
hashtable->numCols = numCols;
hashtable->keyColIdx = keyColIdx;
hashtable->tablecxt = tablecxt;
hashtable->tempcxt = tempcxt;
hashtable->entrysize = entrysize;
- hashtable->tableslot = NULL; /* will be made on first lookup */
+ hashtable->tableslot = NULL; /* will be made on first lookup */
hashtable->inputslot = NULL;
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.hcxt = tablecxt;
hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
return hashtable;
}
TupleDesc tupdesc;
oldContext = MemoryContextSwitchTo(hashtable->tablecxt);
+
/*
* We copy the input tuple descriptor just for safety --- we assume
* all input tuples will have equivalent descriptors.
/*
* created new entry
*
- * Zero any caller-requested space in the entry. (This zaps
- * the "key data" dynahash.c copied into the new entry, but we
- * don't care since we're about to overwrite it anyway.)
+ * Zero any caller-requested space in the entry. (This zaps the "key
+ * data" dynahash.c copied into the new entry, but we don't care
+ * since we're about to overwrite it anyway.)
*/
MemSet(entry, 0, hashtable->entrysize);
TupleHashTableMatch(const void *key1, const void *key2, Size keysize)
{
HeapTuple tuple1 = ((const TupleHashEntryData *) key1)->firstTuple;
+
#ifdef USE_ASSERT_CHECKING
HeapTuple tuple2 = ((const TupleHashEntryData *) key2)->firstTuple;
#endif
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.49 2005/04/06 16:34:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.50 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* Initialize the Junk filter.
*
- * The source targetlist is passed in. The output tuple descriptor is
+ * The source targetlist is passed in. The output tuple descriptor is
* built from the non-junk tlist entries, plus the passed specification
* of whether to include room for an OID or not.
* An optional resultSlot can be passed as well.
* Now calculate the mapping between the original tuple's attributes and
* the "clean" tuple's attributes.
*
- * The "map" is an array of "cleanLength" attribute numbers, i.e. one
- * entry for every attribute of the "clean" tuple. The value of this
- * entry is the attribute number of the corresponding attribute of the
- * "original" tuple. (Zero indicates a NULL output attribute, but we
- * do not use that feature in this routine.)
+ * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
+ * for every attribute of the "clean" tuple. The value of this entry is
+ * the attribute number of the corresponding attribute of the "original"
+ * tuple. (Zero indicates a NULL output attribute, but we do not use that
+ * feature in this routine.)
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)
slot = MakeSingleTupleTableSlot(cleanTupType);
/*
- * Calculate the mapping between the original tuple's attributes and
- * the "clean" tuple's attributes.
+ * Calculate the mapping between the original tuple's attributes and the
+ * "clean" tuple's attributes.
*
- * The "map" is an array of "cleanLength" attribute numbers, i.e. one
- * entry for every attribute of the "clean" tuple. The value of this
- * entry is the attribute number of the corresponding attribute of the
- * "original" tuple. We store zero for any deleted attributes, marking
- * that a NULL is needed in the output tuple.
+ * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
+ * for every attribute of the "clean" tuple. The value of this entry is
+ * the attribute number of the corresponding attribute of the "original"
+ * tuple. We store zero for any deleted attributes, marking that a NULL
+ * is needed in the output tuple.
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)
ListCell *t;
/*
- * Look in the junkfilter's target list for an attribute with
- * the given name
+ * Look in the junkfilter's target list for an attribute with the given
+ * name
*/
foreach(t, junkfilter->jf_targetList)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.255 2005/08/26 03:07:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.256 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
/*
- * extract information from the query descriptor and the query
- * feature.
+ * extract information from the query descriptor and the query feature.
*/
operation = queryDesc->operation;
dest = queryDesc->dest;
{
AclMode requiredPerms;
Oid relOid;
- Oid userid;
+ Oid userid;
/*
- * Only plain-relation RTEs need to be checked here. Subquery RTEs
- * are checked by ExecInitSubqueryScan if the subquery is still a
- * separate subquery --- if it's been pulled up into our query level
- * then the RTEs are in our rangetable and will be checked here.
- * Function RTEs are checked by init_fcache when the function is
- * prepared for execution. Join and special RTEs need no checks.
+ * Only plain-relation RTEs need to be checked here. Subquery RTEs are
+ * checked by ExecInitSubqueryScan if the subquery is still a separate
+ * subquery --- if it's been pulled up into our query level then the RTEs
+ * are in our rangetable and will be checked here. Function RTEs are
+ * checked by init_fcache when the function is prepared for execution.
+ * Join and special RTEs need no checks.
*/
if (rte->rtekind != RTE_RELATION)
return;
relOid = rte->relid;
/*
- * userid to check as: current user unless we have a setuid
- * indication.
+ * userid to check as: current user unless we have a setuid indication.
*
- * Note: GetUserId() is presently fast enough that there's no harm in
- * calling it separately for each RTE. If that stops being true, we
- * could call it once in ExecCheckRTPerms and pass the userid down
- * from there. But for now, no need for the extra clutter.
+ * Note: GetUserId() is presently fast enough that there's no harm in calling
+ * it separately for each RTE. If that stops being true, we could call it
+ * once in ExecCheckRTPerms and pass the userid down from there. But for
+ * now, no need for the extra clutter.
*/
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
/*
- * We must have *all* the requiredPerms bits, so use aclmask not
- * aclcheck.
+ * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
*/
if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
!= requiredPerms)
else
{
/*
- * Single result relation identified by
- * parseTree->resultRelation
+ * Single result relation identified by parseTree->resultRelation
*/
numResultRelations = 1;
resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
/*
* Detect whether we're doing SELECT INTO. If so, set the es_into_oids
- * flag appropriately so that the plan tree will be initialized with
- * the correct tuple descriptors.
+ * flag appropriately so that the plan tree will be initialized with the
+ * correct tuple descriptors.
*/
do_select_into = false;
}
/*
- * initialize the executor "tuple" table. We need slots for all the
- * plan nodes, plus possibly output slots for the junkfilter(s). At
- * this point we aren't sure if we need junkfilters, so just add slots
- * for them unconditionally.
+ * initialize the executor "tuple" table. We need slots for all the plan
+ * nodes, plus possibly output slots for the junkfilter(s). At this point
+ * we aren't sure if we need junkfilters, so just add slots for them
+ * unconditionally.
*/
{
int nSlots = ExecCountSlotsNode(plan);
estate->es_useEvalPlan = false;
/*
- * initialize the private state information for all the nodes in the
- * query tree. This opens files, allocates storage and leaves us
- * ready to start processing tuples.
+ * initialize the private state information for all the nodes in the query
+ * tree. This opens files, allocates storage and leaves us ready to start
+ * processing tuples.
*/
planstate = ExecInitNode(plan, estate);
/*
- * Get the tuple descriptor describing the type of tuples to return.
- * (this is especially important if we are creating a relation with
- * "SELECT INTO")
+ * Get the tuple descriptor describing the type of tuples to return. (this
+ * is especially important if we are creating a relation with "SELECT
+ * INTO")
*/
tupType = ExecGetResultType(planstate);
/*
- * Initialize the junk filter if needed. SELECT and INSERT queries
- * need a filter if there are any junk attrs in the tlist. INSERT and
- * SELECT INTO also need a filter if the plan may return raw disk
- * tuples (else heap_insert will be scribbling on the source
- * relation!). UPDATE and DELETE always need a filter, since there's
- * always a junk 'ctid' attribute present --- no need to look first.
+ * Initialize the junk filter if needed. SELECT and INSERT queries need a
+ * filter if there are any junk attrs in the tlist. INSERT and SELECT
+ * INTO also need a filter if the plan may return raw disk tuples (else
+ * heap_insert will be scribbling on the source relation!). UPDATE and
+ * DELETE always need a filter, since there's always a junk 'ctid'
+ * attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
if (junk_filter_needed)
{
/*
- * If there are multiple result relations, each one needs its
- * own junk filter. Note this is only possible for
- * UPDATE/DELETE, so we can't be fooled by some needing a
- * filter and some not.
+ * If there are multiple result relations, each one needs its own
+ * junk filter. Note this is only possible for UPDATE/DELETE, so
+ * we can't be fooled by some needing a filter and some not.
*/
if (parseTree->resultRelations != NIL)
{
JunkFilter *j;
j = ExecInitJunkFilter(subplan->plan->targetlist,
- resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
- ExecAllocTableSlot(estate->es_tupleTable));
+ resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
+ ExecAllocTableSlot(estate->es_tupleTable));
resultRelInfo->ri_junkFilter = j;
resultRelInfo++;
}
/*
- * Set active junkfilter too; at this point ExecInitAppend
- * has already selected an active result relation...
+ * Set active junkfilter too; at this point ExecInitAppend has
+ * already selected an active result relation...
*/
estate->es_junkFilter =
estate->es_result_relation_info->ri_junkFilter;
j = ExecInitJunkFilter(planstate->plan->targetlist,
tupType->tdhasoid,
- ExecAllocTableSlot(estate->es_tupleTable));
+ ExecAllocTableSlot(estate->es_tupleTable));
estate->es_junkFilter = j;
if (estate->es_result_relation_info)
estate->es_result_relation_info->ri_junkFilter = j;
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the into relation. Note
- * that AlterTableCreateToastTable ends with
- * CommandCounterIncrement(), so that the TOAST table will be
- * visible for insertion.
+ * If necessary, create a TOAST table for the into relation. Note that
+ * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
+ * that the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(intoRelationId, true);
/*
* We can skip WAL-logging the insertions, unless PITR is in use.
*
- * Note that for a non-temp INTO table, this is safe only because
- * we know that the catalog changes above will have been WAL-logged,
- * and so RecordTransactionCommit will think it needs to WAL-log the
- * eventual transaction commit. Else the commit might be lost, even
- * though all the data is safely fsync'd ...
+ * Note that for a non-temp INTO table, this is safe only because we know
+ * that the catalog changes above will have been WAL-logged, and so
+ * RecordTransactionCommit will think it needs to WAL-log the eventual
+ * transaction commit. Else the commit might be lost, even though all
+ * the data is safely fsync'd ...
*/
estate->es_into_relation_use_wal = XLogArchivingActive();
}
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change sequence \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_TOASTVALUE:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change TOAST relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_VIEW:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change view \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
}
resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
if (resultRelInfo->ri_TrigDesc)
{
- int n = resultRelInfo->ri_TrigDesc->numtriggers;
+ int n = resultRelInfo->ri_TrigDesc->numtriggers;
resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
palloc0(n * sizeof(FmgrInfo));
/*
* If there are indices on the result relation, open them and save
- * descriptors in the result relation info, so that we can add new
- * index entries for the tuples we add/update. We need not do this
- * for a DELETE, however, since deletion doesn't affect indexes.
+ * descriptors in the result relation info, so that we can add new index
+ * entries for the tuples we add/update. We need not do this for a
+ * DELETE, however, since deletion doesn't affect indexes.
*/
if (resultRelationDesc->rd_rel->relhasindex &&
operation != CMD_DELETE)
estate->es_tupleTable = NULL;
/*
- * close the result relation(s) if any, but hold locks until xact
- * commit.
+ * close the result relation(s) if any, but hold locks until xact commit.
*/
resultRelInfo = estate->es_result_relations;
for (i = estate->es_num_result_relations; i > 0; i--)
if (estate->es_into_relation_descriptor != NULL)
{
/*
- * If we skipped using WAL, and it's not a temp relation,
- * we must force the relation down to disk before it's
- * safe to commit the transaction. This requires forcing
- * out any dirty buffers and then doing a forced fsync.
+ * If we skipped using WAL, and it's not a temp relation, we must
+ * force the relation down to disk before it's safe to commit the
+ * transaction. This requires forcing out any dirty buffers and then
+ * doing a forced fsync.
*/
if (!estate->es_into_relation_use_wal &&
!estate->es_into_relation_descriptor->rd_istemp)
}
/*
- * Loop until we've processed the proper number of tuples from the
- * plan.
+ * Loop until we've processed the proper number of tuples from the plan.
*/
for (;;)
}
/*
- * if we have a junk filter, then project a new tuple with the
- * junk removed.
+ * if we have a junk filter, then project a new tuple with the junk
+ * removed.
*
* Store this new "clean" tuple in the junkfilter's resultSlot.
- * (Formerly, we stored it back over the "dirty" tuple, which is
- * WRONG because that tuple slot has the wrong descriptor.)
+ * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
+ * because that tuple slot has the wrong descriptor.)
*
* Also, extract all the junk information we need.
*/
elog(ERROR, "ctid is NULL");
tupleid = (ItemPointer) DatumGetPointer(datum);
- tuple_ctid = *tupleid; /* make sure we don't free the
- * ctid!! */
+ tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */
tupleid = &tuple_ctid;
}
+
/*
* Process any FOR UPDATE or FOR SHARE locking requested.
*/
ItemPointerData update_ctid;
TransactionId update_xmax;
TupleTableSlot *newSlot;
- LockTupleMode lockmode;
- HTSU_Result test;
+ LockTupleMode lockmode;
+ HTSU_Result test;
if (!ExecGetJunkAttribute(junkfilter,
slot,
case HeapTupleUpdated:
if (IsXactIsoLevelSerializable)
ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
if (!ItemPointerEquals(&update_ctid,
&tuple.t_self))
{
/*
* if tuple was deleted or PlanQual failed for
- * updated tuple - we must not return this
- * tuple!
+ * updated tuple - we must not return this tuple!
*/
goto lnext;
}
/*
- * now that we have a tuple, do the appropriate thing with it..
- * either return it to the user, add it to a relation someplace,
- * delete it from a relation, or modify some of its attributes.
+ * now that we have a tuple, do the appropriate thing with it.. either
+ * return it to the user, add it to a relation someplace, delete it
+ * from a relation, or modify some of its attributes.
*/
switch (operation)
{
}
/*
- * check our tuple count.. if we've processed the proper number
- * then quit, else loop again and process more tuples. Zero
- * numberTuples means no limit.
+ * check our tuple count.. if we've processed the proper number then
+ * quit, else loop again and process more tuples. Zero numberTuples
+ * means no limit.
*/
current_tuple_count++;
if (numberTuples && numberTuples == current_tuple_count)
Oid newId;
/*
- * get the heap tuple out of the tuple table slot, making sure
- * we have a writable copy
+ * get the heap tuple out of the tuple table slot, making sure we have a
+ * writable copy
*/
tuple = ExecMaterializeSlot(slot);
/* BEFORE ROW INSERT Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
{
/*
* Insert modified tuple into tuple table slot, replacing the
- * original. We assume that it was allocated in per-tuple
- * memory context, and therefore will go away by itself. The
- * tuple table slot should not try to clear it.
+ * original. We assume that it was allocated in per-tuple memory
+ * context, and therefore will go away by itself. The tuple table
+ * slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
/*
* insert the tuple
*
- * Note: heap_insert returns the tid (location) of the new tuple
- * in the t_self field.
+ * Note: heap_insert returns the tid (location) of the new tuple in the
+ * t_self field.
*/
newId = heap_insert(resultRelationDesc, tuple,
estate->es_snapshot->curcid,
{
ResultRelInfo *resultRelInfo;
Relation resultRelationDesc;
- HTSU_Result result;
+ HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
/* BEFORE ROW DELETE Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
{
bool dodelete;
/*
* delete the tuple
*
- * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
- * the row to be deleted is visible to that snapshot, and throw a can't-
- * serialize error if not. This is a special-case behavior needed for
+ * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
+ * row to be deleted is visible to that snapshot, and throw a can't-
+ * serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
ldelete:;
* Note: Normally one would think that we have to delete index tuples
* associated with the heap tuple now..
*
- * ... but in POSTGRES, we have no need to do this because the vacuum
- * daemon automatically opens an index scan and deletes index tuples
- * when it finds deleted heap tuples. -cim 9/27/89
+ * ... but in POSTGRES, we have no need to do this because the vacuum daemon
+ * automatically opens an index scan and deletes index tuples when it
+ * finds deleted heap tuples. -cim 9/27/89
*/
/* AFTER ROW DELETE Triggers */
HeapTuple tuple;
ResultRelInfo *resultRelInfo;
Relation resultRelationDesc;
- HTSU_Result result;
+ HTSU_Result result;
ItemPointerData update_ctid;
TransactionId update_xmax;
elog(ERROR, "cannot UPDATE during bootstrap");
/*
- * get the heap tuple out of the tuple table slot, making sure
- * we have a writable copy
+ * get the heap tuple out of the tuple table slot, making sure we have a
+ * writable copy
*/
tuple = ExecMaterializeSlot(slot);
/* BEFORE ROW UPDATE Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
{
HeapTuple newtuple;
{
/*
* Insert modified tuple into tuple table slot, replacing the
- * original. We assume that it was allocated in per-tuple
- * memory context, and therefore will go away by itself. The
- * tuple table slot should not try to clear it.
+ * original. We assume that it was allocated in per-tuple memory
+ * context, and therefore will go away by itself. The tuple table
+ * slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
/*
* Check the constraints of the tuple
*
- * If we generate a new candidate tuple after EvalPlanQual testing, we
- * must loop back here and recheck constraints. (We don't need to
- * redo triggers, however. If there are any BEFORE triggers then
- * trigger.c will have done heap_lock_tuple to lock the correct tuple,
- * so there's no need to do them again.)
+ * If we generate a new candidate tuple after EvalPlanQual testing, we must
+ * loop back here and recheck constraints. (We don't need to redo
+ * triggers, however. If there are any BEFORE triggers then trigger.c
+ * will have done heap_lock_tuple to lock the correct tuple, so there's no
+ * need to do them again.)
*/
lreplace:;
if (resultRelationDesc->rd_att->constr)
/*
* replace the heap tuple
*
- * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
- * the row to be updated is visible to that snapshot, and throw a can't-
- * serialize error if not. This is a special-case behavior needed for
+ * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
+ * row to be updated is visible to that snapshot, and throw a can't-
+ * serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
result = heap_update(resultRelationDesc, tupleid, tuple,
(estate->es_processed)++;
/*
- * Note: instead of having to update the old index tuples associated
- * with the heap tuple, all we do is form and insert new index tuples.
- * This is because UPDATEs are actually DELETEs and INSERTs, and index
- * tuple deletion is done automagically by the vacuum daemon. All we
- * do is insert new index tuples. -cim 9/27/89
+ * Note: instead of having to update the old index tuples associated with
+ * the heap tuple, all we do is form and insert new index tuples. This is
+ * because UPDATEs are actually DELETEs and INSERTs, and index tuple
+ * deletion is done automagically by the vacuum daemon. All we do is
+ * insert new index tuples. -cim 9/27/89
*/
/*
* insert index entries for tuple
*
- * Note: heap_update returns the tid (location) of the new tuple
- * in the t_self field.
+ * Note: heap_update returns the tid (location) of the new tuple in the
+ * t_self field.
*/
if (resultRelInfo->ri_NumIndices > 0)
ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
/*
* If first time through for this result relation, build expression
- * nodetrees for rel's constraint expressions. Keep them in the
- * per-query memory context so they'll survive throughout the query.
+ * nodetrees for rel's constraint expressions. Keep them in the per-query
+ * memory context so they'll survive throughout the query.
*/
if (resultRelInfo->ri_ConstraintExprs == NULL)
{
}
/*
- * We will use the EState's per-tuple context for evaluating
- * constraint expressions (creating it if it's not already there).
+ * We will use the EState's per-tuple context for evaluating constraint
+ * expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value in column \"%s\" violates not-null constraint",
- NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
+ NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
}
}
{
/*
* If xmin isn't what we're expecting, the slot must have been
- * recycled and reused for an unrelated tuple. This implies
- * that the latest version of the row was deleted, so we need
- * do nothing. (Should be safe to examine xmin without getting
+ * recycled and reused for an unrelated tuple. This implies that
+ * the latest version of the row was deleted, so we need do
+ * nothing. (Should be safe to examine xmin without getting
* buffer's content lock, since xmin never changes in an existing
* tuple.)
*/
elog(ERROR, "t_xmin is uncommitted in tuple to be updated");
/*
- * If tuple is being updated by other transaction then we have
- * to wait for its commit/abort.
+ * If tuple is being updated by other transaction then we have to
+ * wait for its commit/abort.
*/
if (TransactionIdIsValid(SnapshotDirty->xmax))
{
}
/*
- * If the referenced slot was actually empty, the latest version
- * of the row must have been deleted, so we need do nothing.
+ * If the referenced slot was actually empty, the latest version of
+ * the row must have been deleted, so we need do nothing.
*/
if (tuple.t_data == NULL)
{
/*
* If we get here, the tuple was found but failed SnapshotDirty.
- * Assuming the xmin is either a committed xact or our own xact
- * (as it certainly should be if we're trying to modify the tuple),
- * this must mean that the row was updated or deleted by either
- * a committed xact or our own xact. If it was deleted, we can
- * ignore it; if it was updated then chain up to the next version
- * and repeat the whole test.
+ * Assuming the xmin is either a committed xact or our own xact (as it
+ * certainly should be if we're trying to modify the tuple), this must
+ * mean that the row was updated or deleted by either a committed xact
+ * or our own xact. If it was deleted, we can ignore it; if it was
+ * updated then chain up to the next version and repeat the whole
+ * test.
*
- * As above, it should be safe to examine xmax and t_ctid without
- * the buffer content lock, because they can't be changing.
+ * As above, it should be safe to examine xmax and t_ctid without the
+ * buffer content lock, because they can't be changing.
*/
if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
{
}
/*
- * For UPDATE/DELETE we have to return tid of actual row we're
- * executing PQ for.
+ * For UPDATE/DELETE we have to return tid of actual row we're executing
+ * PQ for.
*/
*tid = tuple.t_self;
}
/*
- * If this is request for another RTE - Ra, - then we have to check
- * wasn't PlanQual requested for Ra already and if so then Ra' row was
- * updated again and we have to re-start old execution for Ra and
- * forget all what we done after Ra was suspended. Cool? -:))
+ * If this is request for another RTE - Ra, - then we have to check wasn't
+ * PlanQual requested for Ra already and if so then Ra' row was updated
+ * again and we have to re-start old execution for Ra and forget all what
+ * we done after Ra was suspended. Cool? -:))
*/
if (epq != NULL && epq->rti != rti &&
epq->estate->es_evTuple[rti - 1] != NULL)
}
/*
- * If we are requested for another RTE then we have to suspend
- * execution of current PlanQual and start execution for new one.
+ * If we are requested for another RTE then we have to suspend execution
+ * of current PlanQual and start execution for new one.
*/
if (epq == NULL || epq->rti != rti)
{
Assert(epq->rti == rti);
/*
- * Ok - we're requested for the same RTE. Unfortunately we still have
- * to end and restart execution of the plan, because ExecReScan
- * wouldn't ensure that upper plan nodes would reset themselves. We
- * could make that work if insertion of the target tuple were
- * integrated with the Param mechanism somehow, so that the upper plan
- * nodes know that their children's outputs have changed.
+ * Ok - we're requested for the same RTE. Unfortunately we still have to
+ * end and restart execution of the plan, because ExecReScan wouldn't
+ * ensure that upper plan nodes would reset themselves. We could make
+ * that work if insertion of the target tuple were integrated with the
+ * Param mechanism somehow, so that the upper plan nodes know that their
+ * children's outputs have changed.
*
* Note that the stack of free evalPlanQual nodes is quite useless at the
* moment, since it only saves us from pallocing/releasing the
- * evalPlanQual nodes themselves. But it will be useful once we
- * implement ReScan instead of end/restart for re-using PlanQual
- * nodes.
+ * evalPlanQual nodes themselves. But it will be useful once we implement
+ * ReScan instead of end/restart for re-using PlanQual nodes.
*/
if (endNode)
{
*
* Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
* instead copy down changeable state from the top plan (including
- * es_result_relation_info, es_junkFilter) and reset locally
- * changeable state in the epq (including es_param_exec_vals,
- * es_evTupleNull).
+ * es_result_relation_info, es_junkFilter) and reset locally changeable
+ * state in the epq (including es_param_exec_vals, es_evTupleNull).
*/
EvalPlanQualStart(epq, estate, epq->next);
/*
- * free old RTE' tuple, if any, and store target tuple where
- * relation's scan node will see it
+ * free old RTE' tuple, if any, and store target tuple where relation's
+ * scan node will see it
*/
epqstate = epq->estate;
if (epqstate->es_evTuple[rti - 1] != NULL)
oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);
/*
- * The epqstates share the top query's copy of unchanging state such
- * as the snapshot, rangetable, result-rel info, and external Param
- * info. They need their own copies of local state, including a tuple
- * table, es_param_exec_vals, etc.
+ * The epqstates share the top query's copy of unchanging state such as
+ * the snapshot, rangetable, result-rel info, and external Param info.
+ * They need their own copies of local state, including a tuple table,
+ * es_param_exec_vals, etc.
*/
epqstate->es_direction = ForwardScanDirection;
epqstate->es_snapshot = estate->es_snapshot;
epqstate->es_topPlan = estate->es_topPlan;
/*
- * Each epqstate must have its own es_evTupleNull state, but all the
- * stack entries share es_evTuple state. This allows sub-rechecks to
- * inherit the value being examined by an outer recheck.
+ * Each epqstate must have its own es_evTupleNull state, but all the stack
+ * entries share es_evTuple state. This allows sub-rechecks to inherit
+ * the value being examined by an outer recheck.
*/
epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
if (priorepq == NULL)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.50 2005/04/19 22:35:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.51 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Initialize any initPlans present in this node. The planner put
- * them in a separate list for us.
+ * Initialize any initPlans present in this node. The planner put them in
+ * a separate list for us.
*/
subps = NIL;
foreach(l, node->initPlan)
/*
* Initialize any subPlans present in this node. These were found by
- * ExecInitExpr during initialization of the PlanState. Note we must
- * do this after initializing initPlans, in case their arguments
- * contain subPlans (is that actually possible? perhaps not).
+ * ExecInitExpr during initialization of the PlanState. Note we must do
+ * this after initializing initPlans, in case their arguments contain
+ * subPlans (is that actually possible? perhaps not).
*/
foreach(l, result->subPlan)
{
Node *
MultiExecProcNode(PlanState *node)
{
- Node *result;
+ Node *result;
CHECK_FOR_INTERRUPTS();
switch (nodeTag(node))
{
- /*
- * Only node types that actually support multiexec will be listed
- */
+ /*
+ * Only node types that actually support multiexec will be listed
+ */
case T_HashState:
result = MultiExecHash((HashState *) node);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.180 2005/06/26 22:05:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.181 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConvertRowtype(ConvertRowtypeExprState *cstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCaseTestExpr(ExprState *exprstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalMinMax(MinMaxExprState *minmaxExpr,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullIf(FuncExprState *nullIfExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
isDone));
/*
- * If refexpr yields NULL, and it's a fetch, then result is NULL. In
- * the assignment case, we'll cons up something below.
+ * If refexpr yields NULL, and it's a fetch, then result is NULL. In the
+ * assignment case, we'll cons up something below.
*/
if (*isNull)
{
NULL));
/*
- * If any index expr yields NULL, result is NULL or source
- * array
+ * If any index expr yields NULL, result is NULL or source array
*/
if (eisnull)
{
/*
* Evaluate the value to be assigned into the array.
*
- * XXX At some point we'll need to look into making the old value of
- * the array element available via CaseTestExpr, as is done by
- * ExecEvalFieldStore. This is not needed now but will be needed
- * to support arrays of composite types; in an assignment to a
- * field of an array member, the parser would generate a
- * FieldStore that expects to fetch its input tuple via
- * CaseTestExpr.
+ * XXX At some point we'll need to look into making the old value of the
+ * array element available via CaseTestExpr, as is done by
+ * ExecEvalFieldStore. This is not needed now but will be needed to
+ * support arrays of composite types; in an assignment to a field of
+ * an array member, the parser would generate a FieldStore that
+ * expects to fetch its input tuple via CaseTestExpr.
*/
sourceData = ExecEvalExpr(astate->refassgnexpr,
econtext,
NULL);
/*
- * For now, can't cope with inserting NULL into an array, so make
- * it a no-op per discussion above...
+ * For now, can't cope with inserting NULL into an array, so make it a
+ * no-op per discussion above...
*/
if (eisnull)
return PointerGetDatum(array_source);
/*
- * For an assignment, if all the subscripts and the input
- * expression are non-null but the original array is null, then
- * substitute an empty (zero-dimensional) array and proceed with
- * the assignment. This only works for varlena arrays, though; for
- * fixed-length array types we punt and return the null input
- * array.
+ * For an assignment, if all the subscripts and the input expression
+ * are non-null but the original array is null, then substitute an
+ * empty (zero-dimensional) array and proceed with the assignment.
+ * This only works for varlena arrays, though; for fixed-length array
+ * types we punt and return the null input array.
*/
if (*isNull)
{
else
resultArray = array_set_slice(array_source, i,
upper.indx, lower.indx,
- (ArrayType *) DatumGetPointer(sourceData),
+ (ArrayType *) DatumGetPointer(sourceData),
astate->refattrlength,
astate->refelemlength,
astate->refelembyval,
/*
* Get the slot and attribute number we want
*
- * The asserts check that references to system attributes only appear at
- * the level of a relation scan; at higher levels, system attributes
- * must be treated as ordinary variables (since we no longer have
- * access to the original tuple).
+ * The asserts check that references to system attributes only appear at the
+ * level of a relation scan; at higher levels, system attributes must be
+ * treated as ordinary variables (since we no longer have access to the
+ * original tuple).
*/
attnum = variable->varattno;
}
#ifdef USE_ASSERT_CHECKING
+
/*
* Some checks that are only applied for user attribute numbers (bogus
* system attnums will be caught inside slot_getattr).
Assert(attnum <= tuple_type->natts);
/*
- * This assert checks that the datatype the plan expects to get
- * (as told by our "variable" argument) is in fact the datatype of
- * the attribute being fetched (as seen in the current context,
- * identified by our "econtext" argument). Otherwise crashes are
- * likely.
+ * This assert checks that the datatype the plan expects to get (as
+ * told by our "variable" argument) is in fact the datatype of the
+ * attribute being fetched (as seen in the current context, identified
+ * by our "econtext" argument). Otherwise crashes are likely.
*
* Note that we can't check dropped columns, since their atttypid has
* been zeroed.
Assert(variable->vartype == tuple_type->attrs[attnum - 1]->atttypid ||
tuple_type->attrs[attnum - 1]->attisdropped);
}
-#endif /* USE_ASSERT_CHECKING */
+#endif /* USE_ASSERT_CHECKING */
return slot_getattr(slot, attnum, isNull);
}
if (thisParamKind == PARAM_EXEC)
{
/*
- * PARAM_EXEC params (internal executor parameters) are stored in
- * the ecxt_param_exec_vals array, and can be accessed by array
- * index.
+ * PARAM_EXEC params (internal executor parameters) are stored in the
+ * ecxt_param_exec_vals array, and can be accessed by array index.
*/
ParamExecData *prm;
else
{
/*
- * All other parameter types must be sought in
- * ecxt_param_list_info.
+ * All other parameter types must be sought in ecxt_param_list_info.
*/
ParamListInfo paramInfo;
tupDesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
/*
- * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set
- * all the fields in the struct just in case user tries to inspect
- * system columns.
+ * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all
+ * the fields in the struct just in case user tries to inspect system
+ * columns.
*/
tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
ItemPointerSetInvalid(&(tmptup.t_self));
elog(ERROR, "attribute \"%s\" does not exist", attname);
/*
- * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set
- * all the fields in the struct just in case user tries to inspect
- * system columns.
+ * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all
+ * the fields in the struct just in case user tries to inspect system
+ * columns.
*/
tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
ItemPointerSetInvalid(&(tmptup.t_self));
/*
* Safety check on nargs. Under normal circumstances this should never
- * fail, as parser should check sooner. But possibly it might fail
- * if server has been compiled with FUNC_MAX_ARGS smaller than some
- * functions declared in pg_proc?
+ * fail, as parser should check sooner. But possibly it might fail if
+ * server has been compiled with FUNC_MAX_ARGS smaller than some functions
+ * declared in pg_proc?
*/
if (list_length(fcache->args) > FUNC_MAX_ARGS)
ereport(ERROR,
if (thisArgIsDone != ExprSingleResult)
{
/*
- * We allow only one argument to have a set value; we'd need
- * much more complexity to keep track of multiple set
- * arguments (cf. ExecTargetList) and it doesn't seem worth
- * it.
+ * We allow only one argument to have a set value; we'd need much
+ * more complexity to keep track of multiple set arguments (cf.
+ * ExecTargetList) and it doesn't seem worth it.
*/
if (argIsDone != ExprSingleResult)
ereport(ERROR,
check_stack_depth();
/*
- * arguments is a list of expressions to evaluate before passing to
- * the function manager. We skip the evaluation if it was already
- * done in the previous call (ie, we are continuing the evaluation of
- * a set-valued function). Otherwise, collect the current argument
- * values into fcinfo.
+ * arguments is a list of expressions to evaluate before passing to the
+ * function manager. We skip the evaluation if it was already done in the
+ * previous call (ie, we are continuing the evaluation of a set-valued
+ * function). Otherwise, collect the current argument values into fcinfo.
*/
if (!fcache->setArgsValid)
{
}
/*
- * If function returns set, prepare a resultinfo node for
- * communication
+ * If function returns set, prepare a resultinfo node for communication
*/
if (fcache->func.fn_retset)
{
}
/*
- * now return the value gotten by calling the function manager,
- * passing the function the evaluated parameter values.
+ * now return the value gotten by calling the function manager, passing
+ * the function the evaluated parameter values.
*/
if (fcache->func.fn_retset || hasSetArg)
{
/*
- * We need to return a set result. Complain if caller not ready
- * to accept one.
+ * We need to return a set result. Complain if caller not ready to
+ * accept one.
*/
if (isDone == NULL)
ereport(ERROR,
errmsg("set-valued function called in context that cannot accept a set")));
/*
- * This loop handles the situation where we have both a set
- * argument and a set-valued function. Once we have exhausted the
- * function's value(s) for a particular argument value, we have to
- * get the next argument value and start the function over again.
- * We might have to do it more than once, if the function produces
- * an empty result set for a particular input value.
+ * This loop handles the situation where we have both a set argument
+ * and a set-valued function. Once we have exhausted the function's
+ * value(s) for a particular argument value, we have to get the next
+ * argument value and start the function over again. We might have to
+ * do it more than once, if the function produces an empty result set
+ * for a particular input value.
*/
for (;;)
{
/*
- * If function is strict, and there are any NULL arguments,
- * skip calling the function (at least for this set of args).
+ * If function is strict, and there are any NULL arguments, skip
+ * calling the function (at least for this set of args).
*/
bool callit = true;
{
/*
* Got a result from current argument. If function itself
- * returns set, save the current argument values to re-use
- * on the next call.
+ * returns set, save the current argument values to re-use on
+ * the next call.
*/
if (fcache->func.fn_retset && *isDone == ExprMultipleResult)
{
{
RegisterExprContextCallback(econtext,
ShutdownFuncExpr,
- PointerGetDatum(fcache));
+ PointerGetDatum(fcache));
fcache->shutdown_reg = true;
}
}
}
/*
- * If we reach here, loop around to run the function on the
- * new argument.
+ * If we reach here, loop around to run the function on the new
+ * argument.
*/
}
}
* Non-set case: much easier.
*
* We change the ExprState function pointer to use the simpler
- * ExecMakeFunctionResultNoSets on subsequent calls. This amounts
- * to assuming that no argument can return a set if it didn't do
- * so the first time.
+ * ExecMakeFunctionResultNoSets on subsequent calls. This amounts to
+ * assuming that no argument can return a set if it didn't do so the
+ * first time.
*/
fcache->xprstate.evalfunc = (ExprStateEvalFunc) ExecMakeFunctionResultNoSets;
InitFunctionCallInfoData(fcinfo, &(fcache->func), i, NULL, NULL);
/*
- * If function is strict, and there are any NULL arguments, skip
- * calling the function and return NULL.
+ * If function is strict, and there are any NULL arguments, skip calling
+ * the function and return NULL.
*/
if (fcache->func.fn_strict)
{
* ExecMakeTableFunctionResult
*
* Evaluate a table function, producing a materialized result in a Tuplestore
- * object. *returnDesc is set to the tupledesc actually returned by the
+ * object. *returnDesc is set to the tupledesc actually returned by the
* function, or NULL if it didn't provide one.
*/
Tuplestorestate *
get_typtype(funcrettype) == 'c');
/*
- * Prepare a resultinfo node for communication. We always do this
- * even if not expecting a set result, so that we can pass
- * expectedDesc. In the generic-expression case, the expression
- * doesn't actually get to see the resultinfo, but set it up anyway
- * because we use some of the fields as our own state variables.
+ * Prepare a resultinfo node for communication. We always do this even if
+ * not expecting a set result, so that we can pass expectedDesc. In the
+ * generic-expression case, the expression doesn't actually get to see the
+ * resultinfo, but set it up anyway because we use some of the fields as
+ * our own state variables.
*/
InitFunctionCallInfoData(fcinfo, NULL, 0, NULL, (Node *) &rsinfo);
rsinfo.type = T_ReturnSetInfo;
rsinfo.setDesc = NULL;
/*
- * Normally the passed expression tree will be a FuncExprState, since
- * the grammar only allows a function call at the top level of a table
- * function reference. However, if the function doesn't return set
- * then the planner might have replaced the function call via
- * constant-folding or inlining. So if we see any other kind of
- * expression node, execute it via the general ExecEvalExpr() code;
- * the only difference is that we don't get a chance to pass a special
- * ReturnSetInfo to any functions buried in the expression.
+ * Normally the passed expression tree will be a FuncExprState, since the
+ * grammar only allows a function call at the top level of a table
+ * function reference. However, if the function doesn't return set then
+ * the planner might have replaced the function call via constant-folding
+ * or inlining. So if we see any other kind of expression node, execute
+ * it via the general ExecEvalExpr() code; the only difference is that we
+ * don't get a chance to pass a special ReturnSetInfo to any functions
+ * buried in the expression.
*/
if (funcexpr && IsA(funcexpr, FuncExprState) &&
IsA(funcexpr->expr, FuncExpr))
* Evaluate the function's argument list.
*
* Note: ideally, we'd do this in the per-tuple context, but then the
- * argument values would disappear when we reset the context in
- * the inner loop. So do it in caller context. Perhaps we should
- * make a separate context just to hold the evaluated arguments?
+ * argument values would disappear when we reset the context in the
+ * inner loop. So do it in caller context. Perhaps we should make a
+ * separate context just to hold the evaluated arguments?
*/
fcinfo.flinfo = &(fcache->func);
argDone = ExecEvalFuncArgs(&fcinfo, fcache->args, econtext);
}
/*
- * Switch to short-lived context for calling the function or
- * expression.
+ * Switch to short-lived context for calling the function or expression.
*/
MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
HeapTuple tuple;
/*
- * reset per-tuple memory context before each call of the function
- * or expression. This cleans up any local memory the function may
- * leak when called.
+ * reset per-tuple memory context before each call of the function or
+ * expression. This cleans up any local memory the function may leak
+ * when called.
*/
ResetExprContext(econtext);
break;
/*
- * Can't do anything very useful with NULL rowtype values.
- * For a function returning set, we consider this a protocol
- * violation (but another alternative would be to just ignore
- * the result and "continue" to get another row). For a function
- * not returning set, we fall out of the loop; we'll cons up
- * an all-nulls result row below.
+ * Can't do anything very useful with NULL rowtype values. For a
+ * function returning set, we consider this a protocol violation
+ * (but another alternative would be to just ignore the result and
+ * "continue" to get another row). For a function not returning
+ * set, we fall out of the loop; we'll cons up an all-nulls result
+ * row below.
*/
if (returnsTuple && fcinfo.isnull)
{
}
/*
- * If first time through, build tupdesc and tuplestore for
- * result
+ * If first time through, build tupdesc and tuplestore for result
*/
if (first_time)
{
if (returnsTuple)
{
/*
- * Use the type info embedded in the rowtype Datum to
- * look up the needed tupdesc. Make a copy for the
- * query.
+ * Use the type info embedded in the rowtype Datum to look
+ * up the needed tupdesc. Make a copy for the query.
*/
HeapTupleHeader td;
td = DatumGetHeapTupleHeader(result);
tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td),
- HeapTupleHeaderGetTypMod(td));
+ HeapTupleHeaderGetTypMod(td));
tupdesc = CreateTupleDescCopy(tupdesc);
}
else
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM does not support set arguments")));
+ errmsg("IS DISTINCT FROM does not support set arguments")));
Assert(fcinfo.nargs == 2);
if (fcinfo.argnull[0] && fcinfo.argnull[1])
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("op ANY/ALL (array) does not support set arguments")));
+ errmsg("op ANY/ALL (array) does not support set arguments")));
Assert(fcinfo.nargs == 2);
/*
- * If the array is NULL then we return NULL --- it's not very
- * meaningful to do anything else, even if the operator isn't strict.
+ * If the array is NULL then we return NULL --- it's not very meaningful
+ * to do anything else, even if the operator isn't strict.
*/
if (fcinfo.argnull[1])
{
/*
* If the array is empty, we return either FALSE or TRUE per the useOr
* flag. This is correct even if the scalar is NULL; since we would
- * evaluate the operator zero times, it matters not whether it would
- * want to return NULL.
+ * evaluate the operator zero times, it matters not whether it would want
+ * to return NULL.
*/
nitems = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr));
if (nitems <= 0)
return BoolGetDatum(!useOr);
/*
- * If the scalar is NULL, and the function is strict, return NULL.
- * This is just to avoid having to test for strictness inside the
- * loop. (XXX but if arrays could have null elements, we'd need a
- * test anyway.)
+ * If the scalar is NULL, and the function is strict, return NULL. This is
+ * just to avoid having to test for strictness inside the loop. (XXX but
+ * if arrays could have null elements, we'd need a test anyway.)
*/
if (fcinfo.argnull[0] && sstate->fxprstate.func.fn_strict)
{
}
/*
- * We arrange to look up info about the element type only once per
- * series of calls, assuming the element type doesn't change
- * underneath us.
+ * We arrange to look up info about the element type only once per series
+ * of calls, assuming the element type doesn't change underneath us.
*/
if (sstate->element_type != ARR_ELEMTYPE(arr))
{
expr_value = ExecEvalExpr(clause, econtext, isNull, NULL);
/*
- * if the expression evaluates to null, then we just cascade the null
- * back to whoever called us.
+ * if the expression evaluates to null, then we just cascade the null back
+ * to whoever called us.
*/
if (*isNull)
return expr_value;
/*
- * evaluation of 'not' is simple.. expr is false, then return 'true'
- * and vice versa.
+ * evaluation of 'not' is simple.. expr is false, then return 'true' and
+ * vice versa.
*/
return BoolGetDatum(!DatumGetBool(expr_value));
}
AnyNull = false;
/*
- * If any of the clauses is TRUE, the OR result is TRUE regardless of
- * the states of the rest of the clauses, so we can stop evaluating
- * and return TRUE immediately. If none are TRUE and one or more is
- * NULL, we return NULL; otherwise we return FALSE. This makes sense
- * when you interpret NULL as "don't know": if we have a TRUE then the
- * OR is TRUE even if we aren't sure about some of the other inputs.
- * If all the known inputs are FALSE, but we have one or more "don't
- * knows", then we have to report that we "don't know" what the OR's
- * result should be --- perhaps one of the "don't knows" would have
- * been TRUE if we'd known its value. Only when all the inputs are
- * known to be FALSE can we state confidently that the OR's result is
- * FALSE.
+ * If any of the clauses is TRUE, the OR result is TRUE regardless of the
+ * states of the rest of the clauses, so we can stop evaluating and return
+ * TRUE immediately. If none are TRUE and one or more is NULL, we return
+ * NULL; otherwise we return FALSE. This makes sense when you interpret
+ * NULL as "don't know": if we have a TRUE then the OR is TRUE even if we
+ * aren't sure about some of the other inputs. If all the known inputs are
+ * FALSE, but we have one or more "don't knows", then we have to report
+ * that we "don't know" what the OR's result should be --- perhaps one of
+ * the "don't knows" would have been TRUE if we'd known its value. Only
+ * when all the inputs are known to be FALSE can we state confidently that
+ * the OR's result is FALSE.
*/
foreach(clause, clauses)
{
AnyNull = false;
/*
- * If any of the clauses is FALSE, the AND result is FALSE regardless
- * of the states of the rest of the clauses, so we can stop evaluating
- * and return FALSE immediately. If none are FALSE and one or more is
- * NULL, we return NULL; otherwise we return TRUE. This makes sense
- * when you interpret NULL as "don't know", using the same sort of
- * reasoning as for OR, above.
+ * If any of the clauses is FALSE, the AND result is FALSE regardless of
+ * the states of the rest of the clauses, so we can stop evaluating and
+ * return FALSE immediately. If none are FALSE and one or more is NULL,
+ * we return NULL; otherwise we return TRUE. This makes sense when you
+ * interpret NULL as "don't know", using the same sort of reasoning as for
+ * OR, above.
*/
foreach(clause, clauses)
/* ----------------------------------------------------------------
* ExecEvalConvertRowtype
*
- * Evaluate a rowtype coercion operation. This may require
+ * Evaluate a rowtype coercion operation. This may require
* rearranging field positions.
* ----------------------------------------------------------------
*/
tmptup.t_data = tuple;
/*
- * Extract all the values of the old tuple, offsetting the arrays
- * so that invalues[0] is NULL and invalues[1] is the first
- * source attribute; this exactly matches the numbering convention
- * in attrMap.
+ * Extract all the values of the old tuple, offsetting the arrays so that
+ * invalues[0] is NULL and invalues[1] is the first source attribute; this
+ * exactly matches the numbering convention in attrMap.
*/
heap_deform_tuple(&tmptup, cstate->indesc, invalues + 1, inisnull + 1);
invalues[0] = (Datum) 0;
*isDone = ExprSingleResult;
/*
- * If there's a test expression, we have to evaluate it and save the
- * value where the CaseTestExpr placeholders can find it. We must save
- * and restore prior setting of econtext's caseValue fields, in case
- * this node is itself within a larger CASE.
+ * If there's a test expression, we have to evaluate it and save the value
+ * where the CaseTestExpr placeholders can find it. We must save and
+ * restore prior setting of econtext's caseValue fields, in case this node
+ * is itself within a larger CASE.
*/
save_datum = econtext->caseValue_datum;
save_isNull = econtext->caseValue_isNull;
{
econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg,
econtext,
- &econtext->caseValue_isNull,
+ &econtext->caseValue_isNull,
NULL);
}
/*
- * we evaluate each of the WHEN clauses in turn, as soon as one is
- * true we return the corresponding result. If none are true then we
- * return the value of the default clause, or NULL if there is none.
+ * we evaluate each of the WHEN clauses in turn, as soon as one is true we
+ * return the corresponding result. If none are true then we return the
+ * value of the default clause, or NULL if there is none.
*/
foreach(clause, clauses)
{
NULL);
/*
- * if we have a true test, then we return the result, since the
- * case statement is satisfied. A NULL result from the test is
- * not considered true.
+ * if we have a true test, then we return the result, since the case
+ * statement is satisfied. A NULL result from the test is not
+ * considered true.
*/
if (DatumGetBool(clause_value) && !*isNull)
{
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot merge incompatible arrays"),
errdetail("Array with element type %s cannot be "
- "included in ARRAY construct with element type %s.",
+ "included in ARRAY construct with element type %s.",
format_type_be(ARR_ELEMTYPE(array)),
format_type_be(element_type))));
if (ndims <= 0 || ndims > MAXDIM)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions (%d) exceeds " \
- "the maximum allowed (%d)", ndims, MAXDIM)));
+ errmsg("number of array dimensions (%d) exceeds " \
+ "the maximum allowed (%d)", ndims, MAXDIM)));
elem_dims = (int *) palloc(elem_ndims * sizeof(int));
memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int));
elem_ndims * sizeof(int)) != 0)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("multidimensional arrays must have array "
- "expressions with matching dimensions")));
+ errmsg("multidimensional arrays must have array "
+ "expressions with matching dimensions")));
}
elem_ndatabytes = ARR_SIZE(array) - ARR_OVERHEAD(elem_ndims);
ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
- Datum result = (Datum) 0;
+ Datum result = (Datum) 0;
MinMaxOp op = ((MinMaxExpr *) minmaxExpr->xprstate.expr)->op;
FunctionCallInfoData locfcinfo;
- ListCell *arg;
+ ListCell *arg;
if (isDone)
*isDone = ExprSingleResult;
locfcinfo.arg[1] = value;
locfcinfo.isnull = false;
cmpresult = DatumGetInt32(FunctionCallInvoke(&locfcinfo));
- if (locfcinfo.isnull) /* probably should not happen */
+ if (locfcinfo.isnull) /* probably should not happen */
continue;
if (cmpresult > 0 && op == IS_LEAST)
result = value;
if (*isNull)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
- errmsg("domain %s does not allow null values",
- format_type_be(ctest->resulttype))));
+ errmsg("domain %s does not allow null values",
+ format_type_be(ctest->resulttype))));
break;
case DOM_CONSTRAINT_CHECK:
{
* Set up value to be returned by CoerceToDomainValue
* nodes. We must save and restore prior setting of
* econtext's domainValue fields, in case this node is
- * itself within a check expression for another
- * domain.
+ * itself within a check expression for another domain.
*/
save_datum = econtext->domainValue_datum;
save_isNull = econtext->domainValue_isNull;
}
/*
- * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set
- * all the fields in the struct just in case user tries to inspect
- * system columns.
+ * heap_getattr needs a HeapTuple not a bare HeapTupleHeader. We set all
+ * the fields in the struct just in case user tries to inspect system
+ * columns.
*/
tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
ItemPointerSetInvalid(&(tmptup.t_self));
if (!*isNull)
{
/*
- * heap_deform_tuple needs a HeapTuple not a bare HeapTupleHeader.
- * We set all the fields in the struct just in case.
+ * heap_deform_tuple needs a HeapTuple not a bare HeapTupleHeader. We
+ * set all the fields in the struct just in case.
*/
HeapTupleHeader tuphdr;
HeapTupleData tmptup;
Assert(fieldnum > 0 && fieldnum <= tupDesc->natts);
/*
- * Use the CaseTestExpr mechanism to pass down the old value of
- * the field being replaced; this is useful in case we have a
- * nested field update situation. It's safe to reuse the CASE
- * mechanism because there cannot be a CASE between here and where
- * the value would be needed.
+ * Use the CaseTestExpr mechanism to pass down the old value of the
+ * field being replaced; this is useful in case we have a nested field
+ * update situation. It's safe to reuse the CASE mechanism because
+ * there cannot be a CASE between here and where the value would be
+ * needed.
*/
econtext->caseValue_datum = values[fieldnum - 1];
econtext->caseValue_isNull = isnull[fieldnum - 1];
/*
* Complain if the aggregate's argument contains any
* aggregates; nested agg functions are semantically
- * nonsensical. (This should have been caught
- * earlier, but we defend against it here anyway.)
+ * nonsensical. (This should have been caught earlier,
+ * but we defend against it here anyway.)
*/
if (naggs != aggstate->numaggs)
ereport(ERROR,
elog(ERROR, "SubPlan found with no parent plan");
/*
- * Here we just add the SubPlanState nodes to
- * parent->subPlan. The subplans will be initialized
- * later.
+ * Here we just add the SubPlanState nodes to parent->subPlan.
+ * The subplans will be initialized later.
*/
parent->subPlan = lcons(sstate, parent->subPlan);
sstate->sub_estate = NULL;
{
ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node;
ConvertRowtypeExprState *cstate = makeNode(ConvertRowtypeExprState);
- int i;
- int n;
+ int i;
+ int n;
cstate->xprstate.evalfunc = (ExprStateEvalFunc) ExecEvalConvertRowtype;
cstate->arg = ExecInitExpr(convert->arg, parent);
int j;
if (att->attisdropped)
- continue; /* attrMap[i] is already 0 */
+ continue; /* attrMap[i] is already 0 */
attname = NameStr(att->attname);
atttypid = att->atttypid;
atttypmod = att->atttypmod;
elog(ERROR, "attribute \"%s\" of type %s does not match corresponding attribute of type %s",
attname,
format_type_be(cstate->indesc->tdtypeid),
- format_type_be(cstate->outdesc->tdtypeid));
+ format_type_be(cstate->outdesc->tdtypeid));
cstate->attrMap[i] = (AttrNumber) (j + 1);
break;
}
if (!attrs[i]->attisdropped)
{
/*
- * Guard against ALTER COLUMN TYPE on rowtype
- * since the RowExpr was created. XXX should we
- * check typmod too? Not sure we can be sure
- * it'll be the same.
+ * Guard against ALTER COLUMN TYPE on rowtype since
+ * the RowExpr was created. XXX should we check
+ * typmod too? Not sure we can be sure it'll be the
+ * same.
*/
if (exprType((Node *) e) != attrs[i]->atttypid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("ROW() column has type %s instead of type %s",
- format_type_be(exprType((Node *) e)),
- format_type_be(attrs[i]->atttypid))));
+ format_type_be(exprType((Node *) e)),
+ format_type_be(attrs[i]->atttypid))));
}
else
{
/*
- * Ignore original expression and insert a NULL.
- * We don't really care what type of NULL it is,
- * so always make an int4 NULL.
+ * Ignore original expression and insert a NULL. We
+ * don't really care what type of NULL it is, so
+ * always make an int4 NULL.
*/
e = (Expr *) makeNullConst(INT4OID);
}
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
/*
- * Evaluate the qual conditions one at a time. If we find a FALSE
- * result, we can stop evaluating and return FALSE --- the AND result
- * must be FALSE. Also, if we find a NULL result when resultForNull
- * is FALSE, we can stop and return FALSE --- the AND result must be
- * FALSE or NULL in that case, and the caller doesn't care which.
+ * Evaluate the qual conditions one at a time. If we find a FALSE result,
+ * we can stop evaluating and return FALSE --- the AND result must be
+ * FALSE. Also, if we find a NULL result when resultForNull is FALSE, we
+ * can stop and return FALSE --- the AND result must be FALSE or NULL in
+ * that case, and the caller doesn't care which.
*
- * If we get to the end of the list, we can return TRUE. This will
- * happen when the AND result is indeed TRUE, or when the AND result
- * is NULL (one or more NULL subresult, with all the rest TRUE) and
- * the caller has specified resultForNull = TRUE.
+ * If we get to the end of the list, we can return TRUE. This will happen
+ * when the AND result is indeed TRUE, or when the AND result is NULL (one
+ * or more NULL subresult, with all the rest TRUE) and the caller has
+ * specified resultForNull = TRUE.
*/
result = true;
if (*isDone == ExprSingleResult)
{
/*
- * all sets are done, so report that tlist expansion is
- * complete.
+ * all sets are done, so report that tlist expansion is complete.
*/
*isDone = ExprEndResult;
MemoryContextSwitchTo(oldContext);
else
{
/*
- * We have some done and some undone sets. Restart the done
- * ones so that we can deliver a tuple (if possible).
+ * We have some done and some undone sets. Restart the done ones
+ * so that we can deliver a tuple (if possible).
*/
foreach(tl, targetlist)
{
if (itemIsDone[resind] == ExprEndResult)
{
/*
- * Oh dear, this item is returning an empty set.
- * Guess we can't make a tuple after all.
+ * Oh dear, this item is returning an empty set. Guess
+ * we can't make a tuple after all.
*/
*isDone = ExprEndResult;
break;
}
/*
- * If we cannot make a tuple because some sets are empty, we
- * still have to cycle the nonempty sets to completion, else
- * resources will not be released from subplans etc.
+ * If we cannot make a tuple because some sets are empty, we still
+ * have to cycle the nonempty sets to completion, else resources
+ * will not be released from subplans etc.
*
* XXX is that still necessary?
*/
projInfo->pi_lastScanVar);
/*
- * Assign to result by direct extraction of fields from source
- * slots ... a mite ugly, but fast ...
+ * Assign to result by direct extraction of fields from source slots ... a
+ * mite ugly, but fast ...
*/
for (i = list_length(projInfo->pi_targetlist) - 1; i >= 0; i--)
{
slot = projInfo->pi_slot;
/*
- * Clear any former contents of the result slot. This makes it
- * safe for us to use the slot's Datum/isnull arrays as workspace.
- * (Also, we can return the slot as-is if we decide no rows can
- * be projected.)
+ * Clear any former contents of the result slot. This makes it safe for
+ * us to use the slot's Datum/isnull arrays as workspace. (Also, we can
+ * return the slot as-is if we decide no rows can be projected.)
*/
ExecClearTuple(slot);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.36 2005/05/22 22:30:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.37 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
projInfo = node->ps.ps_ProjInfo;
/*
- * If we have neither a qual to check nor a projection to do,
- * just skip all the overhead and return the raw scan tuple.
+ * If we have neither a qual to check nor a projection to do, just skip
+ * all the overhead and return the raw scan tuple.
*/
if (!qual && !projInfo)
return (*accessMtd) (node);
/*
- * Check to see if we're still projecting out tuples from a previous
- * scan tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous scan
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->ps.ps_TupFromTlist)
{
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a scan tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a scan tuple.
*/
econtext = node->ps.ps_ExprContext;
ResetExprContext(econtext);
/*
- * get a tuple from the access method loop until we obtain a tuple
- * which passes the qualification.
+ * get a tuple from the access method loop until we obtain a tuple which
+ * passes the qualification.
*/
for (;;)
{
slot = (*accessMtd) (node);
/*
- * if the slot returned by the accessMtd contains NULL, then it
- * means there is nothing more to scan so we just return an empty
- * slot, being careful to use the projection result slot so it has
- * correct tupleDesc.
+ * if the slot returned by the accessMtd contains NULL, then it means
+ * there is nothing more to scan so we just return an empty slot,
+ * being careful to use the projection result slot so it has correct
+ * tupleDesc.
*/
if (TupIsNull(slot))
{
* check that the current tuple satisfies the qual-clause
*
* check for non-nil qual here to avoid a function call to ExecQual()
- * when the qual is nil ... saves only a few cycles, but they add
- * up ...
+ * when the qual is nil ... saves only a few cycles, but they add up
+ * ...
*/
if (!qual || ExecQual(qual, econtext, false))
{
if (projInfo)
{
/*
- * Form a projection tuple, store it in the result tuple
- * slot and return it --- unless we find we can project no
- * tuples from this scan tuple, in which case continue
- * scan.
+ * Form a projection tuple, store it in the result tuple slot
+ * and return it --- unless we find we can project no tuples
+ * from this scan tuple, in which case continue scan.
*/
resultSlot = ExecProject(projInfo, &isDone);
if (isDone != ExprEndResult)
return false; /* tlist too long */
/*
- * If the plan context requires a particular hasoid setting, then that
- * has to match, too.
+ * If the plan context requires a particular hasoid setting, then that has
+ * to match, too.
*/
if (ExecContextForcesOids(ps, &hasoid) &&
hasoid != tupdesc->tdhasoid)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.87 2005/04/06 16:34:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.88 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* allocate the table itself
*/
newtable = (TupleTable) palloc(sizeof(TupleTableData) +
- (tableSize - 1) * sizeof(TupleTableSlot));
+ (tableSize - 1) *sizeof(TupleTableSlot));
newtable->size = tableSize;
newtable->next = 0;
Assert(table != NULL);
/*
- * first free all the valid pointers in the tuple array and drop
- * refcounts of any referenced buffers, if that's what the caller
- * wants. (There is probably no good reason for the caller ever not
- * to want it!)
+ * first free all the valid pointers in the tuple array and drop refcounts
+ * of any referenced buffers, if that's what the caller wants. (There is
+ * probably no good reason for the caller ever not to want it!)
*/
if (shouldFree)
{
Assert(table != NULL);
/*
- * We expect that the table was made big enough to begin with.
- * We cannot reallocate it on the fly since previous plan nodes
- * have already got pointers to individual entries.
+ * We expect that the table was made big enough to begin with. We cannot
+ * reallocate it on the fly since previous plan nodes have already got
+ * pointers to individual entries.
*/
if (table->next >= table->size)
elog(ERROR, "plan requires more slots than are available");
ExecClearTuple(slot);
/*
- * Release any old descriptor. Also release old Datum/isnull arrays
- * if present (we don't bother to check if they could be re-used).
+ * Release any old descriptor. Also release old Datum/isnull arrays if
+ * present (we don't bother to check if they could be re-used).
*/
if (slot->tts_shouldFreeDesc)
FreeTupleDesc(slot->tts_tupleDescriptor);
slot->tts_shouldFreeDesc = shouldFree;
/*
- * Allocate Datum/isnull arrays of the appropriate size. These must
- * have the same lifetime as the slot, so allocate in the slot's own
- * context.
+ * Allocate Datum/isnull arrays of the appropriate size. These must have
+ * the same lifetime as the slot, so allocate in the slot's own context.
*/
slot->tts_values = (Datum *)
MemoryContextAlloc(slot->tts_mcxt, tupdesc->natts * sizeof(Datum));
slot->tts_tuple = tuple;
/*
- * If tuple is on a disk page, keep the page pinned as long as we hold
- * a pointer into it. We assume the caller already has such a pin.
+ * If tuple is on a disk page, keep the page pinned as long as we hold a
+ * pointer into it. We assume the caller already has such a pin.
*/
slot->tts_buffer = buffer;
if (BufferIsValid(buffer))
Assert(!slot->tts_isempty);
/*
- * If we have a physical tuple, and it's locally palloc'd, we have
- * nothing to do.
+ * If we have a physical tuple, and it's locally palloc'd, we have nothing
+ * to do.
*/
if (slot->tts_tuple && slot->tts_shouldFree)
return slot->tts_tuple;
/*
* Otherwise, copy or build a tuple, and then store it as the new slot
- * value. (Note: tts_nvalid will be reset to zero here. There are
- * cases in which this could be optimized but it's probably not worth
- * worrying about.)
+ * value. (Note: tts_nvalid will be reset to zero here. There are cases
+ * in which this could be optimized but it's probably not worth worrying
+ * about.)
*
- * We may be called in a context that is shorter-lived than the
- * tuple slot, but we have to ensure that the materialized tuple
- * will survive anyway.
+ * We may be called in a context that is shorter-lived than the tuple slot,
+ * but we have to ensure that the materialized tuple will survive anyway.
*/
oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
newTuple = ExecCopySlotTuple(slot);
MemoryContext oldContext;
/*
- * There might be ways to optimize this when the source is virtual,
- * but for now just always build a physical copy. Make sure it is
- * in the right context.
+ * There might be ways to optimize this when the source is virtual, but
+ * for now just always build a physical copy. Make sure it is in the
+ * right context.
*/
oldContext = MemoryContextSwitchTo(dstslot->tts_mcxt);
newTuple = ExecCopySlotTuple(srcslot);
attinmeta->tupdesc = BlessTupleDesc(tupdesc);
/*
- * Gather info needed later to call the "in" function for each
- * attribute
+ * Gather info needed later to call the "in" function for each attribute
*/
attinfuncinfo = (FmgrInfo *) palloc0(natts * sizeof(FmgrInfo));
attioparams = (Oid *) palloc0(natts * sizeof(Oid));
tuple = heap_formtuple(tupdesc, dvalues, nulls);
/*
- * Release locally palloc'd space. XXX would probably be good to
- * pfree values of pass-by-reference datums, as well.
+ * Release locally palloc'd space. XXX would probably be good to pfree
+ * values of pass-by-reference datums, as well.
*/
pfree(dvalues);
pfree(nulls);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.125 2005/08/01 20:31:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.126 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int NTupleAppended;
int NTupleDeleted;
int NIndexTupleInserted;
-extern int NIndexTupleProcessed; /* have to be defined in the
- * access method level so that the
+extern int NIndexTupleProcessed; /* have to be defined in the access
+ * method level so that the
* cinterface.a will link ok. */
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Make the EState node within the per-query context. This way, we
- * don't need a separate pfree() operation for it at shutdown.
+ * Make the EState node within the per-query context. This way, we don't
+ * need a separate pfree() operation for it at shutdown.
*/
oldcontext = MemoryContextSwitchTo(qcontext);
FreeExecutorState(EState *estate)
{
/*
- * Shut down and free any remaining ExprContexts. We do this
- * explicitly to ensure that any remaining shutdown callbacks get
- * called (since they might need to release resources that aren't
- * simply memory within the per-query memory context).
+ * Shut down and free any remaining ExprContexts. We do this explicitly
+ * to ensure that any remaining shutdown callbacks get called (since they
+ * might need to release resources that aren't simply memory within the
+ * per-query memory context).
*/
while (estate->es_exprcontexts)
{
/*
- * XXX: seems there ought to be a faster way to implement this
- * than repeated list_delete(), no?
+ * XXX: seems there ought to be a faster way to implement this than
+ * repeated list_delete(), no?
*/
FreeExprContext((ExprContext *) linitial(estate->es_exprcontexts));
/* FreeExprContext removed the list link for us */
econtext->ecxt_callbacks = NULL;
/*
- * Link the ExprContext into the EState to ensure it is shut down when
- * the EState is freed. Because we use lcons(), shutdowns will occur
- * in reverse order of creation, which may not be essential but can't
- * hurt.
+ * Link the ExprContext into the EState to ensure it is shut down when the
+ * EState is freed. Because we use lcons(), shutdowns will occur in
+ * reverse order of creation, which may not be essential but can't hurt.
*/
estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts);
}
/*
- * ExecTypeFromTL needs the parse-time representation of the tlist,
- * not a list of ExprStates. This is good because some plan nodes
- * don't bother to set up planstate->targetlist ...
+ * ExecTypeFromTL needs the parse-time representation of the tlist, not a
+ * list of ExprStates. This is good because some plan nodes don't bother
+ * to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
ExecAssignResultType(planstate, tupDesc, true);
/*
* Determine whether the target list consists entirely of simple Var
- * references (ie, references to non-system attributes). If so,
- * we can use the simpler ExecVariableList instead of ExecTargetList.
+ * references (ie, references to non-system attributes). If so, we can
+ * use the simpler ExecVariableList instead of ExecTargetList.
*/
isVarList = true;
foreach(tl, targetList)
AttrNumber lastOuterVar = 0;
AttrNumber lastScanVar = 0;
- projInfo->pi_itemIsDone = NULL; /* not needed */
+ projInfo->pi_itemIsDone = NULL; /* not needed */
projInfo->pi_varSlotOffsets = varSlotOffsets = (int *)
palloc0(len * sizeof(int));
projInfo->pi_varNumbers = varNumbers = (int *)
palloc0(len * sizeof(int));
/*
- * Set up the data needed by ExecVariableList. The slots in which
- * the variables can be found at runtime are denoted by the offsets
- * of their slot pointers within the econtext. This rather grotty
- * representation is needed because the caller may not have given
- * us the real econtext yet (see hacks in nodeSubplan.c).
+ * Set up the data needed by ExecVariableList. The slots in which the
+ * variables can be found at runtime are denoted by the offsets of
+ * their slot pointers within the econtext. This rather grotty
+ * representation is needed because the caller may not have given us
+ * the real econtext yet (see hacks in nodeSubplan.c).
*/
foreach(tl, targetList)
{
*
* However ... there is no particular need to do it during ExecEndNode,
* because FreeExecutorState will free any remaining ExprContexts within
- * the EState. Letting FreeExecutorState do it allows the ExprContexts to
+ * the EState. Letting FreeExecutorState do it allows the ExprContexts to
* be freed in reverse order of creation, rather than order of creation as
* will happen if we delete them here, which saves O(N^2) work in the list
* cleanup inside FreeExprContext.
ExecFreeExprContext(PlanState *planstate)
{
/*
- * Per above discussion, don't actually delete the ExprContext.
- * We do unlink it from the plan node, though.
+ * Per above discussion, don't actually delete the ExprContext. We do
+ * unlink it from the plan node, though.
*/
planstate->ps_ExprContext = NULL;
}
* to a new tablespace.
*
* If the index AM is not safe for concurrent updates, obtain an
- * exclusive lock on the index to lock out other updaters as well
- * as readers (index_beginscan places AccessShareLock).
+ * exclusive lock on the index to lock out other updaters as well as
+ * readers (index_beginscan places AccessShareLock).
*
- * If there are multiple not-concurrent-safe indexes, all backends
- * must lock the indexes in the same order or we will get deadlocks
- * here. This is guaranteed by RelationGetIndexList(), which promises
- * to return the index list in OID order.
+ * If there are multiple not-concurrent-safe indexes, all backends must
+ * lock the indexes in the same order or we will get deadlocks here.
+ * This is guaranteed by RelationGetIndexList(), which promises to
+ * return the index list in OID order.
*
* The locks will be released in ExecCloseIndices.
*/
heapRelation = resultRelInfo->ri_RelationDesc;
/*
- * We will use the EState's per-tuple context for evaluating
- * predicates and index expressions (creating it if it's not already
- * there).
+ * We will use the EState's per-tuple context for evaluating predicates
+ * and index expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
List *predicate;
/*
- * If predicate state not set up yet, create it (in the
- * estate's per-query context)
+ * If predicate state not set up yet, create it (in the estate's
+ * per-query context)
*/
predicate = indexInfo->ii_PredicateState;
if (predicate == NIL)
}
/*
- * FormIndexDatum fills in its values and isnull parameters with
- * the appropriate values for the column(s) of the index.
+ * FormIndexDatum fills in its values and isnull parameters with the
+ * appropriate values for the column(s) of the index.
*/
FormIndexDatum(indexInfo,
slot,
isnull);
/*
- * The index AM does the rest. Note we suppress unique-index
- * checks if we are being called from VACUUM, since VACUUM may
- * need to move dead tuples that have the same keys as live ones.
+ * The index AM does the rest. Note we suppress unique-index checks
+ * if we are being called from VACUUM, since VACUUM may need to move
+ * dead tuples that have the same keys as live ones.
*/
index_insert(relationDescs[i], /* index relation */
- values, /* array of index Datums */
- isnull, /* null flags */
- tupleid, /* tid of heap tuple */
+ values, /* array of index Datums */
+ isnull, /* null flags */
+ tupleid, /* tid of heap tuple */
heapRelation,
relationDescs[i]->rd_index->indisunique && !is_vacuum);
Bitmapset *parmset;
/*
- * The plan node only depends on params listed in its allParam set.
- * Don't include anything else into its chgParam set.
+ * The plan node only depends on params listed in its allParam set. Don't
+ * include anything else into its chgParam set.
*/
parmset = bms_intersect(node->plan->allParam, newchg);
/*
- * Keep node->chgParam == NULL if there's not actually any members;
- * this allows the simplest possible tests in executor node files.
+ * Keep node->chgParam == NULL if there's not actually any members; this
+ * allows the simplest possible tests in executor node files.
*/
if (!bms_is_empty(parmset))
node->chgParam = bms_join(node->chgParam, parmset);
return;
/*
- * Call the callbacks in econtext's per-tuple context. This ensures
- * that any memory they might leak will get cleaned up.
+ * Call the callbacks in econtext's per-tuple context. This ensures that
+ * any memory they might leak will get cleaned up.
*/
oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.97 2005/04/10 18:04:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.98 2005/10/15 02:49:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* non-export function prototypes */
static execution_state *init_execution_state(List *queryTree_list,
- bool readonly_func);
+ bool readonly_func);
static void init_sql_fcache(FmgrInfo *finfo);
static void postquel_start(execution_state *es, SQLFunctionCachePtr fcache);
static TupleTableSlot *postquel_getnext(execution_state *es);
IsA(queryTree->utilityStmt, TransactionStmt))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL statement name */
+ /* translator: %s is a SQL statement name */
errmsg("%s is not allowed in a SQL function",
CreateQueryTag(queryTree))));
if (readonly_func && !QueryIsReadOnly(queryTree))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL statement name */
+ /* translator: %s is a SQL statement name */
errmsg("%s is not allowed in a non-volatile function",
CreateQueryTag(queryTree))));
procedureStruct = (Form_pg_proc) GETSTRUCT(procedureTuple);
/*
- * get the result type from the procedure tuple, and check for
- * polymorphic result type; if so, find out the actual result type.
+ * get the result type from the procedure tuple, and check for polymorphic
+ * result type; if so, find out the actual result type.
*/
rettype = procedureStruct->prorettype;
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("could not determine actual result type for function declared to return type %s",
- format_type_be(procedureStruct->prorettype))));
+ format_type_be(procedureStruct->prorettype))));
}
fcache->rettype = rettype;
typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);
/*
- * get the type length and by-value flag from the type tuple; also do
- * a preliminary check for returnsTuple (this may prove inaccurate,
- * see below).
+ * get the type length and by-value flag from the type tuple; also do a
+ * preliminary check for returnsTuple (this may prove inaccurate, see
+ * below).
*/
fcache->typlen = typeStruct->typlen;
fcache->typbyval = typeStruct->typbyval;
rettype == RECORDOID);
/*
- * Parse and rewrite the queries. We need the argument type info to
- * pass to the parser.
+ * Parse and rewrite the queries. We need the argument type info to pass
+ * to the parser.
*/
nargs = procedureStruct->pronargs;
haspolyarg = false;
queryTree_list = pg_parse_and_rewrite(src, argOidVect, nargs);
/*
- * If the function has any arguments declared as polymorphic types,
- * then it wasn't type-checked at definition time; must do so now.
+ * If the function has any arguments declared as polymorphic types, then
+ * it wasn't type-checked at definition time; must do so now.
*
- * Also, force a type-check if the declared return type is a rowtype; we
- * need to find out whether we are actually returning the whole tuple
- * result, or just regurgitating a rowtype expression result. In the
- * latter case we clear returnsTuple because we need not act different
- * from the scalar result case.
+ * Also, force a type-check if the declared return type is a rowtype; we need
+ * to find out whether we are actually returning the whole tuple result,
+ * or just regurgitating a rowtype expression result. In the latter case
+ * we clear returnsTuple because we need not act different from the scalar
+ * result case.
*
- * In the returnsTuple case, check_sql_fn_retval will also construct
- * a JunkFilter we can use to coerce the returned rowtype to the desired
+ * In the returnsTuple case, check_sql_fn_retval will also construct a
+ * JunkFilter we can use to coerce the returned rowtype to the desired
* form.
*/
if (haspolyarg || fcache->returnsTuple)
/*
* In a read-only function, use the surrounding query's snapshot;
* otherwise take a new snapshot for each query. The snapshot should
- * include a fresh command ID so that all work to date in this
- * transaction is visible. We copy in both cases so that postquel_end
- * can unconditionally do FreeSnapshot.
+ * include a fresh command ID so that all work to date in this transaction
+ * is visible. We copy in both cases so that postquel_end can
+ * unconditionally do FreeSnapshot.
*/
if (fcache->readonly_func)
snapshot = CopySnapshot(ActiveSnapshot);
if (TupIsNull(slot))
{
/*
- * We fall out here for all cases except where we have obtained
- * a row from a function's final SELECT.
+ * We fall out here for all cases except where we have obtained a row
+ * from a function's final SELECT.
*/
postquel_end(es);
fcinfo->isnull = true;
}
/*
- * If we got a row from a command within the function it has to be
- * the final command. All others shouldn't be returning anything.
+ * If we got a row from a command within the function it has to be the
+ * final command. All others shouldn't be returning anything.
*/
Assert(LAST_POSTQUEL_COMMAND(es));
/*
- * Set up to return the function value. For pass-by-reference
- * datatypes, be sure to allocate the result in resultcontext,
- * not the current memory context (which has query lifespan).
+ * Set up to return the function value. For pass-by-reference datatypes,
+ * be sure to allocate the result in resultcontext, not the current memory
+ * context (which has query lifespan).
*/
oldcontext = MemoryContextSwitchTo(resultcontext);
if (fcache->returnsTuple)
{
/*
- * We are returning the whole tuple, so filter it and apply the
- * proper labeling to make it a valid Datum. There are several
- * reasons why we do this:
+ * We are returning the whole tuple, so filter it and apply the proper
+ * labeling to make it a valid Datum. There are several reasons why
+ * we do this:
*
- * 1. To copy the tuple out of the child execution context and
- * into the desired result context.
+ * 1. To copy the tuple out of the child execution context and into the
+ * desired result context.
*
- * 2. To remove any junk attributes present in the raw subselect
- * result. (This is probably not absolutely necessary, but it
- * seems like good policy.)
+ * 2. To remove any junk attributes present in the raw subselect result.
+ * (This is probably not absolutely necessary, but it seems like good
+ * policy.)
*
- * 3. To insert dummy null columns if the declared result type
- * has any attisdropped columns.
+ * 3. To insert dummy null columns if the declared result type has any
+ * attisdropped columns.
*/
HeapTuple newtup;
HeapTupleHeader dtup;
newtup = ExecRemoveJunk(fcache->junkFilter, slot);
/*
- * Compress out the HeapTuple header data. We assume that
- * heap_form_tuple made the tuple with header and body in one
- * palloc'd chunk. We want to return a pointer to the chunk
- * start so that it will work if someone tries to free it.
+ * Compress out the HeapTuple header data. We assume that
+ * heap_form_tuple made the tuple with header and body in one palloc'd
+ * chunk. We want to return a pointer to the chunk start so that it
+ * will work if someone tries to free it.
*/
t_len = newtup->t_len;
dtup = (HeapTupleHeader) newtup;
memmove((char *) dtup, (char *) newtup->t_data, t_len);
/*
- * Use the declared return type if it's not RECORD; else take
- * the type from the computed result, making sure a typmod has
- * been assigned.
+ * Use the declared return type if it's not RECORD; else take the type
+ * from the computed result, making sure a typmod has been assigned.
*/
if (fcache->rettype != RECORDOID)
{
else
{
/*
- * Returning a scalar, which we have to extract from the first
- * column of the SELECT result, and then copy into result
- * context if needed.
+ * Returning a scalar, which we have to extract from the first column
+ * of the SELECT result, and then copy into result context if needed.
*/
value = slot_getattr(slot, 1, &(fcinfo->isnull));
es = fcache->func_state;
/*
- * Convert params to appropriate format if starting a fresh execution.
- * (If continuing execution, we can re-use prior params.)
+ * Convert params to appropriate format if starting a fresh execution. (If
+ * continuing execution, we can re-use prior params.)
*/
if (es && es->status == F_EXEC_START)
postquel_sub_params(fcache, fcinfo);
/*
* Execute each command in the function one after another until we're
- * executing the final command and get a result or we run out of
- * commands.
+ * executing the final command and get a result or we run out of commands.
*/
while (es)
{
}
/*
- * If we got a result from a command within the function it has to be
- * the final command. All others shouldn't be returning anything.
+ * If we got a result from a command within the function it has to be the
+ * final command. All others shouldn't be returning anything.
*/
Assert(LAST_POSTQUEL_COMMAND(es));
errmsg("set-valued function called in context that cannot accept a set")));
/*
- * Ensure we will get shut down cleanly if the exprcontext is not
- * run to completion.
+ * Ensure we will get shut down cleanly if the exprcontext is not run
+ * to completion.
*/
if (!fcache->shutdown_reg)
{
fn_name = NameStr(functup->proname);
/*
- * If there is a syntax error position, convert to internal syntax
- * error
+ * If there is a syntax error position, convert to internal syntax error
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
}
/*
- * Try to determine where in the function we failed. If there is a
- * query with non-null QueryDesc, finger it. (We check this rather
- * than looking for F_EXEC_RUN state, so that errors during
- * ExecutorStart or ExecutorEnd are blamed on the appropriate query;
- * see postquel_start and postquel_end.)
+ * Try to determine where in the function we failed. If there is a query
+ * with non-null QueryDesc, finger it. (We check this rather than looking
+ * for F_EXEC_RUN state, so that errors during ExecutorStart or
+ * ExecutorEnd are blamed on the appropriate query; see postquel_start and
+ * postquel_end.)
*/
if (fcache)
{
if (rettype != VOIDOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Function's final statement must be a SELECT.")));
return false;
}
tlist = parse->targetList;
/*
- * The last query must be a SELECT if and only if return type isn't
- * VOID.
+ * The last query must be a SELECT if and only if return type isn't VOID.
*/
if (rettype == VOIDOID)
{
if (cmd == CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Function's final statement must not be a SELECT.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Function's final statement must not be a SELECT.")));
return false;
}
if (cmd != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Function's final statement must be a SELECT.")));
/*
* Count the non-junk entries in the result targetlist.
{
/*
* For base-type returns, the target list should have exactly one
- * entry, and its type should agree with what the user declared.
- * (As of Postgres 7.2, we accept binary-compatible types too.)
+ * entry, and its type should agree with what the user declared. (As
+ * of Postgres 7.2, we accept binary-compatible types too.)
*/
if (tlistlen != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Final SELECT must return exactly one column.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Final SELECT must return exactly one column.")));
restype = exprType((Node *) ((TargetEntry *) linitial(tlist))->expr);
if (!IsBinaryCoercible(restype, rettype))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
errdetail("Actual return type is %s.",
format_type_be(restype))));
}
{
/* Returns a rowtype */
TupleDesc tupdesc;
- int tupnatts; /* physical number of columns in tuple */
- int tuplogcols; /* # of nondeleted columns in tuple */
- int colindex; /* physical column index */
+ int tupnatts; /* physical number of columns in tuple */
+ int tuplogcols; /* # of nondeleted columns in tuple */
+ int colindex; /* physical column index */
/*
- * If the target list is of length 1, and the type of the varnode
- * in the target list matches the declared return type, this is
- * okay. This can happen, for example, where the body of the
- * function is 'SELECT func2()', where func2 has the same return
- * type as the function that's calling it.
+ * If the target list is of length 1, and the type of the varnode in
+ * the target list matches the declared return type, this is okay.
+ * This can happen, for example, where the body of the function is
+ * 'SELECT func2()', where func2 has the same return type as the
+ * function that's calling it.
*/
if (tlistlen == 1)
{
if (get_func_result_type(func_id, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
{
/*
- * Assume we are returning the whole tuple.
- * Crosschecking against what the caller expects will happen at
- * runtime.
+ * Assume we are returning the whole tuple. Crosschecking against
+ * what the caller expects will happen at runtime.
*/
if (junkFilter)
*junkFilter = ExecInitJunkFilter(tlist, false, NULL);
Assert(tupdesc);
/*
- * Verify that the targetlist matches the return tuple type.
- * We scan the non-deleted attributes to ensure that they match the
- * datatypes of the non-resjunk columns.
+ * Verify that the targetlist matches the return tuple type. We scan
+ * the non-deleted attributes to ensure that they match the datatypes
+ * of the non-resjunk columns.
*/
tupnatts = tupdesc->natts;
tuplogcols = 0; /* we'll count nondeleted cols as we go */
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Final SELECT returns too many columns.")));
+ errdetail("Final SELECT returns too many columns.")));
attr = tupdesc->attrs[colindex - 1];
} while (attr->attisdropped);
tuplogcols++;
if (tlistlen != tuplogcols)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
errdetail("Final SELECT returns too few columns.")));
/* Set up junk filter if needed */
if (junkFilter)
*junkFilter = ExecInitJunkFilterConversion(tlist,
- CreateTupleDescCopy(tupdesc),
- NULL);
+ CreateTupleDescCopy(tupdesc),
+ NULL);
/* Report that we are returning entire tuple result */
return true;
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type %s is not supported for SQL functions",
- format_type_be(rettype))));
+ errmsg("return type %s is not supported for SQL functions",
+ format_type_be(rettype))));
return false;
}
* Copyright (c) 2001-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.12 2005/04/16 20:07:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.13 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
InstrStopNode(Instrumentation *instr, bool returnedTuple)
{
- instr_time endtime;
+ instr_time endtime;
/* count the returned tuples */
if (returnedTuple)
instr->counter.tv_usec -= 1000000;
instr->counter.tv_sec++;
}
-#else /* WIN32 */
+#else /* WIN32 */
instr->counter.QuadPart += (endtime.QuadPart - instr->starttime.QuadPart);
#endif
* pass-by-ref inputs, but in the aggregate case we know the left input is
* either the initial transition value or a previous function result, and
* in either case its value need not be preserved. See int8inc() for an
- * example. Notice that advance_transition_function() is coded to avoid a
+ * example. Notice that advance_transition_function() is coded to avoid a
* data copy step when the previous transition value pointer is returned.
*
*
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.134 2005/06/28 05:08:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.135 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* fmgr lookup data for transfer functions --- only valid when
- * corresponding oid is not InvalidOid. Note in particular that
- * fn_strict flags are kept here.
+ * corresponding oid is not InvalidOid. Note in particular that fn_strict
+ * flags are kept here.
*/
FmgrInfo transfn;
FmgrInfo finalfn;
Oid sortOperator;
/*
- * fmgr lookup data for input type's equality operator --- only
- * set/used when aggregate has DISTINCT flag.
+ * fmgr lookup data for input type's equality operator --- only set/used
+ * when aggregate has DISTINCT flag.
*/
FmgrInfo equalfn;
transtypeByVal;
/*
- * These values are working state that is initialized at the start of
- * an input tuple group and updated for each input tuple.
+ * These values are working state that is initialized at the start of an
+ * input tuple group and updated for each input tuple.
*
* For a simple (non DISTINCT) aggregate, we just feed the input values
* straight to the transition function. If it's DISTINCT, we pass the
- * input values into a Tuplesort object; then at completion of the
- * input tuple group, we scan the sorted values, eliminate duplicates,
- * and run the transition function on the rest.
+ * input values into a Tuplesort object; then at completion of the input
+ * tuple group, we scan the sorted values, eliminate duplicates, and run
+ * the transition function on the rest.
*/
Tuplesortstate *sortstate; /* sort object, if a DISTINCT agg */
bool noTransValue; /* true if transValue not set yet */
/*
- * Note: noTransValue initially has the same value as
- * transValueIsNull, and if true both are cleared to false at the same
- * time. They are not the same though: if transfn later returns a
- * NULL, we want to keep that NULL and not auto-replace it with a
- * later input value. Only the first non-NULL input will be
- * auto-substituted.
+ * Note: noTransValue initially has the same value as transValueIsNull,
+ * and if true both are cleared to false at the same time. They are not
+ * the same though: if transfn later returns a NULL, we want to keep that
+ * NULL and not auto-replace it with a later input value. Only the first
+ * non-NULL input will be auto-substituted.
*/
} AggStatePerGroupData;
}
/*
- * If we are reinitializing after a group boundary, we have to
- * free any prior transValue to avoid memory leakage. We must
- * check not only the isnull flag but whether the pointer is NULL;
- * since pergroupstate is initialized with palloc0, the initial
- * condition has isnull = 0 and null pointer.
+ * If we are reinitializing after a group boundary, we have to free
+ * any prior transValue to avoid memory leakage. We must check not
+ * only the isnull flag but whether the pointer is NULL; since
+ * pergroupstate is initialized with palloc0, the initial condition
+ * has isnull = 0 and null pointer.
*/
if (!peraggstate->transtypeByVal &&
!pergroupstate->transValueIsNull &&
/*
* (Re)set transValue to the initial value.
*
- * Note that when the initial value is pass-by-ref, we must copy it
- * (into the aggcontext) since we will pfree the transValue later.
+ * Note that when the initial value is pass-by-ref, we must copy it (into
+ * the aggcontext) since we will pfree the transValue later.
*/
if (peraggstate->initValueIsNull)
pergroupstate->transValue = peraggstate->initValue;
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(peraggstate->initValue,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
MemoryContextSwitchTo(oldContext);
}
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
/*
- * If the initial value for the transition state doesn't exist in
- * the pg_aggregate table then we will let the first non-NULL
- * value returned from the outer procNode become the initial
- * value. (This is useful for aggregates like max() and min().)
- * The noTransValue flag signals that we still need to do this.
+ * If the initial value for the transition state doesn't exist in the
+ * pg_aggregate table then we will let the first non-NULL value
+ * returned from the outer procNode become the initial value. (This is
+ * useful for aggregates like max() and min().) The noTransValue flag
+ * signals that we still need to do this.
*/
pergroupstate->noTransValue = peraggstate->initValueIsNull;
}
if (pergroupstate->noTransValue)
{
/*
- * transValue has not been initialized. This is the first
- * non-NULL input value. We use it as the initial value for
- * transValue. (We already checked that the agg's input type
- * is binary-compatible with its transtype, so straight copy
- * here is OK.)
+ * transValue has not been initialized. This is the first non-NULL
+ * input value. We use it as the initial value for transValue. (We
+ * already checked that the agg's input type is binary-compatible
+ * with its transtype, so straight copy here is OK.)
*
- * We must copy the datum into aggcontext if it is pass-by-ref.
- * We do not need to pfree the old transValue, since it's
- * NULL.
+ * We must copy the datum into aggcontext if it is pass-by-ref. We do
+ * not need to pfree the old transValue, since it's NULL.
*/
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(newVal,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
pergroupstate->transValueIsNull = false;
pergroupstate->noTransValue = false;
MemoryContextSwitchTo(oldContext);
{
/*
* Don't call a strict function with NULL inputs. Note it is
- * possible to get here despite the above tests, if the
- * transfn is strict *and* returned a NULL on a prior cycle.
- * If that happens we will propagate the NULL all the way to
- * the end.
+ * possible to get here despite the above tests, if the transfn is
+ * strict *and* returned a NULL on a prior cycle. If that happens
+ * we will propagate the NULL all the way to the end.
*/
return;
}
newVal = FunctionCallInvoke(&fcinfo);
/*
- * If pass-by-ref datatype, must copy the new value into aggcontext
- * and pfree the prior transValue. But if transfn returned a pointer
- * to its first input, we don't need to do anything.
+ * If pass-by-ref datatype, must copy the new value into aggcontext and
+ * pfree the prior transValue. But if transfn returned a pointer to its
+ * first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
- DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
+ DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
{
if (!fcinfo.isnull)
{
tuplesort_performsort(peraggstate->sortstate);
/*
- * Note: if input type is pass-by-ref, the datums returned by the sort
- * are freshly palloc'd in the per-query context, so we must be
- * careful to pfree them when they are no longer needed.
+ * Note: if input type is pass-by-ref, the datums returned by the sort are
+ * freshly palloc'd in the per-query context, so we must be careful to
+ * pfree them when they are no longer needed.
*/
while (tuplesort_getdatum(peraggstate->sortstate, true,
&newVal, &isNull))
{
/*
- * DISTINCT always suppresses nulls, per SQL spec, regardless of
- * the transition function's strictness.
+ * DISTINCT always suppresses nulls, per SQL spec, regardless of the
+ * transition function's strictness.
*/
if (isNull)
continue;
/*
- * Clear and select the working context for evaluation of the
- * equality function and transition function.
+ * Clear and select the working context for evaluation of the equality
+ * function and transition function.
*/
MemoryContextReset(workcontext);
oldContext = MemoryContextSwitchTo(workcontext);
while (!aggstate->agg_done)
{
/*
- * If we don't already have the first tuple of the new group,
- * fetch it from the outer plan.
+ * If we don't already have the first tuple of the new group, fetch it
+ * from the outer plan.
*/
if (aggstate->grp_firstTuple == NULL)
{
if (!TupIsNull(outerslot))
{
/*
- * Make a copy of the first input tuple; we will use this
- * for comparisons (in group mode) and for projection.
+ * Make a copy of the first input tuple; we will use this for
+ * comparisons (in group mode) and for projection.
*/
aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot);
}
{
/*
* Store the copied first input tuple in the tuple table slot
- * reserved for it. The tuple will be deleted when it is
- * cleared from the slot.
+ * reserved for it. The tuple will be deleted when it is cleared
+ * from the slot.
*/
ExecStoreTuple(aggstate->grp_firstTuple,
firstSlot,
outerslot,
node->numCols, node->grpColIdx,
aggstate->eqfunctions,
- tmpcontext->ecxt_per_tuple_memory))
+ tmpcontext->ecxt_per_tuple_memory))
{
/*
* Save the first input tuple of the next group.
/*
* If we have no first tuple (ie, the outerPlan didn't return
* anything), create a dummy all-nulls input tuple for use by
- * ExecQual/ExecProject. 99.44% of the time this is a waste of
- * cycles, because ordinarily the projected output tuple's
- * targetlist cannot contain any direct (non-aggregated)
- * references to input columns, so the dummy tuple will not be
- * referenced. However there are special cases where this isn't so
- * --- in particular an UPDATE involving an aggregate will have a
- * targetlist reference to ctid. We need to return a null for
- * ctid in that situation, not coredump.
+ * ExecQual/ExecProject. 99.44% of the time this is a waste of cycles,
+ * because ordinarily the projected output tuple's targetlist cannot
+ * contain any direct (non-aggregated) references to input columns, so
+ * the dummy tuple will not be referenced. However there are special
+ * cases where this isn't so --- in particular an UPDATE involving an
+ * aggregate will have a targetlist reference to ctid. We need to
+ * return a null for ctid in that situation, not coredump.
*
- * The values returned for the aggregates will be the initial values
- * of the transition functions.
+ * The values returned for the aggregates will be the initial values of
+ * the transition functions.
*/
if (TupIsNull(firstSlot))
{
econtext->ecxt_scantuple = firstSlot;
/*
- * Check the qual (HAVING clause); if the group does not match,
- * ignore it and loop back to try to process another group.
+ * Check the qual (HAVING clause); if the group does not match, ignore
+ * it and loop back to try to process another group.
*/
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the aggregate
- * results and the representative input tuple. Note we do not
- * support aggregates returning sets ...
+ * Form and return a projection tuple using the aggregate results
+ * and the representative input tuple. Note we do not support
+ * aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
tmpcontext = aggstate->tmpcontext;
/*
- * Process each outer-plan tuple, and then fetch the next one, until
- * we exhaust the outer plan.
+ * Process each outer-plan tuple, and then fetch the next one, until we
+ * exhaust the outer plan.
*/
for (;;)
{
ResetExprContext(econtext);
/*
- * Store the copied first input tuple in the tuple table slot
- * reserved for it, so that it can be used in ExecProject.
+ * Store the copied first input tuple in the tuple table slot reserved
+ * for it, so that it can be used in ExecProject.
*/
ExecStoreTuple(entry->shared.firstTuple,
firstSlot,
econtext->ecxt_scantuple = firstSlot;
/*
- * Check the qual (HAVING clause); if the group does not match,
- * ignore it and loop back to try to process another group.
+ * Check the qual (HAVING clause); if the group does not match, ignore
+ * it and loop back to try to process another group.
*/
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the aggregate
- * results and the representative input tuple. Note we do not
- * support aggregates returning sets ...
+ * Form and return a projection tuple using the aggregate results
+ * and the representative input tuple. Note we do not support
+ * aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
/*
* Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a
- * little by using ExecAssignExprContext() to build both.
+ * processing and one for per-output-tuple processing. We cheat a little
+ * by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
/*
* We also need a long-lived memory context for holding hashtable data
- * structures and transition values. NOTE: the details of what is
- * stored in aggcontext and what is stored in the regular per-query
- * memory context are driven by a simple decision: we want to reset
- * the aggcontext in ExecReScanAgg to recover no-longer-wanted space.
+ * structures and transition values. NOTE: the details of what is stored
+ * in aggcontext and what is stored in the regular per-query memory
+ * context are driven by a simple decision: we want to reset the
+ * aggcontext in ExecReScanAgg to recover no-longer-wanted space.
*/
aggstate->aggcontext =
AllocSetContextCreate(CurrentMemoryContext,
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
- * contain other agg calls in their arguments. This would make no
- * sense under SQL semantics anyway (and it's forbidden by the spec).
- * Because that is true, we don't need to worry about evaluating the
- * aggs in any particular order.
+ * contain other agg calls in their arguments. This would make no sense
+ * under SQL semantics anyway (and it's forbidden by the spec). Because
+ * that is true, we don't need to worry about evaluating the aggs in any
+ * particular order.
*/
aggstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->plan.targetlist,
if (numaggs <= 0)
{
/*
- * This is not an error condition: we might be using the Agg node
- * just to do hash-based grouping. Even in the regular case,
- * constant-expression simplification could optimize away all of
- * the Aggrefs in the targetlist and qual. So keep going, but
- * force local copy of numaggs positive so that palloc()s below
- * don't choke.
+ * This is not an error condition: we might be using the Agg node just
+ * to do hash-based grouping. Even in the regular case,
+ * constant-expression simplification could optimize away all of the
+ * Aggrefs in the targetlist and qual. So keep going, but force local
+ * copy of numaggs positive so that palloc()s below don't choke.
*/
numaggs = 1;
}
/*
- * If we are grouping, precompute fmgr lookup data for inner loop. We
- * need both equality and hashing functions to do it by hashing, but
- * only equality if not hashing.
+ * If we are grouping, precompute fmgr lookup data for inner loop. We need
+ * both equality and hashing functions to do it by hashing, but only
+ * equality if not hashing.
*/
if (node->numCols > 0)
{
}
/*
- * Set up aggregate-result storage in the output expr context, and
- * also allocate my private per-agg working storage
+ * Set up aggregate-result storage in the output expr context, and also
+ * allocate my private per-agg working storage
*/
econtext = aggstate->ss.ps.ps_ExprContext;
econtext->ecxt_aggvalues = (Datum *) palloc0(sizeof(Datum) * numaggs);
/*
* Perform lookups of aggregate function info, and initialize the
* unchanging fields of the per-agg data. We also detect duplicate
- * aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0").
- * When duplicates are detected, we only make an AggStatePerAgg struct
- * for the first one. The clones are simply pointed at the same
- * result entry by giving them duplicate aggno values.
+ * aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0"). When
+ * duplicates are detected, we only make an AggStatePerAgg struct for the
+ * first one. The clones are simply pointed at the same result entry by
+ * giving them duplicate aggno values.
*/
aggno = -1;
foreach(l, aggstate->aggs)
peraggstate->aggref = aggref;
/*
- * Get actual datatype of the input. We need this because it may
- * be different from the agg's declared input type, when the agg
- * accepts ANY (eg, COUNT(*)) or ANYARRAY or ANYELEMENT.
+ * Get actual datatype of the input. We need this because it may be
+ * different from the agg's declared input type, when the agg accepts
+ * ANY (eg, COUNT(*)) or ANYARRAY or ANYELEMENT.
*/
inputType = exprType((Node *) aggref->target);
/* Check that aggregate owner has permission to call component fns */
{
HeapTuple procTuple;
- Oid aggOwner;
+ Oid aggOwner;
procTuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(aggref->aggfnoid),
&peraggstate->transtypeByVal);
/*
- * initval is potentially null, so don't try to access it as a
- * struct field. Must do it the hard way with SysCacheGetAttr.
+ * initval is potentially null, so don't try to access it as a struct
+ * field. Must do it the hard way with SysCacheGetAttr.
*/
textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
Anum_pg_aggregate_agginitval,
aggtranstype);
/*
- * If the transfn is strict and the initval is NULL, make sure
- * input type and transtype are the same (or at least binary-
- * compatible), so that it's OK to use the first input value as
- * the initial transValue. This should have been checked at agg
- * definition time, but just in case...
+ * If the transfn is strict and the initval is NULL, make sure input
+ * type and transtype are the same (or at least binary- compatible),
+ * so that it's OK to use the first input value as the initial
+ * transValue. This should have been checked at agg definition time,
+ * but just in case...
*/
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
{
if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED)
{
/*
- * In the hashed case, if we haven't yet built the hash table then
- * we can just return; nothing done yet, so nothing to undo. If
- * subnode's chgParam is not NULL then it will be re-scanned by
- * ExecProcNode, else no reason to re-scan it at all.
+ * In the hashed case, if we haven't yet built the hash table then we
+ * can just return; nothing done yet, so nothing to undo. If subnode's
+ * chgParam is not NULL then it will be re-scanned by ExecProcNode,
+ * else no reason to re-scan it at all.
*/
if (!node->table_filled)
return;
/*
* If we do have the hash table and the subplan does not have any
- * parameter changes, then we can just rescan the existing hash
- * table; no need to build it again.
+ * parameter changes, then we can just rescan the existing hash table;
+ * no need to build it again.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
{
else
{
/*
- * Reset the per-group state (in particular, mark transvalues
- * null)
+ * Reset the per-group state (in particular, mark transvalues null)
*/
MemSet(node->pergroup, 0,
sizeof(AggStatePerGroupData) * node->numaggs);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.64 2005/05/22 22:30:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.65 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (whichplan < appendstate->as_firstplan)
{
/*
- * if scanning in reverse, we start at the last scan in the list
- * and then proceed back to the first.. in any case we inform
- * ExecAppend that we are at the end of the line by returning
- * FALSE
+ * if scanning in reverse, we start at the last scan in the list and
+ * then proceed back to the first.. in any case we inform ExecAppend
+ * that we are at the end of the line by returning FALSE
*/
appendstate->as_whichplan = appendstate->as_firstplan;
return FALSE;
else if (whichplan > appendstate->as_lastplan)
{
/*
- * as above, end the scan if we go beyond the last scan in our
- * list..
+ * as above, end the scan if we go beyond the last scan in our list..
*/
appendstate->as_whichplan = appendstate->as_lastplan;
return FALSE;
/*
* initialize the scan
*
- * If we are controlling the target relation, select the proper
- * active ResultRelInfo and junk filter for this target.
+ * If we are controlling the target relation, select the proper active
+ * ResultRelInfo and junk filter for this target.
*/
if (((Append *) appendstate->ps.plan)->isTarget)
{
appendstate->as_nplans = nplans;
/*
- * Do we want to scan just one subplan? (Special case for
- * EvalPlanQual) XXX pretty dirty way of determining that this case
- * applies ...
+ * Do we want to scan just one subplan? (Special case for EvalPlanQual)
+ * XXX pretty dirty way of determining that this case applies ...
*/
if (node->isTarget && estate->es_evTuple != NULL)
{
#define APPEND_NSLOTS 1
/*
- * append nodes still have Result slots, which hold pointers to
- * tuples, so we have to initialize them.
+ * append nodes still have Result slots, which hold pointers to tuples, so
+ * we have to initialize them.
*/
ExecInitResultTupleSlot(estate, &appendstate->ps);
}
/*
- * Initialize tuple type. (Note: in an inherited UPDATE situation,
- * the tuple type computed here corresponds to the parent table, which
- * is really a lie since tuples returned from child subplans will not
- * all look the same.)
+ * Initialize tuple type. (Note: in an inherited UPDATE situation, the
+ * tuple type computed here corresponds to the parent table, which is
+ * really a lie since tuples returned from child subplans will not all
+ * look the same.)
*/
ExecAssignResultTypeFromTL(&appendstate->ps);
appendstate->ps.ps_ProjInfo = NULL;
if (!TupIsNull(result))
{
/*
- * If the subplan gave us something then return it as-is.
- * We do NOT make use of the result slot that was set up in
- * ExecInitAppend, first because there's no reason to and
- * second because it may have the wrong tuple descriptor in
+ * If the subplan gave us something then return it as-is. We do
+ * NOT make use of the result slot that was set up in
+ * ExecInitAppend, first because there's no reason to and second
+ * because it may have the wrong tuple descriptor in
* inherited-UPDATE cases.
*/
return result;
}
/*
- * Go on to the "next" subplan in the appropriate direction.
- * If no more subplans, return the empty slot set up for us
- * by ExecInitAppend.
+ * Go on to the "next" subplan in the appropriate direction. If no
+ * more subplans, return the empty slot set up for us by
+ * ExecInitAppend.
*/
if (ScanDirectionIsForward(node->ps.state->es_direction))
node->as_whichplan++;
UpdateChangedParamSet(subnode, node->ps.chgParam);
/*
- * if chgParam of subnode is not null then plan will be re-scanned
- * by first ExecProcNode.
+ * if chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode.
*/
if (subnode->chgParam == NULL)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapAnd.c,v 1.3 2005/08/28 22:47:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapAnd.c,v 1.4 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* ExecInitBitmapAnd - initialize the BitmapAnd node
* MultiExecBitmapAnd - retrieve the result bitmap from the node
* ExecEndBitmapAnd - shut down the BitmapAnd node
- * ExecReScanBitmapAnd - rescan the BitmapAnd node
+ * ExecReScanBitmapAnd - rescan the BitmapAnd node
*
* NOTES
* BitmapAnd nodes don't make use of their left and right
elog(ERROR, "unrecognized result from subplan");
if (result == NULL)
- result = subresult; /* first subplan */
+ result = subresult; /* first subplan */
else
{
tbm_intersect(result, subresult);
}
/*
- * If at any stage we have a completely empty bitmap, we can fall
- * out without evaluating the remaining subplans, since ANDing them
- * can no longer change the result. (Note: the fact that indxpath.c
- * orders the subplans by selectivity should make this case more
- * likely to occur.)
+ * If at any stage we have a completely empty bitmap, we can fall out
+ * without evaluating the remaining subplans, since ANDing them can no
+ * longer change the result. (Note: the fact that indxpath.c orders
+ * the subplans by selectivity should make this case more likely to
+ * occur.)
*/
if (tbm_is_empty(result))
break;
/* must provide our own instrumentation support */
if (node->ps.instrument)
- InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */);
+ InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */ );
return (Node *) result;
}
*
* NOTE: it is critical that this plan type only be used with MVCC-compliant
* snapshots (ie, regular snapshots, not SnapshotNow or one of the other
- * special snapshots). The reason is that since index and heap scans are
+ * special snapshots). The reason is that since index and heap scans are
* decoupled, there can be no assurance that the index tuple prompting a
* visit to a particular heap TID still exists when the visit is made.
* Therefore the tuple might not exist anymore either (which is OK because
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.3 2005/10/06 02:29:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.4 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
tbmres = node->tbmres;
/*
- * Clear any reference to the previously returned tuple. The idea
- * here is to not have the tuple slot be the last holder of a pin on
- * that tuple's buffer; if it is, we'll need a separate visit to the
- * bufmgr to release the buffer. By clearing here, we get to have the
- * release done by ReleaseAndReadBuffer, below.
+ * Clear any reference to the previously returned tuple. The idea here is
+ * to not have the tuple slot be the last holder of a pin on that tuple's
+ * buffer; if it is, we'll need a separate visit to the bufmgr to release
+ * the buffer. By clearing here, we get to have the release done by
+ * ReleaseAndReadBuffer, below.
*/
ExecClearTuple(slot);
ResetExprContext(econtext);
if (!ExecQual(node->bitmapqualorig, econtext, false))
- ExecClearTuple(slot); /* would not be returned by scan */
+ ExecClearTuple(slot); /* would not be returned by scan */
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[scanrelid - 1] = true;
}
/*
- * If we haven't yet performed the underlying index scan, do it,
- * and prepare the bitmap to be iterated over.
+ * If we haven't yet performed the underlying index scan, do it, and
+ * prepare the bitmap to be iterated over.
*/
if (tbm == NULL)
{
}
/*
- * Ignore any claimed entries past what we think is the end of
- * the relation. (This is probably not necessary given that we
- * got AccessShareLock before performing any of the indexscans,
- * but let's be safe.)
+ * Ignore any claimed entries past what we think is the end of the
+ * relation. (This is probably not necessary given that we got
+ * AccessShareLock before performing any of the indexscans, but
+ * let's be safe.)
*/
if (tbmres->blockno >= scandesc->rs_nblocks)
{
}
/*
- * Acquire pin on the current heap page. We'll hold the pin
- * until done looking at the page. We trade in any pin we
- * held before.
+ * Acquire pin on the current heap page. We'll hold the pin until
+ * done looking at the page. We trade in any pin we held before.
*/
scandesc->rs_cbuf = ReleaseAndReadBuffer(scandesc->rs_cbuf,
scandesc->rs_rd,
tbmres->blockno);
/*
- * Determine how many entries we need to look at on this page.
- * If the bitmap is lossy then we need to look at each physical
- * item pointer; otherwise we just look through the offsets
- * listed in tbmres.
+ * Determine how many entries we need to look at on this page. If
+ * the bitmap is lossy then we need to look at each physical item
+ * pointer; otherwise we just look through the offsets listed in
+ * tbmres.
*/
if (tbmres->ntuples >= 0)
{
else
{
/* lossy case */
- Page dp;
+ Page dp;
LockBuffer(scandesc->rs_cbuf, BUFFER_LOCK_SHARE);
dp = (Page) BufferGetPage(scandesc->rs_cbuf);
ItemPointerSet(&scandesc->rs_ctup.t_self, tbmres->blockno, targoffset);
/*
- * Fetch the heap tuple and see if it matches the snapshot.
- * We use heap_release_fetch to avoid useless bufmgr traffic.
+ * Fetch the heap tuple and see if it matches the snapshot. We use
+ * heap_release_fetch to avoid useless bufmgr traffic.
*/
if (heap_release_fetch(scandesc->rs_rd,
scandesc->rs_snapshot,
&scandesc->rs_pgstat_info))
{
/*
- * Set up the result slot to point to this tuple.
- * Note that the slot acquires a pin on the buffer.
+ * Set up the result slot to point to this tuple. Note that the
+ * slot acquires a pin on the buffer.
*/
ExecStoreTuple(&scandesc->rs_ctup,
slot,
node->tbmres = NULL;
/*
- * Always rescan the input immediately, to ensure we can pass down
- * any outer tuple that might be used in index quals.
+ * Always rescan the input immediately, to ensure we can pass down any
+ * outer tuple that might be used in index quals.
*/
ExecReScan(outerPlanState(node), exprCtxt);
}
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
- * ExecInitBitmapHeapScan. This lock should be held till end of
- * transaction. (There is a faction that considers this too much
- * locking, however.)
+ * ExecInitBitmapHeapScan. This lock should be held till end of
+ * transaction. (There is a faction that considers this too much locking,
+ * however.)
*/
heap_close(relation, NoLock);
}
scanstate->ss.ss_currentRelation = currentRelation;
/*
- * Even though we aren't going to do a conventional seqscan, it is
- * useful to create a HeapScanDesc --- this checks the relation size
- * and sets up statistical infrastructure for us.
+ * Even though we aren't going to do a conventional seqscan, it is useful
+ * to create a HeapScanDesc --- this checks the relation size and sets up
+ * statistical infrastructure for us.
*/
scanstate->ss.ss_currentScanDesc = heap_beginscan(currentRelation,
estate->es_snapshot,
/*
* One problem is that heap_beginscan counts a "sequential scan" start,
* when we actually aren't doing any such thing. Reverse out the added
- * scan count. (Eventually we may want to count bitmap scans separately.)
+ * scan count. (Eventually we may want to count bitmap scans separately.)
*/
pgstat_discount_heap_scan(&scanstate->ss.ss_currentScanDesc->rs_pgstat_info);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.9 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.10 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
scandesc = node->biss_ScanDesc;
/*
- * If we have runtime keys and they've not already been set up, do it
- * now.
+ * If we have runtime keys and they've not already been set up, do it now.
*/
if (node->biss_RuntimeKeyInfo && !node->biss_RuntimeKeysReady)
ExecReScan((PlanState *) node, NULL);
/*
* Prepare the result bitmap. Normally we just create a new one to pass
- * back; however, our parent node is allowed to store a pre-made one
- * into node->biss_result, in which case we just OR our tuple IDs into
- * the existing bitmap. (This saves needing explicit UNION steps.)
+ * back; however, our parent node is allowed to store a pre-made one into
+ * node->biss_result, in which case we just OR our tuple IDs into the
+ * existing bitmap. (This saves needing explicit UNION steps.)
*/
if (node->biss_result)
{
*/
for (;;)
{
- bool more = index_getmulti(scandesc, tids, MAX_TIDS, &ntids);
+ bool more = index_getmulti(scandesc, tids, MAX_TIDS, &ntids);
if (ntids > 0)
{
ExprContext *econtext;
ExprState **runtimeKeyInfo;
- econtext = node->biss_RuntimeContext; /* context for runtime
- * keys */
+ econtext = node->biss_RuntimeContext; /* context for runtime keys */
runtimeKeyInfo = node->biss_RuntimeKeyInfo;
if (econtext)
econtext->ecxt_outertuple = exprCtxt->ecxt_outertuple;
/*
- * Reset the runtime-key context so we don't leak memory as each
- * outer tuple is scanned. Note this assumes that we will
- * recalculate *all* runtime keys on each call.
+ * Reset the runtime-key context so we don't leak memory as each outer
+ * tuple is scanned. Note this assumes that we will recalculate *all*
+ * runtime keys on each call.
*/
ResetExprContext(econtext);
}
/*
- * If we are doing runtime key calculations (ie, the index keys depend
- * on data from an outer scan), compute the new key values
+ * If we are doing runtime key calculations (ie, the index keys depend on
+ * data from an outer scan), compute the new key values
*/
if (runtimeKeyInfo)
{
/*
* Miscellaneous initialization
*
- * We do not need a standard exprcontext for this node, though we may
- * decide below to create a runtime-key exprcontext
+ * We do not need a standard exprcontext for this node, though we may decide
+ * below to create a runtime-key exprcontext
*/
/*
indexstate->biss_NumScanKeys = numScanKeys;
/*
- * If we have runtime keys, we need an ExprContext to evaluate them.
- * We could just create a "standard" plan node exprcontext, but to
- * keep the code looking similar to nodeIndexscan.c, it seems better
- * to stick with the approach of using a separate ExprContext.
+ * If we have runtime keys, we need an ExprContext to evaluate them. We
+ * could just create a "standard" plan node exprcontext, but to keep the
+ * code looking similar to nodeIndexscan.c, it seems better to stick with
+ * the approach of using a separate ExprContext.
*/
if (have_runtime_keys)
{
/*
* We do not open or lock the base relation here. We assume that an
- * ancestor BitmapHeapScan node is holding AccessShareLock on the
- * heap relation throughout the execution of the plan tree.
+ * ancestor BitmapHeapScan node is holding AccessShareLock on the heap
+ * relation throughout the execution of the plan tree.
*/
indexstate->ss.ss_currentRelation = NULL;
indexstate->ss.ss_currentScanDesc = NULL;
/*
- * open the index relation and initialize relation and scan
- * descriptors. Note we acquire no locks here; the index machinery
- * does its own locks and unlocks.
+ * open the index relation and initialize relation and scan descriptors.
+ * Note we acquire no locks here; the index machinery does its own locks
+ * and unlocks.
*/
indexstate->biss_RelationDesc = index_open(node->indexid);
indexstate->biss_ScanDesc =
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapOr.c,v 1.2 2005/04/20 15:48:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapOr.c,v 1.3 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
TIDBitmap *subresult;
/*
- * We can special-case BitmapIndexScan children to avoid an
- * explicit tbm_union step for each child: just pass down the
- * current result bitmap and let the child OR directly into it.
+ * We can special-case BitmapIndexScan children to avoid an explicit
+ * tbm_union step for each child: just pass down the current result
+ * bitmap and let the child OR directly into it.
*/
if (IsA(subnode, BitmapIndexScanState))
{
- if (result == NULL) /* first subplan */
+ if (result == NULL) /* first subplan */
{
/* XXX should we use less than work_mem for this? */
result = tbm_create(work_mem * 1024L);
elog(ERROR, "unrecognized result from subplan");
if (result == NULL)
- result = subresult; /* first subplan */
+ result = subresult; /* first subplan */
else
{
tbm_union(result, subresult);
/* must provide our own instrumentation support */
if (node->ps.instrument)
- InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */);
+ InstrStopNodeMulti(node->ps.instrument, 0 /* XXX */ );
return (Node *) result;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.34 2005/05/22 22:30:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeFunctionscan.c,v 1.35 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
tuplestorestate = node->tuplestorestate;
/*
- * If first time through, read all tuples from function and put them
- * in a tuplestore. Subsequent calls just fetch tuples from
- * tuplestore.
+ * If first time through, read all tuples from function and put them in a
+ * tuplestore. Subsequent calls just fetch tuples from tuplestore.
*/
if (tuplestorestate == NULL)
{
/*
* If function provided a tupdesc, cross-check it. We only really
- * need to do this for functions returning RECORD, but might as
- * well do it always.
+ * need to do this for functions returning RECORD, but might as well
+ * do it always.
*/
- if (funcTupdesc)
+ if (funcTupdesc)
tupledesc_match(node->tupdesc, funcTupdesc);
}
Assert(rte->rtekind == RTE_FUNCTION);
/*
- * Now determine if the function returns a simple or composite type,
- * and build an appropriate tupdesc.
+ * Now determine if the function returns a simple or composite type, and
+ * build an appropriate tupdesc.
*/
functypclass = get_expr_result_type(rte->funcexpr,
&funcrettype,
/*
* For RECORD results, make sure a typmod has been assigned. (The
- * function should do this for itself, but let's cover things in case
- * it doesn't.)
+ * function should do this for itself, but let's cover things in case it
+ * doesn't.)
*/
BlessTupleDesc(tupdesc);
return;
/*
- * Here we have a choice whether to drop the tuplestore (and recompute
- * the function outputs) or just rescan it. This should depend on
- * whether the function expression contains parameters and/or is
- * marked volatile. FIXME soon.
+ * Here we have a choice whether to drop the tuplestore (and recompute the
+ * function outputs) or just rescan it. This should depend on whether the
+ * function expression contains parameters and/or is marked volatile.
+ * FIXME soon.
*/
if (node->ss.ps.chgParam != NULL)
{
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function return row and query-specified return row do not match"),
- errdetail("Returned type %s at ordinal position %d, but query expects %s.",
+ errdetail("Returned type %s at ordinal position %d, but query expects %s.",
format_type_be(sattr->atttypid),
i + 1,
format_type_be(dattr->atttypid))));
* locate group boundaries.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeGroup.c,v 1.61 2005/03/16 21:38:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeGroup.c,v 1.62 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
/*
- * If first time through, acquire first input tuple and determine
- * whether to return it or not.
+ * If first time through, acquire first input tuple and determine whether
+ * to return it or not.
*/
if (TupIsNull(firsttupleslot))
{
/* Copy tuple, set up as input for qual test and projection */
ExecCopySlot(firsttupleslot, outerslot);
econtext->ecxt_scantuple = firsttupleslot;
+
/*
- * Check the qual (HAVING clause); if the group does not match,
- * ignore it and fall into scan loop.
+ * Check the qual (HAVING clause); if the group does not match, ignore
+ * it and fall into scan loop.
*/
if (ExecQual(node->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the first input
- * tuple.
+ * Form and return a projection tuple using the first input tuple.
*/
return ExecProject(node->ss.ps.ps_ProjInfo, NULL);
}
/*
* This loop iterates once per input tuple group. At the head of the
- * loop, we have finished processing the first tuple of the group and
- * now need to scan over all the other group members.
+ * loop, we have finished processing the first tuple of the group and now
+ * need to scan over all the other group members.
*/
for (;;)
{
econtext->ecxt_per_tuple_memory))
break;
}
+
/*
- * We have the first tuple of the next input group. See if we
- * want to return it.
+ * We have the first tuple of the next input group. See if we want to
+ * return it.
*/
/* Copy tuple, set up as input for qual test and projection */
ExecCopySlot(firsttupleslot, outerslot);
econtext->ecxt_scantuple = firsttupleslot;
+
/*
- * Check the qual (HAVING clause); if the group does not match,
- * ignore it and loop back to scan the rest of the group.
+ * Check the qual (HAVING clause); if the group does not match, ignore
+ * it and loop back to scan the rest of the group.
*/
if (ExecQual(node->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the first input
- * tuple.
+ * Form and return a projection tuple using the first input tuple.
*/
return ExecProject(node->ss.ps.ps_ProjInfo, NULL);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.95 2005/09/25 19:37:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
InstrStopNodeMulti(node->ps.instrument, hashtable->totalTuples);
/*
- * We do not return the hash table directly because it's not a subtype
- * of Node, and so would violate the MultiExecProcNode API. Instead,
- * our parent Hashjoin node is expected to know how to fish it out
- * of our node state. Ugly but not really worth cleaning up, since
- * Hashjoin knows quite a bit more about Hash besides that.
+ * We do not return the hash table directly because it's not a subtype of
+ * Node, and so would violate the MultiExecProcNode API. Instead, our
+ * parent Hashjoin node is expected to know how to fish it out of our node
+ * state. Ugly but not really worth cleaning up, since Hashjoin knows
+ * quite a bit more about Hash besides that.
*/
return NULL;
}
outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate);
/*
- * initialize tuple type. no need to initialize projection info
- * because this node doesn't do projections
+ * initialize tuple type. no need to initialize projection info because
+ * this node doesn't do projections
*/
ExecAssignResultTypeFromOuterPlan(&hashstate->ps);
hashstate->ps.ps_ProjInfo = NULL;
MemoryContext oldcxt;
/*
- * Get information about the size of the relation to be hashed (it's
- * the "outer" subtree of this node, but the inner relation of the
- * hashjoin). Compute the appropriate size of the hash table.
+ * Get information about the size of the relation to be hashed (it's the
+ * "outer" subtree of this node, but the inner relation of the hashjoin).
+ * Compute the appropriate size of the hash table.
*/
outerNode = outerPlan(node);
/*
* Initialize the hash table control block.
*
- * The hashtable control block is just palloc'd from the executor's
- * per-query memory context.
+ * The hashtable control block is just palloc'd from the executor's per-query
+ * memory context.
*/
hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
hashtable->nbuckets = nbuckets;
}
/*
- * Create temporary memory contexts in which to keep the hashtable
- * working storage. See notes in executor/hashjoin.h.
+ * Create temporary memory contexts in which to keep the hashtable working
+ * storage. See notes in executor/hashjoin.h.
*/
hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
"HashTableContext",
ntuples = 1000.0;
/*
- * Estimate tupsize based on footprint of tuple in hashtable... note
- * this does not allow for any palloc overhead. The manipulations of
- * spaceUsed don't count palloc overhead either.
+ * Estimate tupsize based on footprint of tuple in hashtable... note this
+ * does not allow for any palloc overhead. The manipulations of spaceUsed
+ * don't count palloc overhead either.
*/
tupsize = MAXALIGN(sizeof(HashJoinTupleData)) +
MAXALIGN(sizeof(HeapTupleHeaderData)) +
if (inner_rel_bytes > hash_table_bytes)
{
/* We'll need multiple batches */
- long lbuckets;
- double dbatch;
- int minbatch;
+ long lbuckets;
+ double dbatch;
+ int minbatch;
lbuckets = (hash_table_bytes / tupsize) / NTUP_PER_BUCKET;
lbuckets = Min(lbuckets, INT_MAX);
nbuckets = (int) lbuckets;
dbatch = ceil(inner_rel_bytes / hash_table_bytes);
- dbatch = Min(dbatch, INT_MAX/2);
+ dbatch = Min(dbatch, INT_MAX / 2);
minbatch = (int) dbatch;
nbatch = 2;
while (nbatch < minbatch)
else
{
/* We expect the hashtable to fit in memory */
- double dbuckets;
+ double dbuckets;
dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
dbuckets = Min(dbuckets, INT_MAX);
* We want nbuckets to be prime so as to avoid having bucket and batch
* numbers depend on only some bits of the hash code. Choose the next
* larger prime from the list in hprimes[]. (This also enforces that
- * nbuckets is not very small, by the simple expedient of not putting
- * any very small entries in hprimes[].)
+ * nbuckets is not very small, by the simple expedient of not putting any
+ * very small entries in hprimes[].)
*/
for (i = 0; i < (int) lengthof(hprimes); i++)
{
return;
/* safety check to avoid overflow */
- if (oldnbatch > INT_MAX/2)
+ if (oldnbatch > INT_MAX / 2)
return;
nbatch = oldnbatch * 2;
hashtable->nbatch = nbatch;
/*
- * Scan through the existing hash table entries and dump out any
- * that are no longer of the current batch.
+ * Scan through the existing hash table entries and dump out any that are
+ * no longer of the current batch.
*/
ninmemory = nfreed = 0;
#endif
/*
- * If we dumped out either all or none of the tuples in the table,
- * disable further expansion of nbatch. This situation implies that
- * we have enough tuples of identical hashvalues to overflow spaceAllowed.
- * Increasing nbatch will not fix it since there's no way to subdivide
- * the group any more finely.
- * We have to just gut it out and hope the server has enough RAM.
+ * If we dumped out either all or none of the tuples in the table, disable
+ * further expansion of nbatch. This situation implies that we have
+ * enough tuples of identical hashvalues to overflow spaceAllowed.
+ * Increasing nbatch will not fix it since there's no way to subdivide the
+ * group any more finely. We have to just gut it out and hope the server
+ * has enough RAM.
*/
if (nfreed == 0 || nfreed == ninmemory)
{
MemoryContext oldContext;
/*
- * We reset the eval context each time to reclaim any memory leaked in
- * the hashkey expressions.
+ * We reset the eval context each time to reclaim any memory leaked in the
+ * hashkey expressions.
*/
ResetExprContext(econtext);
int *bucketno,
int *batchno)
{
- uint32 nbuckets = (uint32) hashtable->nbuckets;
- uint32 nbatch = (uint32) hashtable->nbatch;
+ uint32 nbuckets = (uint32) hashtable->nbuckets;
+ uint32 nbatch = (uint32) hashtable->nbatch;
if (nbatch > 1)
{
uint32 hashvalue = hjstate->hj_CurHashValue;
/*
- * hj_CurTuple is NULL to start scanning a new bucket, or the address
- * of the last tuple returned from the current bucket.
+ * hj_CurTuple is NULL to start scanning a new bucket, or the address of
+ * the last tuple returned from the current bucket.
*/
if (hashTuple == NULL)
hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
int nbuckets = hashtable->nbuckets;
/*
- * Release all the hash buckets and tuples acquired in the prior pass,
- * and reinitialize the context for a new pass.
+ * Release all the hash buckets and tuples acquired in the prior pass, and
+ * reinitialize the context for a new pass.
*/
MemoryContextReset(hashtable->batchCxt);
oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.73 2005/09/25 19:37:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.74 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState *outerNode,
- HashJoinState *hjstate,
- uint32 *hashvalue);
+ HashJoinState *hjstate,
+ uint32 *hashvalue);
static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
uint32 *hashvalue,
econtext = node->js.ps.ps_ExprContext;
/*
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous join
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->js.ps.ps_TupFromTlist)
{
}
/*
- * If we're doing an IN join, we want to return at most one row per
- * outer tuple; so we can stop scanning the inner scan if we matched
- * on the previous try.
+ * If we're doing an IN join, we want to return at most one row per outer
+ * tuple; so we can stop scanning the inner scan if we matched on the
+ * previous try.
*/
if (node->js.jointype == JOIN_IN && node->hj_MatchedOuter)
node->hj_NeedNewOuter = true;
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
{
/*
* If the outer relation is completely empty, we can quit without
- * building the hash table. However, for an inner join it is only
- * a win to check this when the outer relation's startup cost is less
- * than the projected cost of building the hash table. Otherwise
- * it's best to build the hash table first and see if the inner
- * relation is empty. (When it's an outer join, we should always
- * make this check, since we aren't going to be able to skip the
- * join on the strength of an empty inner relation anyway.)
+ * building the hash table. However, for an inner join it is only a
+ * win to check this when the outer relation's startup cost is less
+ * than the projected cost of building the hash table. Otherwise it's
+ * best to build the hash table first and see if the inner relation is
+ * empty. (When it's an outer join, we should always make this check,
+ * since we aren't going to be able to skip the join on the strength
+ * of an empty inner relation anyway.)
*
- * The only way to make the check is to try to fetch a tuple from
- * the outer plan node. If we succeed, we have to stash it away
- * for later consumption by ExecHashJoinOuterGetTuple.
+ * The only way to make the check is to try to fetch a tuple from the
+ * outer plan node. If we succeed, we have to stash it away for later
+ * consumption by ExecHashJoinOuterGetTuple.
*/
if (outerNode->plan->startup_cost < hashNode->ps.plan->total_cost ||
node->js.jointype == JOIN_LEFT)
(void) MultiExecProcNode((PlanState *) hashNode);
/*
- * If the inner relation is completely empty, and we're not doing
- * an outer join, we can quit without scanning the outer relation.
+ * If the inner relation is completely empty, and we're not doing an
+ * outer join, we can quit without scanning the outer relation.
*/
if (hashtable->totalTuples == 0 && node->js.jointype != JOIN_LEFT)
{
node->hj_MatchedOuter = false;
/*
- * now we have an outer tuple, find the corresponding bucket
- * for this tuple from the hash table
+ * now we have an outer tuple, find the corresponding bucket for
+ * this tuple from the hash table
*/
node->hj_CurHashValue = hashvalue;
ExecHashGetBucketAndBatch(hashtable, hashvalue,
node->hj_CurTuple = NULL;
/*
- * Now we've got an outer tuple and the corresponding hash
- * bucket, but this tuple may not belong to the current batch.
+ * Now we've got an outer tuple and the corresponding hash bucket,
+ * but this tuple may not belong to the current batch.
*/
if (batchno != hashtable->curbatch)
{
/*
- * Need to postpone this outer tuple to a later batch.
- * Save it in the corresponding outer-batch file.
+ * Need to postpone this outer tuple to a later batch. Save it
+ * in the corresponding outer-batch file.
*/
Assert(batchno > hashtable->curbatch);
ExecHashJoinSaveTuple(ExecFetchSlotTuple(outerTupleSlot),
hashvalue,
&hashtable->outerBatchFile[batchno]);
node->hj_NeedNewOuter = true;
- continue; /* loop around for a new outer tuple */
+ continue; /* loop around for a new outer tuple */
}
}
/*
* if we pass the qual, then save state for next call and have
- * ExecProject form the projection, store it in the tuple
- * table, and return the slot.
+ * ExecProject form the projection, store it in the tuple table,
+ * and return the slot.
*
- * Only the joinquals determine MatchedOuter status, but all
- * quals must pass to actually return the tuple.
+ * Only the joinquals determine MatchedOuter status, but all quals
+ * must pass to actually return the tuple.
*/
if (joinqual == NIL || ExecQual(joinqual, econtext, false))
{
}
/*
- * If we didn't return a tuple, may need to set
- * NeedNewOuter
+ * If we didn't return a tuple, may need to set NeedNewOuter
*/
if (node->js.jointype == JOIN_IN)
{
/*
* Now the current outer tuple has run out of matches, so check
- * whether to emit a dummy outer-join tuple. If not, loop around
- * to get a new outer tuple.
+ * whether to emit a dummy outer-join tuple. If not, loop around to
+ * get a new outer tuple.
*/
node->hj_NeedNewOuter = true;
node->js.jointype == JOIN_LEFT)
{
/*
- * We are doing an outer join and there were no join matches
- * for this outer tuple. Generate a fake join tuple with
- * nulls for the inner tuple, and return it if it passes the
- * non-join quals.
+ * We are doing an outer join and there were no join matches for
+ * this outer tuple. Generate a fake join tuple with nulls for
+ * the inner tuple, and return it if it passes the non-join quals.
*/
econtext->ecxt_innertuple = node->hj_NullInnerTupleSlot;
if (ExecQual(otherqual, econtext, false))
{
/*
- * qualification was satisfied so we project and return
- * the slot containing the result tuple using
- * ExecProject().
+ * qualification was satisfied so we project and return the
+ * slot containing the result tuple using ExecProject().
*/
TupleTableSlot *result;
case JOIN_LEFT:
hjstate->hj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(hjstate)));
+ ExecGetResultType(innerPlanState(hjstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
}
/*
- * now for some voodoo. our temporary tuple slot is actually the
- * result tuple slot of the Hash node (which is our inner plan). we
- * do this because Hash nodes don't return tuples via ExecProcNode()
- * -- instead the hash join node uses ExecScanHashBucket() to get at
- * the contents of the hash table. -cim 6/9/91
+ * now for some voodoo. our temporary tuple slot is actually the result
+ * tuple slot of the Hash node (which is our inner plan). we do this
+ * because Hash nodes don't return tuples via ExecProcNode() -- instead
+ * the hash join node uses ExecScanHashBucket() to get at the contents of
+ * the hash table. -cim 6/9/91
*/
{
HashState *hashstate = (HashState *) innerPlanState(hjstate);
hjstate->hj_CurTuple = NULL;
/*
- * Deconstruct the hash clauses into outer and inner argument values,
- * so that we can evaluate those subexpressions separately. Also make
- * a list of the hash operator OIDs, in preparation for looking up the
- * hash functions to use.
+ * Deconstruct the hash clauses into outer and inner argument values, so
+ * that we can evaluate those subexpressions separately. Also make a list
+ * of the hash operator OIDs, in preparation for looking up the hash
+ * functions to use.
*/
lclauses = NIL;
rclauses = NIL;
if (curbatch == 0)
{ /* if it is the first pass */
+
/*
* Check to see if first outer tuple was already fetched by
* ExecHashJoin() and not used yet.
}
/*
- * We have just reached the end of the first pass. Try to switch
- * to a saved batch.
+ * We have just reached the end of the first pass. Try to switch to a
+ * saved batch.
*/
curbatch = ExecHashJoinNewBatch(hjstate);
}
/*
- * Try to read from a temp file. Loop allows us to advance to new
- * batches as needed. NOTE: nbatch could increase inside
- * ExecHashJoinNewBatch, so don't try to optimize this loop.
+ * Try to read from a temp file. Loop allows us to advance to new batches
+ * as needed. NOTE: nbatch could increase inside ExecHashJoinNewBatch, so
+ * don't try to optimize this loop.
*/
while (curbatch < hashtable->nbatch)
{
* sides. We can sometimes skip over batches that are empty on only one
* side, but there are exceptions:
*
- * 1. In a LEFT JOIN, we have to process outer batches even if the
- * inner batch is empty.
+ * 1. In a LEFT JOIN, we have to process outer batches even if the inner
+ * batch is empty.
*
- * 2. If we have increased nbatch since the initial estimate, we have
- * to scan inner batches since they might contain tuples that need to
- * be reassigned to later inner batches.
+ * 2. If we have increased nbatch since the initial estimate, we have to scan
+ * inner batches since they might contain tuples that need to be
+ * reassigned to later inner batches.
*
- * 3. Similarly, if we have increased nbatch since starting the outer
- * scan, we have to rescan outer batches in case they contain tuples
- * that need to be reassigned.
+ * 3. Similarly, if we have increased nbatch since starting the outer scan,
+ * we have to rescan outer batches in case they contain tuples that need
+ * to be reassigned.
*/
curbatch++;
while (curbatch < nbatch &&
if (BufFileSeek(innerFile, 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not rewind hash-join temporary file: %m")));
+ errmsg("could not rewind hash-join temporary file: %m")));
while ((slot = ExecHashJoinGetSavedTuple(hjstate,
innerFile,
hjstate->hj_HashTupleSlot)))
{
/*
- * NOTE: some tuples may be sent to future batches. Also,
- * it is possible for hashtable->nbatch to be increased here!
+ * NOTE: some tuples may be sent to future batches. Also, it is
+ * possible for hashtable->nbatch to be increased here!
*/
ExecHashTableInsert(hashtable,
ExecFetchSlotTuple(slot),
ExecHashJoinSaveTuple(HeapTuple heapTuple, uint32 hashvalue,
BufFile **fileptr)
{
- BufFile *file = *fileptr;
+ BufFile *file = *fileptr;
size_t written;
if (file == NULL)
/*
* ExecHashJoinGetSavedTuple
- * read the next tuple from a batch file. Return NULL if no more.
+ * read the next tuple from a batch file. Return NULL if no more.
*
* On success, *hashvalue is set to the tuple's hash value, and the tuple
* itself is stored in the given slot.
ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
{
/*
- * If we haven't yet built the hash table then we can just return;
- * nothing done yet, so nothing to undo.
+ * If we haven't yet built the hash table then we can just return; nothing
+ * done yet, so nothing to undo.
*/
if (node->hj_HashTable == NULL)
return;
/*
- * In a multi-batch join, we currently have to do rescans the hard
- * way, primarily because batch temp files may have already been
- * released. But if it's a single-batch join, and there is no
- * parameter change for the inner subnode, then we can just re-use the
- * existing hash table without rebuilding it.
+ * In a multi-batch join, we currently have to do rescans the hard way,
+ * primarily because batch temp files may have already been released. But
+ * if it's a single-batch join, and there is no parameter change for the
+ * inner subnode, then we can just re-use the existing hash table without
+ * rebuilding it.
*/
if (node->hj_HashTable->nbatch == 1 &&
((PlanState *) node)->righttree->chgParam == NULL)
node->hj_FirstOuterTupleSlot = NULL;
/*
- * if chgParam of subnode is not null then plan will be re-scanned
- * by first ExecProcNode.
+ * if chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode.
*/
if (((PlanState *) node)->righttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->righttree, exprCtxt);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.103 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.104 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid;
/*
- * Clear any reference to the previously returned tuple. The idea
- * here is to not have the tuple slot be the last holder of a pin on
- * that tuple's buffer; if it is, we'll need a separate visit to the
- * bufmgr to release the buffer. By clearing here, we get to have the
- * release done by ReleaseAndReadBuffer inside index_getnext.
+ * Clear any reference to the previously returned tuple. The idea here is
+ * to not have the tuple slot be the last holder of a pin on that tuple's
+ * buffer; if it is, we'll need a separate visit to the bufmgr to release
+ * the buffer. By clearing here, we get to have the release done by
+ * ReleaseAndReadBuffer inside index_getnext.
*/
ExecClearTuple(slot);
ResetExprContext(econtext);
if (!ExecQual(node->indexqualorig, econtext, false))
- ExecClearTuple(slot); /* would not be returned by scan */
+ ExecClearTuple(slot); /* would not be returned by scan */
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[scanrelid - 1] = true;
if ((tuple = index_getnext(scandesc, direction)) != NULL)
{
/*
- * Store the scanned tuple in the scan tuple slot of the scan
- * state. Note: we pass 'false' because tuples returned by
- * amgetnext are pointers onto disk pages and must not be
- * pfree()'d.
+ * Store the scanned tuple in the scan tuple slot of the scan state.
+ * Note: we pass 'false' because tuples returned by amgetnext are
+ * pointers onto disk pages and must not be pfree()'d.
*/
- ExecStoreTuple(tuple, /* tuple to store */
- slot, /* slot to store in */
- scandesc->xs_cbuf, /* buffer containing tuple */
- false); /* don't pfree */
+ ExecStoreTuple(tuple, /* tuple to store */
+ slot, /* slot to store in */
+ scandesc->xs_cbuf, /* buffer containing tuple */
+ false); /* don't pfree */
return slot;
}
/*
- * if we get here it means the index scan failed so we are at the end
- * of the scan..
+ * if we get here it means the index scan failed so we are at the end of
+ * the scan..
*/
return ExecClearTuple(slot);
}
ExecIndexScan(IndexScanState *node)
{
/*
- * If we have runtime keys and they've not already been set up, do it
- * now.
+ * If we have runtime keys and they've not already been set up, do it now.
*/
if (node->iss_RuntimeKeyInfo && !node->iss_RuntimeKeysReady)
ExecReScan((PlanState *) node, NULL);
Index scanrelid;
estate = node->ss.ps.state;
- econtext = node->iss_RuntimeContext; /* context for runtime
- * keys */
+ econtext = node->iss_RuntimeContext; /* context for runtime keys */
scanKeys = node->iss_ScanKeys;
runtimeKeyInfo = node->iss_RuntimeKeyInfo;
numScanKeys = node->iss_NumScanKeys;
}
/*
- * Reset the runtime-key context so we don't leak memory as each
- * outer tuple is scanned. Note this assumes that we will
- * recalculate *all* runtime keys on each call.
+ * Reset the runtime-key context so we don't leak memory as each outer
+ * tuple is scanned. Note this assumes that we will recalculate *all*
+ * runtime keys on each call.
*/
ResetExprContext(econtext);
}
/*
- * If we are doing runtime key calculations (ie, the index keys depend
- * on data from an outer scan), compute the new key values
+ * If we are doing runtime key calculations (ie, the index keys depend on
+ * data from an outer scan), compute the new key values
*/
if (runtimeKeyInfo)
{
for (j = 0; j < n_keys; j++)
{
/*
- * If we have a run-time key, then extract the run-time
- * expression and evaluate it with respect to the current
- * outer tuple. We then stick the result into the scan key.
+ * If we have a run-time key, then extract the run-time expression and
+ * evaluate it with respect to the current outer tuple. We then stick
+ * the result into the scan key.
*
- * Note: the result of the eval could be a pass-by-ref value
- * that's stored in the outer scan's tuple, not in
- * econtext->ecxt_per_tuple_memory. We assume that the
- * outer tuple will stay put throughout our scan. If this
- * is wrong, we could copy the result into our context
- * explicitly, but I think that's not necessary...
+ * Note: the result of the eval could be a pass-by-ref value that's
+ * stored in the outer scan's tuple, not in
+ * econtext->ecxt_per_tuple_memory. We assume that the outer tuple
+ * will stay put throughout our scan. If this is wrong, we could copy
+ * the result into our context explicitly, but I think that's not
+ * necessary...
*/
if (run_keys[j] != NULL)
{
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
- * ExecInitIndexScan. This lock should be held till end of
- * transaction. (There is a faction that considers this too much
- * locking, however.)
+ * ExecInitIndexScan. This lock should be held till end of transaction.
+ * (There is a faction that considers this too much locking, however.)
*/
heap_close(relation, NoLock);
}
* initialize child expressions
*
* Note: we don't initialize all of the indexqual expression, only the
- * sub-parts corresponding to runtime keys (see below). The
- * indexqualorig expression is always initialized even though it will
- * only be used in some uncommon cases --- would be nice to improve
- * that. (Problem is that any SubPlans present in the expression must
- * be found now...)
+ * sub-parts corresponding to runtime keys (see below). The indexqualorig
+ * expression is always initialized even though it will only be used in
+ * some uncommon cases --- would be nice to improve that. (Problem is
+ * that any SubPlans present in the expression must be found now...)
*/
indexstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->scan.plan.targetlist,
indexstate->iss_NumScanKeys = numScanKeys;
/*
- * If we have runtime keys, we need an ExprContext to evaluate them.
- * The node's standard context won't do because we want to reset that
- * context for every tuple. So, build another context just like the
- * other one... -tgl 7/11/00
+ * If we have runtime keys, we need an ExprContext to evaluate them. The
+ * node's standard context won't do because we want to reset that context
+ * for every tuple. So, build another context just like the other one...
+ * -tgl 7/11/00
*/
if (have_runtime_keys)
{
ExecAssignScanType(&indexstate->ss, RelationGetDescr(currentRelation), false);
/*
- * open the index relation and initialize relation and scan
- * descriptors. Note we acquire no locks here; the index machinery
- * does its own locks and unlocks. (We rely on having AccessShareLock
- * on the parent table to ensure the index won't go away!)
+ * open the index relation and initialize relation and scan descriptors.
+ * Note we acquire no locks here; the index machinery does its own locks
+ * and unlocks. (We rely on having AccessShareLock on the parent table to
+ * ensure the index won't go away!)
*/
indexstate->iss_RelationDesc = index_open(node->indexid);
indexstate->iss_ScanDesc = index_beginscan(currentRelation,
(ExprState **) palloc(n_keys * sizeof(ExprState *));
/*
- * for each opclause in the given qual, convert each qual's
- * opclause into a single scan key
+ * for each opclause in the given qual, convert each qual's opclause into
+ * a single scan key
*/
qual_cell = list_head(quals);
strategy_cell = list_head(strategies);
for (j = 0; j < n_keys; j++)
{
- OpExpr *clause; /* one clause of index qual */
- Expr *leftop; /* expr on lhs of operator */
- Expr *rightop; /* expr on rhs ... */
+ OpExpr *clause; /* one clause of index qual */
+ Expr *leftop; /* expr on lhs of operator */
+ Expr *rightop; /* expr on rhs ... */
int flags = 0;
- AttrNumber varattno; /* att number used in scan */
+ AttrNumber varattno; /* att number used in scan */
StrategyNumber strategy; /* op's strategy number */
- Oid subtype; /* op's strategy subtype */
- RegProcedure opfuncid; /* operator proc id used in scan */
- Datum scanvalue; /* value used in scan (if const) */
+ Oid subtype; /* op's strategy subtype */
+ RegProcedure opfuncid; /* operator proc id used in scan */
+ Datum scanvalue; /* value used in scan (if const) */
/*
* extract clause information from the qualification
opfuncid = clause->opfuncid;
/*
- * Here we figure out the contents of the index qual. The
- * usual case is (var op const) which means we form a scan key
- * for the attribute listed in the var node and use the value
- * of the const as comparison data.
+ * Here we figure out the contents of the index qual. The usual case
+ * is (var op const) which means we form a scan key for the attribute
+ * listed in the var node and use the value of the const as comparison
+ * data.
*
- * If we don't have a const node, it means our scan key is a
- * function of information obtained during the execution of
- * the plan, in which case we need to recalculate the index
- * scan key at run time. Hence, we set have_runtime_keys to
- * true and place the appropriate subexpression in run_keys.
- * The corresponding scan key values are recomputed at run
- * time.
+ * If we don't have a const node, it means our scan key is a function of
+ * information obtained during the execution of the plan, in which
+ * case we need to recalculate the index scan key at run time. Hence,
+ * we set have_runtime_keys to true and place the appropriate
+ * subexpression in run_keys. The corresponding scan key values are
+ * recomputed at run time.
*/
run_keys[j] = NULL;
if (IsA(rightop, Const))
{
/*
- * if the rightop is a const node then it means it
- * identifies the value to place in our scan key.
+ * if the rightop is a const node then it means it identifies the
+ * value to place in our scan key.
*/
scanvalue = ((Const *) rightop)->constvalue;
if (((Const *) rightop)->constisnull)
else
{
/*
- * otherwise, the rightop contains an expression evaluable
- * at runtime to figure out the value to place in our scan
- * key.
+ * otherwise, the rightop contains an expression evaluable at
+ * runtime to figure out the value to place in our scan key.
*/
have_runtime_keys = true;
run_keys[j] = ExecInitExpr(rightop, planstate);
*/
ScanKeyEntryInitialize(&scan_keys[j],
flags,
- varattno, /* attribute number to scan */
- strategy, /* op's strategy */
- subtype, /* strategy subtype */
- opfuncid, /* reg proc to use */
- scanvalue); /* constant */
+ varattno, /* attribute number to scan */
+ strategy, /* op's strategy */
+ subtype, /* strategy subtype */
+ opfuncid, /* reg proc to use */
+ scanvalue); /* constant */
}
/* If no runtime keys, get rid of speculatively-allocated array */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.21 2005/03/16 21:38:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.22 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return NULL;
/*
- * First call for this scan, so compute limit/offset. (We
- * can't do this any earlier, because parameters from upper
- * nodes may not be set until now.) This also sets position =
- * 0.
+ * First call for this scan, so compute limit/offset. (We can't do
+ * this any earlier, because parameters from upper nodes may not
+ * be set until now.) This also sets position = 0.
*/
recompute_limits(node);
if (TupIsNull(slot))
{
/*
- * The subplan returns too few tuples for us to
- * produce any output at all.
+ * The subplan returns too few tuples for us to produce
+ * any output at all.
*/
node->lstate = LIMIT_EMPTY;
return NULL;
if (ScanDirectionIsForward(direction))
{
/*
- * Forwards scan, so check for stepping off end of window.
- * If we are at the end of the window, return NULL without
- * advancing the subplan or the position variable; but
- * change the state machine state to record having done
- * so.
+ * Forwards scan, so check for stepping off end of window. If
+ * we are at the end of the window, return NULL without
+ * advancing the subplan or the position variable; but change
+ * the state machine state to record having done so.
*/
if (!node->noCount &&
node->position >= node->offset + node->count)
else
{
/*
- * Backwards scan, so check for stepping off start of
- * window. As above, change only state-machine status if
- * so.
+ * Backwards scan, so check for stepping off start of window.
+ * As above, change only state-machine status if so.
*/
if (node->position <= node->offset + 1)
{
return NULL;
/*
- * Backing up from subplan EOF, so re-fetch previous tuple;
- * there should be one! Note previous tuple must be in
- * window.
+ * Backing up from subplan EOF, so re-fetch previous tuple; there
+ * should be one! Note previous tuple must be in window.
*/
slot = ExecProcNode(outerPlan);
if (TupIsNull(slot))
outerPlanState(limitstate) = ExecInitNode(outerPlan, estate);
/*
- * limit nodes do no projections, so initialize projection info for
- * this node appropriately
+ * limit nodes do no projections, so initialize projection info for this
+ * node appropriately
*/
ExecAssignResultTypeFromOuterPlan(&limitstate->ps);
limitstate->ps.ps_ProjInfo = NULL;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.49 2005/03/16 21:38:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.50 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * If we are not at the end of the tuplestore, or are going backwards,
- * try to fetch a tuple from tuplestore.
+ * If we are not at the end of the tuplestore, or are going backwards, try
+ * to fetch a tuple from tuplestore.
*/
eof_tuplestore = tuplestore_ateof(tuplestorestate);
{
/*
* When reversing direction at tuplestore EOF, the first
- * getheaptuple call will fetch the last-added tuple; but we
- * want to return the one before that, if possible. So do an
- * extra fetch.
+ * getheaptuple call will fetch the last-added tuple; but we want
+ * to return the one before that, if possible. So do an extra
+ * fetch.
*/
heapTuple = tuplestore_getheaptuple(tuplestorestate,
forward,
/*
* If necessary, try to fetch another row from the subplan.
*
- * Note: the eof_underlying state variable exists to short-circuit
- * further subplan calls. It's not optional, unfortunately, because
- * some plan node types are not robust about being called again when
- * they've already returned NULL.
+ * Note: the eof_underlying state variable exists to short-circuit further
+ * subplan calls. It's not optional, unfortunately, because some plan
+ * node types are not robust about being called again when they've already
+ * returned NULL.
*/
if (eof_tuplestore && !node->eof_underlying)
{
TupleTableSlot *outerslot;
/*
- * We can only get here with forward==true, so no need to worry
- * about which direction the subplan will go.
+ * We can only get here with forward==true, so no need to worry about
+ * which direction the subplan will go.
*/
outerNode = outerPlanState(node);
outerslot = ExecProcNode(outerNode);
/*
* Append returned tuple to tuplestore, too. NOTE: because the
- * tuplestore is certainly in EOF state, its read position will
- * move forward over the added tuple. This is what we want.
+ * tuplestore is certainly in EOF state, its read position will move
+ * forward over the added tuple. This is what we want.
*/
tuplestore_puttuple(tuplestorestate, (void *) heapTuple);
}
outerPlanState(matstate) = ExecInitNode(outerPlan, estate);
/*
- * initialize tuple type. no need to initialize projection info
- * because this node doesn't do projections.
+ * initialize tuple type. no need to initialize projection info because
+ * this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan(&matstate->ss.ps);
ExecAssignScanTypeFromOuterPlan(&matstate->ss);
ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
{
/*
- * If we haven't materialized yet, just return. If outerplan' chgParam
- * is not NULL then it will be re-scanned by ExecProcNode, else - no
- * reason to re-scan it at all.
+ * If we haven't materialized yet, just return. If outerplan' chgParam is
+ * not NULL then it will be re-scanned by ExecProcNode, else - no reason
+ * to re-scan it at all.
*/
if (!node->tuplestorestate)
return;
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
/*
- * If subnode is to be rescanned then we forget previous stored
- * results; we have to re-read the subplan and re-store.
+ * If subnode is to be rescanned then we forget previous stored results;
+ * we have to re-read the subplan and re-store.
*
- * Otherwise we can just rewind and rescan the stored output. The state
- * of the subnode does not change.
+ * Otherwise we can just rewind and rescan the stored output. The state of
+ * the subnode does not change.
*/
if (((PlanState *) node)->lefttree->chgParam != NULL)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.74 2005/05/15 21:19:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.75 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* matching tuple and so on.
*
* Therefore, when initializing the merge-join node, we look up the
- * associated sort operators. We assume the planner has seen to it
+ * associated sort operators. We assume the planner has seen to it
* that the inputs are correctly sorted by these operators. Rather
* than directly executing the merge join clauses, we evaluate the
* left and right key expressions separately and then compare the
typedef struct MergeJoinClauseData
{
/* Executable expression trees */
- ExprState *lexpr; /* left-hand (outer) input expression */
- ExprState *rexpr; /* right-hand (inner) input expression */
+ ExprState *lexpr; /* left-hand (outer) input expression */
+ ExprState *rexpr; /* right-hand (inner) input expression */
+
/*
* If we have a current left or right input tuple, the values of the
* expressions are loaded into these fields:
*/
- Datum ldatum; /* current left-hand value */
- Datum rdatum; /* current right-hand value */
- bool lisnull; /* and their isnull flags */
- bool risnull;
+ Datum ldatum; /* current left-hand value */
+ Datum rdatum; /* current right-hand value */
+ bool lisnull; /* and their isnull flags */
+ bool risnull;
+
/*
* Remember whether mergejoin operator is strict (usually it will be).
- * NOTE: if it's not strict, we still assume it cannot return true for
- * one null and one non-null input.
+ * NOTE: if it's not strict, we still assume it cannot return true for one
+ * null and one non-null input.
*/
- bool mergestrict;
+ bool mergestrict;
+
/*
- * The comparison strategy in use, and the lookup info to let us call
- * the needed comparison routines. eqfinfo is the "=" operator itself.
+ * The comparison strategy in use, and the lookup info to let us call the
+ * needed comparison routines. eqfinfo is the "=" operator itself.
* cmpfinfo is either the btree comparator or the "<" operator.
*/
MergeFunctionKind cmpstrategy;
- FmgrInfo eqfinfo;
- FmgrInfo cmpfinfo;
+ FmgrInfo eqfinfo;
+ FmgrInfo cmpfinfo;
} MergeJoinClauseData;
*
* The best, most efficient way to compare two expressions is to use a btree
* comparison support routine, since that requires only one function call
- * per comparison. Hence we try to find a btree opclass that matches the
- * mergejoinable operator. If we cannot find one, we'll have to call both
+ * per comparison. Hence we try to find a btree opclass that matches the
+ * mergejoinable operator. If we cannot find one, we'll have to call both
* the "=" and (often) the "<" operator for each comparison.
*/
static MergeJoinClause
clause->rexpr = ExecInitExpr((Expr *) lsecond(qual->args), parent);
/*
- * Check permission to call the mergejoinable operator.
- * For predictability, we check this even if we end up not using it.
+ * Check permission to call the mergejoinable operator. For
+ * predictability, we check this even if we end up not using it.
*/
aclresult = pg_proc_aclcheck(qual->opfuncid, GetUserId(), ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
/*
* Lookup the comparison operators that go with the mergejoinable
- * top-level operator. (This will elog if the operator isn't
+ * top-level operator. (This will elog if the operator isn't
* mergejoinable, which would be the planner's mistake.)
*/
op_mergejoin_crossops(qual->opno,
clause->cmpstrategy = MERGEFUNC_LT;
/*
- * Look for a btree opclass including all three operators.
- * This is much like SelectSortFunction except we insist on
- * matching all the operators provided, and it can be a cross-type
- * opclass.
+ * Look for a btree opclass including all three operators. This is
+ * much like SelectSortFunction except we insist on matching all the
+ * operators provided, and it can be a cross-type opclass.
*
- * XXX for now, insist on forward sort so that NULLs can be counted
- * on to be high.
+ * XXX for now, insist on forward sort so that NULLs can be counted on to
+ * be high.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(qual->opno),
if (!opclass_is_btree(opcid))
continue;
if (get_op_opclass_strategy(ltop, opcid) == BTLessStrategyNumber &&
- get_op_opclass_strategy(gtop, opcid) == BTGreaterStrategyNumber)
+ get_op_opclass_strategy(gtop, opcid) == BTGreaterStrategyNumber)
{
clause->cmpstrategy = MERGEFUNC_CMP;
ltproc = get_opclass_proc(opcid, aform->amopsubtype,
BTORDER_PROC);
Assert(RegProcedureIsValid(ltproc));
- break; /* done looking */
+ break; /* done looking */
}
}
/*
* MJEvalInnerValues
*
- * Same as above, but for the inner tuple. Here, we have to be prepared
+ * Same as above, but for the inner tuple. Here, we have to be prepared
* to load data from either the true current inner, or the marked inner,
* so caller must tell us which slot to load from.
*/
FunctionCallInfoData fcinfo;
/*
- * Call the comparison functions in short-lived context, in case they
- * leak memory.
+ * Call the comparison functions in short-lived context, in case they leak
+ * memory.
*/
ResetExprContext(econtext);
/*
* Deal with null inputs. We treat NULL as sorting after non-NULL.
*
- * If both inputs are NULL, and the comparison function isn't
- * strict, then we call it and check for a true result (this allows
- * operators that behave like IS NOT DISTINCT to be mergejoinable).
- * If the function is strict or returns false, we temporarily
- * pretend NULL == NULL and contine checking remaining columns.
+ * If both inputs are NULL, and the comparison function isn't strict,
+ * then we call it and check for a true result (this allows operators
+ * that behave like IS NOT DISTINCT to be mergejoinable). If the
+ * function is strict or returns false, we temporarily pretend NULL ==
+ * NULL and continue checking remaining columns.
*/
if (clause->lisnull)
{
break;
}
}
- else /* must be MERGEFUNC_CMP */
+ else
+ /* must be MERGEFUNC_CMP */
{
InitFunctionCallInfoData(fcinfo, &(clause->cmpfinfo), 2,
NULL, NULL);
}
/*
- * If we had any null comparison results or NULL-vs-NULL inputs,
- * we do not want to report that the tuples are equal. Instead,
- * if result is still 0, change it to +1. This will result in
- * advancing the inner side of the join.
+ * If we had any null comparison results or NULL-vs-NULL inputs, we do not
+ * want to report that the tuples are equal. Instead, if result is still
+ * 0, change it to +1. This will result in advancing the inner side of
+ * the join.
*/
if (nulleqnull && result == 0)
result = 1;
if (ExecQual(otherqual, econtext, false))
{
/*
- * qualification succeeded. now form the desired projection tuple
- * and return the slot containing it.
+ * qualification succeeded. now form the desired projection tuple and
+ * return the slot containing it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
if (ExecQual(otherqual, econtext, false))
{
/*
- * qualification succeeded. now form the desired projection tuple
- * and return the slot containing it.
+ * qualification succeeded. now form the desired projection tuple and
+ * return the slot containing it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
doFillInner = node->mj_FillInner;
/*
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous join
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->js.ps.ps_TupFromTlist)
{
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
{
/*
* EXEC_MJ_INITIALIZE_OUTER means that this is the first time
- * ExecMergeJoin() has been called and so we have to fetch
- * the first matchable tuple for both outer and inner subplans.
- * We do the outer side in INITIALIZE_OUTER state, then
- * advance to INITIALIZE_INNER state for the inner subplan.
+ * ExecMergeJoin() has been called and so we have to fetch the
+ * first matchable tuple for both outer and inner subplans. We
+ * do the outer side in INITIALIZE_OUTER state, then advance
+ * to INITIALIZE_INNER state for the inner subplan.
*/
case EXEC_MJ_INITIALIZE_OUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_INITIALIZE_OUTER\n");
if (doFillInner)
{
/*
- * Need to emit right-join tuples for remaining
- * inner tuples. We set MatchedInner = true to
- * force the ENDOUTER state to advance inner.
+ * Need to emit right-join tuples for remaining inner
+ * tuples. We set MatchedInner = true to force the
+ * ENDOUTER state to advance inner.
*/
node->mj_JoinState = EXEC_MJ_ENDOUTER;
node->mj_MatchedInner = true;
if (doFillOuter)
{
/*
- * Need to emit left-join tuples for all outer
- * tuples, including the one we just fetched. We
- * set MatchedOuter = false to force the ENDINNER
- * state to emit first tuple before advancing
- * outer.
+ * Need to emit left-join tuples for all outer tuples,
+ * including the one we just fetched. We set
+ * MatchedOuter = false to force the ENDINNER state to
+ * emit first tuple before advancing outer.
*/
node->mj_JoinState = EXEC_MJ_ENDINNER;
node->mj_MatchedOuter = false;
break;
/*
- * EXEC_MJ_JOINTUPLES means we have two tuples which
- * satisfied the merge clause so we join them and then
- * proceed to get the next inner tuple (EXEC_MJ_NEXTINNER).
+ * EXEC_MJ_JOINTUPLES means we have two tuples which satisfied
+ * the merge clause so we join them and then proceed to get
+ * the next inner tuple (EXEC_MJ_NEXTINNER).
*/
case EXEC_MJ_JOINTUPLES:
MJ_printf("ExecMergeJoin: EXEC_MJ_JOINTUPLES\n");
node->mj_JoinState = EXEC_MJ_NEXTINNER;
/*
- * Check the extra qual conditions to see if we actually
- * want to return this join tuple. If not, can proceed
- * with merge. We must distinguish the additional
- * joinquals (which must pass to consider the tuples
- * "matched" for outer-join logic) from the otherquals
- * (which must pass before we actually return the tuple).
+ * Check the extra qual conditions to see if we actually want
+ * to return this join tuple. If not, can proceed with merge.
+ * We must distinguish the additional joinquals (which must
+ * pass to consider the tuples "matched" for outer-join logic)
+ * from the otherquals (which must pass before we actually
+ * return the tuple).
*
* We don't bother with a ResetExprContext here, on the
- * assumption that we just did one while checking the
- * merge qual. One per tuple should be sufficient. We
- * do have to set up the econtext links to the tuples
- * for ExecQual to use.
+ * assumption that we just did one while checking the merge
+ * qual. One per tuple should be sufficient. We do have to
+ * set up the econtext links to the tuples for ExecQual to
+ * use.
*/
outerTupleSlot = node->mj_OuterTupleSlot;
econtext->ecxt_outertuple = outerTupleSlot;
{
/*
* qualification succeeded. now form the desired
- * projection tuple and return the slot containing
- * it.
+ * projection tuple and return the slot containing it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
break;
/*
- * EXEC_MJ_NEXTINNER means advance the inner scan to the
- * next tuple. If the tuple is not nil, we then proceed to
- * test it against the join qualification.
+ * EXEC_MJ_NEXTINNER means advance the inner scan to the next
+ * tuple. If the tuple is not nil, we then proceed to test it
+ * against the join qualification.
*
* Before advancing, we check to see if we must emit an
* outer-join fill tuple for this inner tuple.
{
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
}
/*
- * now we get the next inner tuple, if any. If there's
- * none, advance to next outer tuple (which may be able
- * to join to previously marked tuples).
+ * now we get the next inner tuple, if any. If there's none,
+ * advance to next outer tuple (which may be able to join to
+ * previously marked tuples).
*
- * If we find one but it cannot join to anything, stay
- * in NEXTINNER state to fetch the next one.
+ * If we find one but it cannot join to anything, stay in
+ * NEXTINNER state to fetch the next one.
*/
innerTupleSlot = ExecProcNode(innerPlan);
node->mj_InnerTupleSlot = innerTupleSlot;
/*
* Test the new inner tuple to see if it matches outer.
*
- * If they do match, then we join them and move on to the
- * next inner tuple (EXEC_MJ_JOINTUPLES).
+ * If they do match, then we join them and move on to the next
+ * inner tuple (EXEC_MJ_JOINTUPLES).
*
* If they do not match then advance to next outer tuple.
*/
{
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
node->mj_MatchedOuter = false;
/*
- * if the outer tuple is null then we are done with the
- * join, unless we have inner tuples we need to null-fill.
+ * if the outer tuple is null then we are done with the join,
+ * unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
if (doFillInner && !TupIsNull(innerTupleSlot))
{
/*
- * Need to emit right-join tuples for remaining
- * inner tuples.
+ * Need to emit right-join tuples for remaining inner
+ * tuples.
*/
node->mj_JoinState = EXEC_MJ_ENDOUTER;
break;
if (compareResult == 0)
{
/*
- * the merge clause matched so now we restore the
- * inner scan position to the first mark, and go join
- * that tuple (and any following ones) to the new outer.
+ * the merge clause matched so now we restore the inner
+ * scan position to the first mark, and go join that tuple
+ * (and any following ones) to the new outer.
*
- * NOTE: we do not need to worry about the MatchedInner
- * state for the rescanned inner tuples. We know all
- * of them will match this new outer tuple and
- * therefore won't be emitted as fill tuples. This
- * works *only* because we require the extra joinquals
- * to be nil when doing a right or full join ---
- * otherwise some of the rescanned tuples might fail
- * the extra joinquals.
+ * NOTE: we do not need to worry about the MatchedInner state
+ * for the rescanned inner tuples. We know all of them
+ * will match this new outer tuple and therefore won't be
+ * emitted as fill tuples. This works *only* because we
+ * require the extra joinquals to be nil when doing a
+ * right or full join --- otherwise some of the rescanned
+ * tuples might fail the extra joinquals.
*/
ExecRestrPos(innerPlan);
/*
* ExecRestrPos probably should give us back a new Slot,
* but since it doesn't, use the marked slot. (The
- * previously returned mj_InnerTupleSlot cannot be
- * assumed to hold the required tuple.)
+ * previously returned mj_InnerTupleSlot cannot be assumed
+ * to hold the required tuple.)
*/
node->mj_InnerTupleSlot = innerTupleSlot;
/* we need not do MJEvalInnerValues again */
* which means that all subsequent outer tuples will be
* larger than our marked inner tuples. So we need not
* revisit any of the marked tuples but can proceed to
- * look for a match to the current inner. If there's
+ * look for a match to the current inner. If there's
* no more inners, we are done.
* ----------------
*/
/*
* before we advance, make sure the current tuples do not
- * satisfy the mergeclauses. If they do, then we update
- * the marked tuple position and go join them.
+ * satisfy the mergeclauses. If they do, then we update the
+ * marked tuple position and go join them.
*/
compareResult = MJCompare(node);
MJ_DEBUG_COMPARE(compareResult);
}
else if (compareResult < 0)
node->mj_JoinState = EXEC_MJ_SKIPOUTER_ADVANCE;
- else /* compareResult > 0 */
+ else
+ /* compareResult > 0 */
node->mj_JoinState = EXEC_MJ_SKIPINNER_ADVANCE;
break;
{
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
node->mj_MatchedOuter = false;
/*
- * if the outer tuple is null then we are done with the
- * join, unless we have inner tuples we need to null-fill.
+ * if the outer tuple is null then we are done with the join,
+ * unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
if (doFillInner && !TupIsNull(innerTupleSlot))
{
/*
- * Need to emit right-join tuples for remaining
- * inner tuples.
+ * Need to emit right-join tuples for remaining inner
+ * tuples.
*/
node->mj_JoinState = EXEC_MJ_ENDOUTER;
break;
{
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
node->mj_MatchedInner = false;
/*
- * if the inner tuple is null then we are done with the
- * join, unless we have outer tuples we need to null-fill.
+ * if the inner tuple is null then we are done with the join,
+ * unless we have outer tuples we need to null-fill.
*/
if (TupIsNull(innerTupleSlot))
{
if (doFillOuter && !TupIsNull(outerTupleSlot))
{
/*
- * Need to emit left-join tuples for remaining
- * outer tuples.
+ * Need to emit left-join tuples for remaining outer
+ * tuples.
*/
node->mj_JoinState = EXEC_MJ_ENDINNER;
break;
break;
/*
- * EXEC_MJ_ENDOUTER means we have run out of outer tuples,
- * but are doing a right/full join and therefore must
- * null-fill any remaing unmatched inner tuples.
+ * EXEC_MJ_ENDOUTER means we have run out of outer tuples, but
+ * are doing a right/full join and therefore must null-fill
+ * any remaining unmatched inner tuples.
*/
case EXEC_MJ_ENDOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_ENDOUTER\n");
{
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
break;
/*
- * EXEC_MJ_ENDINNER means we have run out of inner tuples,
- * but are doing a left/full join and therefore must null-
- * fill any remaing unmatched outer tuples.
+ * EXEC_MJ_ENDINNER means we have run out of inner tuples, but
+ * are doing a left/full join and therefore must null-fill
+ * any remaining unmatched outer tuples.
*/
case EXEC_MJ_ENDINNER:
MJ_printf("ExecMergeJoin: EXEC_MJ_ENDINNER\n");
{
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join
- * quals.
+ * tuple, and return it if it passes the non-join quals.
*/
TupleTableSlot *result;
ExecAssignExprContext(estate, &mergestate->js.ps);
/*
- * we need two additional econtexts in which we can compute the
- * join expressions from the left and right input tuples. The
- * node's regular econtext won't do because it gets reset too
- * often.
+ * we need two additional econtexts in which we can compute the join
+ * expressions from the left and right input tuples. The node's regular
+ * econtext won't do because it gets reset too often.
*/
mergestate->mj_OuterEContext = CreateExprContext(estate);
mergestate->mj_InnerEContext = CreateExprContext(estate);
mergestate->mj_FillInner = false;
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
break;
case JOIN_RIGHT:
mergestate->mj_FillOuter = false;
mergestate->mj_FillInner = true;
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
/*
- * Can't handle right or full join with non-nil extra
- * joinclauses. This should have been caught by planner.
+ * Can't handle right or full join with non-nil extra joinclauses.
+ * This should have been caught by planner.
*/
if (node->join.joinqual != NIL)
ereport(ERROR,
mergestate->mj_FillInner = true;
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
/*
- * Can't handle right or full join with non-nil extra
- * joinclauses.
+ * Can't handle right or full join with non-nil extra joinclauses.
*/
if (node->join.joinqual != NIL)
ereport(ERROR,
node->mj_InnerTupleSlot = NULL;
/*
- * if chgParam of subnodes is not null then plans will be re-scanned
- * by first ExecProcNode.
+ * if chgParam of subnodes is not null then plans will be re-scanned by
+ * first ExecProcNode.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->lefttree, exprCtxt);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.38 2004/12/31 21:59:45 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.39 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
econtext->ecxt_outertuple = outerTupleSlot;
/*
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous join
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->js.ps.ps_TupFromTlist)
{
}
/*
- * If we're doing an IN join, we want to return at most one row per
- * outer tuple; so we can stop scanning the inner scan if we matched
- * on the previous try.
+ * If we're doing an IN join, we want to return at most one row per outer
+ * tuple; so we can stop scanning the inner scan if we matched on the
+ * previous try.
*/
if (node->js.jointype == JOIN_IN &&
node->nl_MatchedOuter)
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
outerTupleSlot = ExecProcNode(outerPlan);
/*
- * if there are no more outer tuples, then the join is
- * complete..
+ * if there are no more outer tuples, then the join is complete..
*/
if (TupIsNull(outerTupleSlot))
{
/*
* The scan key of the inner plan might depend on the current
- * outer tuple (e.g. in index scans), that's why we pass our
- * expr context.
+ * outer tuple (e.g. in index scans), that's why we pass our expr
+ * context.
*/
ExecReScan(innerPlan, econtext);
}
node->js.jointype == JOIN_LEFT)
{
/*
- * We are doing an outer join and there were no join
- * matches for this outer tuple. Generate a fake join
- * tuple with nulls for the inner tuple, and return it if
- * it passes the non-join quals.
+ * We are doing an outer join and there were no join matches
+ * for this outer tuple. Generate a fake join tuple with
+ * nulls for the inner tuple, and return it if it passes the
+ * non-join quals.
*/
econtext->ecxt_innertuple = node->nl_NullInnerTupleSlot;
if (ExecQual(otherqual, econtext, false))
{
/*
- * qualification was satisfied so we project and
- * return the slot containing the result tuple using
+ * qualification was satisfied so we project and return
+ * the slot containing the result tuple using
* ExecProject().
*/
TupleTableSlot *result;
}
/*
- * at this point we have a new pair of inner and outer tuples so
- * we test the inner and outer tuples to see if they satisfy the
- * node's qualification.
+ * at this point we have a new pair of inner and outer tuples so we
+ * test the inner and outer tuples to see if they satisfy the node's
+ * qualification.
*
- * Only the joinquals determine MatchedOuter status, but all quals
- * must pass to actually return the tuple.
+ * Only the joinquals determine MatchedOuter status, but all quals must
+ * pass to actually return the tuple.
*/
ENL1_printf("testing qualification");
if (otherqual == NIL || ExecQual(otherqual, econtext, false))
{
/*
- * qualification was satisfied so we project and return
- * the slot containing the result tuple using
- * ExecProject().
+ * qualification was satisfied so we project and return the
+ * slot containing the result tuple using ExecProject().
*/
TupleTableSlot *result;
ExprDoneCond isDone;
case JOIN_LEFT:
nlstate->nl_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(nlstate)));
+ ExecGetResultType(innerPlanState(nlstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
/*
* If outerPlan->chgParam is not null then plan will be automatically
- * re-scanned by first ExecProcNode. innerPlan is re-scanned for each
- * new outer tuple and MUST NOT be re-scanned from here or you'll get
- * troubles from inner index scans when outer Vars are used as
- * run-time keys...
+ * re-scanned by first ExecProcNode. innerPlan is re-scanned for each new
+ * outer tuple and MUST NOT be re-scanned from here or you'll get troubles
+ * from inner index scans when outer Vars are used as run-time keys...
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan, exprCtxt);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.31 2005/04/24 15:32:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.32 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Check to see if we're still projecting out tuples from a previous
- * scan tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
+ * Check to see if we're still projecting out tuples from a previous scan
+ * tuple (because there is a function-returning-set in the projection
+ * expressions). If so, try to project another one.
*/
if (node->ps.ps_TupFromTlist)
{
/*
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a scan tuple.
+ * storage allocated in the previous tuple cycle. Note this can't happen
+ * until we're done projecting out tuples from a scan tuple.
*/
ResetExprContext(econtext);
/*
* if rs_done is true then it means that we were asked to return a
* constant tuple and we already did the last time ExecResult() was
- * called, OR that we failed the constant qual check. Either way, now
- * we are through.
+ * called, OR that we failed the constant qual check. Either way, now we
+ * are through.
*/
while (!node->rs_done)
{
if (outerPlan != NULL)
{
/*
- * retrieve tuples from the outer plan until there are no
- * more.
+ * retrieve tuples from the outer plan until there are no more.
*/
outerTupleSlot = ExecProcNode(outerPlan);
node->ps.ps_OuterTupleSlot = outerTupleSlot;
/*
- * XXX gross hack. use outer tuple as scan tuple for
- * projection
+ * XXX gross hack. use outer tuple as scan tuple for projection
*/
econtext->ecxt_outertuple = outerTupleSlot;
econtext->ecxt_scantuple = outerTupleSlot;
else
{
/*
- * if we don't have an outer plan, then we are just generating
- * the results from a constant target list. Do it only once.
+ * if we don't have an outer plan, then we are just generating the
+ * results from a constant target list. Do it only once.
*/
node->rs_done = true;
}
/*
- * form the result tuple using ExecProject(), and return it ---
- * unless the projection produces an empty set, in which case we
- * must loop back to see if there are more outerPlan tuples.
+ * form the result tuple using ExecProject(), and return it --- unless
+ * the projection produces an empty set, in which case we must loop
+ * back to see if there are more outerPlan tuples.
*/
resultSlot = ExecProject(node->ps.ps_ProjInfo, &isDone);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.53 2005/05/15 21:19:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.54 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
slot = node->ss_ScanTupleSlot;
/*
- * Clear any reference to the previously returned tuple. The idea
- * here is to not have the tuple slot be the last holder of a pin on
- * that tuple's buffer; if it is, we'll need a separate visit to the
- * bufmgr to release the buffer. By clearing here, we get to have the
- * release done by ReleaseAndReadBuffer inside heap_getnext.
+ * Clear any reference to the previously returned tuple. The idea here is
+ * to not have the tuple slot be the last holder of a pin on that tuple's
+ * buffer; if it is, we'll need a separate visit to the bufmgr to release
+ * the buffer. By clearing here, we get to have the release done by
+ * ReleaseAndReadBuffer inside heap_getnext.
*/
ExecClearTuple(slot);
/*
* Note that unlike IndexScan, SeqScan never use keys in
- * heap_beginscan (and this is very bad) - so, here we do not
- * check are keys ok or not.
+ * heap_beginscan (and this is very bad) - so, here we do not check
+ * are keys ok or not.
*/
/* Flag for the next call that no more tuples */
tuple = heap_getnext(scandesc, direction);
/*
- * save the tuple and the buffer returned to us by the access methods
- * in our scan tuple slot and return the slot. Note: we pass 'false'
- * because tuples returned by heap_getnext() are pointers onto disk
- * pages and were not created with palloc() and so should not be
- * pfree()'d. Note also that ExecStoreTuple will increment the
- * refcount of the buffer; the refcount will not be dropped until the
- * tuple table slot is cleared.
+ * save the tuple and the buffer returned to us by the access methods in
+ * our scan tuple slot and return the slot. Note: we pass 'false' because
+ * tuples returned by heap_getnext() are pointers onto disk pages and were
+ * not created with palloc() and so should not be pfree()'d. Note also
+ * that ExecStoreTuple will increment the refcount of the buffer; the
+ * refcount will not be dropped until the tuple table slot is cleared.
*/
if (tuple)
- ExecStoreTuple(tuple, /* tuple to store */
- slot, /* slot to store in */
- scandesc->rs_cbuf, /* buffer associated with
- * this tuple */
- false); /* don't pfree this pointer */
+ ExecStoreTuple(tuple, /* tuple to store */
+ slot, /* slot to store in */
+ scandesc->rs_cbuf, /* buffer associated with this
+ * tuple */
+ false); /* don't pfree this pointer */
return slot;
}
HeapScanDesc currentScanDesc;
/*
- * get the relation object id from the relid'th entry in the range
- * table, open that relation and initialize the scan state.
+ * get the relation object id from the relid'th entry in the range table,
+ * open that relation and initialize the scan state.
*
* We acquire AccessShareLock for the duration of the scan.
*/
SeqScanState *scanstate;
/*
- * Once upon a time it was possible to have an outerPlan of a SeqScan,
- * but not any more.
+ * Once upon a time it was possible to have an outerPlan of a SeqScan, but
+ * not any more.
*/
Assert(outerPlan(node) == NULL);
Assert(innerPlan(node) == NULL);
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
- * InitScanRelation. This lock should be held till end of
- * transaction. (There is a faction that considers this too much
- * locking, however.)
+ * InitScanRelation. This lock should be held till end of transaction.
+ * (There is a faction that considers this too much locking, however.)
*/
heap_close(relation, NoLock);
}
HeapScanDesc scan = node->ss_currentScanDesc;
/*
- * Clear any reference to the previously returned tuple. This is
- * needed because the slot is simply pointing at scan->rs_cbuf, which
- * heap_restrpos will change; we'd have an internally inconsistent
- * slot if we didn't do this.
+ * Clear any reference to the previously returned tuple. This is needed
+ * because the slot is simply pointing at scan->rs_cbuf, which
+ * heap_restrpos will change; we'd have an internally inconsistent slot if
+ * we didn't do this.
*/
ExecClearTuple(node->ss_ScanTupleSlot);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.17 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSetOp.c,v 1.18 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
resultTupleSlot = node->ps.ps_ResultTupleSlot;
/*
- * If the previously-returned tuple needs to be returned more than
- * once, keep returning it.
+ * If the previously-returned tuple needs to be returned more than once,
+ * keep returning it.
*/
if (node->numOutput > 0)
{
ExecClearTuple(resultTupleSlot);
/*
- * Absorb groups of duplicate tuples, counting them, and saving the
- * first of each group as a possible return value. At the end of each
- * group, decide whether to return anything.
+ * Absorb groups of duplicate tuples, counting them, and saving the first
+ * of each group as a possible return value. At the end of each group,
+ * decide whether to return anything.
*
* We assume that the tuples arrive in sorted order so we can detect
* duplicates easily.
else
{
/*
- * Current tuple is member of same group as resultTuple. Count
- * it in the appropriate counter.
+ * Current tuple is member of same group as resultTuple. Count it
+ * in the appropriate counter.
*/
int flag;
bool isNull;
* Miscellaneous initialization
*
* SetOp nodes have no ExprContext initialization because they never call
- * ExecQual or ExecProject. But they do need a per-tuple memory
- * context anyway for calling execTuplesMatch.
+ * ExecQual or ExecProject. But they do need a per-tuple memory context
+ * anyway for calling execTuplesMatch.
*/
setopstate->tempContext =
AllocSetContextCreate(CurrentMemoryContext,
outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate);
/*
- * setop nodes do no projections, so initialize projection info for
- * this node appropriately
+ * setop nodes do no projections, so initialize projection info for this
+ * node appropriately
*/
ExecAssignResultTypeFromOuterPlan(&setopstate->ps);
setopstate->ps.ps_ProjInfo = NULL;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.50 2005/03/16 21:38:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.51 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
tuplesortstate = (Tuplesortstate *) node->tuplesortstate;
/*
- * If first time through, read all tuples from outer plan and pass
- * them to tuplesort.c. Subsequent calls just fetch tuples from
- * tuplesort.
+ * If first time through, read all tuples from outer plan and pass them to
+ * tuplesort.c. Subsequent calls just fetch tuples from tuplesort.
*/
if (!node->sort_Done)
"sorting subplan");
/*
- * Want to scan subplan in the forward direction while creating
- * the sorted data.
+ * Want to scan subplan in the forward direction while creating the
+ * sorted data.
*/
estate->es_direction = ForwardScanDirection;
outerPlanState(sortstate) = ExecInitNode(outerPlan(node), estate);
/*
- * initialize tuple type. no need to initialize projection info
- * because this node doesn't do projections.
+ * initialize tuple type. no need to initialize projection info because
+ * this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan(&sortstate->ss.ps);
ExecAssignScanTypeFromOuterPlan(&sortstate->ss);
ExecReScanSort(SortState *node, ExprContext *exprCtxt)
{
/*
- * If we haven't sorted yet, just return. If outerplan' chgParam is
- * not NULL then it will be re-scanned by ExecProcNode, else - no
- * reason to re-scan it at all.
+ * If we haven't sorted yet, just return. If outerplan' chgParam is not
+ * NULL then it will be re-scanned by ExecProcNode, else - no reason to
+ * re-scan it at all.
*/
if (!node->sort_Done)
return;
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
/*
- * If subnode is to be rescanned then we forget previous sort results;
- * we have to re-read the subplan and re-sort.
+ * If subnode is to be rescanned then we forget previous sort results; we
+ * have to re-read the subplan and re-sort.
*
* Otherwise we can just rewind and rescan the sorted output.
*/
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.69 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.70 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
elog(ERROR, "hashed subplan with direct correlation not supported");
/*
- * If first time through or we need to rescan the subplan, build the
- * hash table.
+ * If first time through or we need to rescan the subplan, build the hash
+ * table.
*/
if (node->hashtable == NULL || planstate->chgParam != NULL)
buildSubPlanHash(node);
/*
- * The result for an empty subplan is always FALSE; no need to
- * evaluate lefthand side.
+ * The result for an empty subplan is always FALSE; no need to evaluate
+ * lefthand side.
*/
*isNull = false;
if (!node->havehashrows && !node->havenullrows)
slot = ExecProject(node->projLeft, NULL);
/*
- * Note: because we are typically called in a per-tuple context, we
- * have to explicitly clear the projected tuple before returning.
- * Otherwise, we'll have a double-free situation: the per-tuple
- * context will probably be reset before we're called again, and then
- * the tuple slot will think it still needs to free the tuple.
+ * Note: because we are typically called in a per-tuple context, we have
+ * to explicitly clear the projected tuple before returning. Otherwise,
+ * we'll have a double-free situation: the per-tuple context will probably
+ * be reset before we're called again, and then the tuple slot will think
+ * it still needs to free the tuple.
*/
/*
- * Since the hashtable routines will use innerecontext's per-tuple
- * memory as working memory, be sure to reset it for each tuple.
+ * Since the hashtable routines will use innerecontext's per-tuple memory
+ * as working memory, be sure to reset it for each tuple.
*/
ResetExprContext(innerecontext);
/*
- * If the LHS is all non-null, probe for an exact match in the main
- * hash table. If we find one, the result is TRUE. Otherwise, scan
- * the partly-null table to see if there are any rows that aren't
- * provably unequal to the LHS; if so, the result is UNKNOWN. (We
- * skip that part if we don't care about UNKNOWN.) Otherwise, the
- * result is FALSE.
+ * If the LHS is all non-null, probe for an exact match in the main hash
+ * table. If we find one, the result is TRUE. Otherwise, scan the
+ * partly-null table to see if there are any rows that aren't provably
+ * unequal to the LHS; if so, the result is UNKNOWN. (We skip that part
+ * if we don't care about UNKNOWN.) Otherwise, the result is FALSE.
*
- * Note: the reason we can avoid a full scan of the main hash table is
- * that the combining operators are assumed never to yield NULL when
- * both inputs are non-null. If they were to do so, we might need to
- * produce UNKNOWN instead of FALSE because of an UNKNOWN result in
- * comparing the LHS to some main-table entry --- which is a
- * comparison we will not even make, unless there's a chance match of
- * hash keys.
+ * Note: the reason we can avoid a full scan of the main hash table is that
+ * the combining operators are assumed never to yield NULL when both
+ * inputs are non-null. If they were to do so, we might need to produce
+ * UNKNOWN instead of FALSE because of an UNKNOWN result in comparing the
+ * LHS to some main-table entry --- which is a comparison we will not even
+ * make, unless there's a chance match of hash keys.
*/
if (slotNoNulls(slot))
{
}
/*
- * When the LHS is partly or wholly NULL, we can never return TRUE. If
- * we don't care about UNKNOWN, just return FALSE. Otherwise, if the
- * LHS is wholly NULL, immediately return UNKNOWN. (Since the
- * combining operators are strict, the result could only be FALSE if
- * the sub-select were empty, but we already handled that case.)
- * Otherwise, we must scan both the main and partly-null tables to see
- * if there are any rows that aren't provably unequal to the LHS; if
- * so, the result is UNKNOWN. Otherwise, the result is FALSE.
+ * When the LHS is partly or wholly NULL, we can never return TRUE. If we
+ * don't care about UNKNOWN, just return FALSE. Otherwise, if the LHS is
+ * wholly NULL, immediately return UNKNOWN. (Since the combining
+ * operators are strict, the result could only be FALSE if the sub-select
+ * were empty, but we already handled that case.) Otherwise, we must scan
+ * both the main and partly-null tables to see if there are any rows that
+ * aren't provably unequal to the LHS; if so, the result is UNKNOWN.
+ * Otherwise, the result is FALSE.
*/
if (node->hashnulls == NULL)
{
ArrayBuildState *astate = NULL;
/*
- * We are probably in a short-lived expression-evaluation context.
- * Switch to the child plan's per-query context for manipulating its
- * chgParam, calling ExecProcNode on it, etc.
+ * We are probably in a short-lived expression-evaluation context. Switch
+ * to the child plan's per-query context for manipulating its chgParam,
+ * calling ExecProcNode on it, etc.
*/
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
ExecReScan(planstate, NULL);
/*
- * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the
- * result is boolean as are the results of the combining operators. We
- * combine results within a tuple (if there are multiple columns)
- * using OR semantics if "useOr" is true, AND semantics if not. We
- * then combine results across tuples (if the subplan produces more
- * than one) using OR semantics for ANY_SUBLINK or AND semantics for
- * ALL_SUBLINK. (MULTIEXPR_SUBLINK doesn't allow multiple tuples from
- * the subplan.) NULL results from the combining operators are handled
- * according to the usual SQL semantics for OR and AND. The result
- * for no input tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK,
- * NULL for MULTIEXPR_SUBLINK.
+ * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the result
+ * is boolean as are the results of the combining operators. We combine
+ * results within a tuple (if there are multiple columns) using OR
+ * semantics if "useOr" is true, AND semantics if not. We then combine
+ * results across tuples (if the subplan produces more than one) using OR
+ * semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
+ * (MULTIEXPR_SUBLINK doesn't allow multiple tuples from the subplan.)
+ * NULL results from the combining operators are handled according to the
+ * usual SQL semantics for OR and AND. The result for no input tuples is
+ * FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
+ * MULTIEXPR_SUBLINK.
*
- * For EXPR_SUBLINK we require the subplan to produce no more than one
- * tuple, else an error is raised. For ARRAY_SUBLINK we allow the
- * subplan to produce more than one tuple. In either case, if zero
- * tuples are produced, we return NULL. Assuming we get a tuple, we
- * just use its first column (there can be only one non-junk column in
- * this case).
+ * For EXPR_SUBLINK we require the subplan to produce no more than one tuple,
+ * else an error is raised. For ARRAY_SUBLINK we allow the subplan to
+ * produce more than one tuple. In either case, if zero tuples are
+ * produced, we return NULL. Assuming we get a tuple, we just use its
+ * first column (there can be only one non-junk column in this case).
*/
result = BoolGetDatum(subLinkType == ALL_SUBLINK);
*isNull = false;
found = true;
/*
- * We need to copy the subplan's tuple in case the result is
- * of pass-by-ref type --- our return value will point into
- * this copied tuple! Can't use the subplan's instance of the
- * tuple since it won't still be valid after next
- * ExecProcNode() call. node->curTuple keeps track of the
- * copied tuple for eventual freeing.
+ * We need to copy the subplan's tuple in case the result is of
+ * pass-by-ref type --- our return value will point into this
+ * copied tuple! Can't use the subplan's instance of the tuple
+ * since it won't still be valid after next ExecProcNode() call.
+ * node->curTuple keeps track of the copied tuple for eventual
+ * freeing.
*/
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
if (node->curTuple)
bool expnull;
/*
- * Load up the Param representing this column of the
- * sub-select.
+ * Load up the Param representing this column of the sub-select.
*/
prmdata = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(prmdata->execPlan == NULL);
{
/*
* deal with empty subplan result. result/isNull were previously
- * initialized correctly for all sublink types except EXPR, ARRAY,
- * and MULTIEXPR; for those, return NULL.
+ * initialized correctly for all sublink types except EXPR, ARRAY, and
+ * MULTIEXPR; for those, return NULL.
*/
if (subLinkType == EXPR_SUBLINK ||
subLinkType == ARRAY_SUBLINK ||
Assert(!subplan->useOr);
/*
- * If we already had any hash tables, destroy 'em; then create empty
- * hash table(s).
+ * If we already had any hash tables, destroy 'em; then create empty hash
+ * table(s).
*
* If we need to distinguish accurately between FALSE and UNKNOWN (i.e.,
- * NULL) results of the IN operation, then we have to store subplan
- * output rows that are partly or wholly NULL. We store such rows in
- * a separate hash table that we expect will be much smaller than the
- * main table. (We can use hashing to eliminate partly-null rows that
- * are not distinct. We keep them separate to minimize the cost of
- * the inevitable full-table searches; see findPartialMatch.)
+ * NULL) results of the IN operation, then we have to store subplan output
+ * rows that are partly or wholly NULL. We store such rows in a separate
+ * hash table that we expect will be much smaller than the main table.
+ * (We can use hashing to eliminate partly-null rows that are not
+ * distinct. We keep them separate to minimize the cost of the inevitable
+ * full-table searches; see findPartialMatch.)
*
- * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
- * need to store subplan output rows that contain NULL.
+ * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't need
+ * to store subplan output rows that contain NULL.
*/
MemoryContextReset(node->tablecxt);
node->hashtable = NULL;
}
/*
- * We are probably in a short-lived expression-evaluation context.
- * Switch to the child plan's per-query context for calling
- * ExecProcNode.
+ * We are probably in a short-lived expression-evaluation context. Switch
+ * to the child plan's per-query context for calling ExecProcNode.
*/
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
ExecReScan(planstate, NULL);
/*
- * Scan the subplan and load the hash table(s). Note that when there
- * are duplicate rows coming out of the sub-select, only one copy is
- * stored.
+ * Scan the subplan and load the hash table(s). Note that when there are
+ * duplicate rows coming out of the sub-select, only one copy is stored.
*/
for (slot = ExecProcNode(planstate);
!TupIsNull(slot);
bool isnew;
/*
- * Load up the Params representing the raw sub-select outputs,
- * then form the projection tuple to store in the hashtable.
+ * Load up the Params representing the raw sub-select outputs, then
+ * form the projection tuple to store in the hashtable.
*/
foreach(plst, subplan->paramIds)
{
}
/*
- * Reset innerecontext after each inner tuple to free any memory
- * used in hash computation or comparison routines.
+ * Reset innerecontext after each inner tuple to free any memory used
+ * in hash computation or comparison routines.
*/
ResetExprContext(innerecontext);
}
/*
- * Since the projected tuples are in the sub-query's context and not
- * the main context, we'd better clear the tuple slot before there's
- * any chance of a reset of the sub-query's context. Else we will
- * have the potential for a double free attempt. (XXX possibly
- * no longer needed, but can't hurt.)
+ * Since the projected tuples are in the sub-query's context and not the
+ * main context, we'd better clear the tuple slot before there's any
+ * chance of a reset of the sub-query's context. Else we will have the
+ * potential for a double free attempt. (XXX possibly no longer needed,
+ * but can't hurt.)
*/
ExecClearTuple(node->projRight->pi_slot);
/*
* create an EState for the subplan
*
- * The subquery needs its own EState because it has its own rangetable.
- * It shares our Param ID space, however. XXX if rangetable access
- * were done differently, the subquery could share our EState, which
- * would eliminate some thrashing about in this module...
+ * The subquery needs its own EState because it has its own rangetable. It
+ * shares our Param ID space, however. XXX if rangetable access were done
+ * differently, the subquery could share our EState, which would eliminate
+ * some thrashing about in this module...
*/
sp_estate = CreateExecutorState();
node->sub_estate = sp_estate;
MemoryContextSwitchTo(oldcontext);
/*
- * If this plan is un-correlated or undirect correlated one and want
- * to set params for parent plan then mark parameters as needing
- * evaluation.
+ * If this plan is un-correlated or undirect correlated one and want to
+ * set params for parent plan then mark parameters as needing evaluation.
*
* Note that in the case of un-correlated subqueries we don't care about
- * setting parent->chgParam here: indices take care about it, for
- * others - it doesn't matter...
+ * setting parent->chgParam here: indices take care about it, for others -
+ * it doesn't matter...
*/
if (subplan->setParam != NIL)
{
}
/*
- * If we are going to hash the subquery output, initialize relevant
- * stuff. (We don't create the hashtable until needed, though.)
+ * If we are going to hash the subquery output, initialize relevant stuff.
+ * (We don't create the hashtable until needed, though.)
*/
if (subplan->useHashTable)
{
/*
* We use ExecProject to evaluate the lefthand and righthand
- * expression lists and form tuples. (You might think that we
- * could use the sub-select's output tuples directly, but that is
- * not the case if we had to insert any run-time coercions of the
- * sub-select's output datatypes; anyway this avoids storing any
- * resjunk columns that might be in the sub-select's output.) Run
- * through the combining expressions to build tlists for the
- * lefthand and righthand sides. We need both the ExprState list
- * (for ExecProject) and the underlying parse Exprs (for
- * ExecTypeFromTL).
+ * expression lists and form tuples. (You might think that we could
+ * use the sub-select's output tuples directly, but that is not the
+ * case if we had to insert any run-time coercions of the sub-select's
+ * output datatypes; anyway this avoids storing any resjunk columns
+ * that might be in the sub-select's output.) Run through the
+ * combining expressions to build tlists for the lefthand and
+ * righthand sides. We need both the ExprState list (for ExecProject)
+ * and the underlying parse Exprs (for ExecTypeFromTL).
*
- * We also extract the combining operators themselves to initialize
- * the equality and hashing functions for the hash tables.
+ * We also extract the combining operators themselves to initialize the
+ * equality and hashing functions for the hash tables.
*/
lefttlist = righttlist = NIL;
leftptlist = rightptlist = NIL;
}
/*
- * Create a tupletable to hold these tuples. (Note: we never
- * bother to free the tupletable explicitly; that's okay because
- * it will never store raw disk tuples that might have associated
- * buffer pins. The only resource involved is memory, which will
- * be cleaned up by freeing the query context.)
+ * Create a tupletable to hold these tuples. (Note: we never bother
+ * to free the tupletable explicitly; that's okay because it will
+ * never store raw disk tuples that might have associated buffer pins.
+ * The only resource involved is memory, which will be cleaned up by
+ * freeing the query context.)
*/
tupTable = ExecCreateTupleTable(2);
/*
- * Construct tupdescs, slots and projection nodes for left and
- * right sides. The lefthand expressions will be evaluated in the
- * parent plan node's exprcontext, which we don't have access to
- * here. Fortunately we can just pass NULL for now and fill it in
- * later (hack alert!). The righthand expressions will be
- * evaluated in our own innerecontext.
+ * Construct tupdescs, slots and projection nodes for left and right
+ * sides. The lefthand expressions will be evaluated in the parent
+ * plan node's exprcontext, which we don't have access to here.
+ * Fortunately we can just pass NULL for now and fill it in later
+ * (hack alert!). The righthand expressions will be evaluated in our
+ * own innerecontext.
*/
tupDesc = ExecTypeFromTL(leftptlist, false);
slot = ExecAllocTableSlot(tupTable);
found = true;
/*
- * We need to copy the subplan's tuple into our own context, in
- * case any of the params are pass-by-ref type --- the pointers
- * stored in the param structs will point at this copied tuple!
- * node->curTuple keeps track of the copied tuple for eventual
- * freeing.
+ * We need to copy the subplan's tuple into our own context, in case
+ * any of the params are pass-by-ref type --- the pointers stored in
+ * the param structs will point at this copied tuple! node->curTuple
+ * keeps track of the copied tuple for eventual freeing.
*/
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
if (node->curTuple)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.26 2005/05/22 22:30:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.27 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
direction = estate->es_direction;
/*
- * We need not support EvalPlanQual here, since we are not scanning a
- * real relation.
+ * We need not support EvalPlanQual here, since we are not scanning a real
+ * relation.
*/
/*
- * Get the next tuple from the sub-query. We have to be careful to
- * run it in its appropriate memory context.
+ * Get the next tuple from the sub-query. We have to be careful to run it
+ * in its appropriate memory context.
*/
node->sss_SubEState->es_direction = direction;
ExecCheckRTPerms(rte->subquery->rtable);
/*
- * The subquery needs its own EState because it has its own
- * rangetable. It shares our Param ID space, however. XXX if
- * rangetable access were done differently, the subquery could share
- * our EState, which would eliminate some thrashing about in this
- * module...
+ * The subquery needs its own EState because it has its own rangetable. It
+ * shares our Param ID space, however. XXX if rangetable access were done
+ * differently, the subquery could share our EState, which would eliminate
+ * some thrashing about in this module...
*/
sp_estate = CreateExecutorState();
subquerystate->sss_SubEState = sp_estate;
* clean out the upper tuple table
*/
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
- node->ss.ss_ScanTupleSlot = NULL; /* not ours to clear */
+ node->ss.ss_ScanTupleSlot = NULL; /* not ours to clear */
/*
* close down subquery
/*
* ExecReScan doesn't know about my subplan, so I have to do
- * changed-parameter signaling myself. This is just as well, because
- * the subplan has its own memory context in which its chgParam state
- * lives.
+ * changed-parameter signaling myself. This is just as well, because the
+ * subplan has its own memory context in which its chgParam state lives.
*/
if (node->ss.ps.chgParam != NULL)
UpdateChangedParamSet(node->subplan, node->ss.ps.chgParam);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.42 2005/09/22 15:09:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.43 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return slot; /* return empty slot */
/*
- * XXX shouldn't we check here to make sure tuple matches TID
- * list? In runtime-key case this is not certain, is it?
+ * XXX shouldn't we check here to make sure tuple matches TID list? In
+ * runtime-key case this is not certain, is it?
*/
ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
tuple = &(node->tss_htup);
/*
- * ok, now that we have what we need, fetch an tid tuple. if scanning
- * this tid succeeded then return the appropriate heap tuple.. else
- * return NULL.
+ * ok, now that we have what we need, fetch an tid tuple. if scanning this
+ * tid succeeded then return the appropriate heap tuple.. else return
+ * NULL.
*/
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
/*
* store the scanned tuple in the scan tuple slot of the scan
- * state. Eventually we will only do this and not return a
- * tuple. Note: we pass 'false' because tuples returned by
- * amgetnext are pointers onto disk pages and were not created
- * with palloc() and so should not be pfree()'d.
+ * state. Eventually we will only do this and not return a tuple.
+ * Note: we pass 'false' because tuples returned by amgetnext are
+ * pointers onto disk pages and were not created with palloc() and
+ * so should not be pfree()'d.
*/
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
/*
* At this point we have an extra pin on the buffer, because
- * ExecStoreTuple incremented the pin count. Drop our local
- * pin.
+ * ExecStoreTuple incremented the pin count. Drop our local pin.
*/
ReleaseBuffer(buffer);
}
/*
- * if we get here it means the tid scan failed so we are at the end of
- * the scan..
+ * if we get here it means the tid scan failed so we are at the end of the
+ * scan..
*/
return ExecClearTuple(slot);
}
tidstate->tss_TidPtr = -1;
/*
- * get the range table and direction information from the execution
- * state (these are needed to open the relations).
+ * get the range table and direction information from the execution state
+ * (these are needed to open the relations).
*/
rangeTable = estate->es_range_table;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.47 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.48 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* now loop, returning only non-duplicate tuples. We assume that the
* tuples arrive in sorted order so we can detect duplicates easily.
*
- * We return the first tuple from each group of duplicates (or the last
- * tuple of each group, when moving backwards). At either end of the
- * subplan, clear the result slot so that we correctly return the
- * first/last tuple when reversing direction.
+ * We return the first tuple from each group of duplicates (or the last tuple
+ * of each group, when moving backwards). At either end of the subplan,
+ * clear the result slot so that we correctly return the first/last tuple
+ * when reversing direction.
*/
for (;;)
{
break;
/*
- * Else test if the new tuple and the previously returned tuple
- * match. If so then we loop back and fetch another new tuple
- * from the subplan.
+ * Else test if the new tuple and the previously returned tuple match.
+ * If so then we loop back and fetch another new tuple from the
+ * subplan.
*/
if (!execTuplesMatch(slot, resultTupleSlot,
plannode->numCols, plannode->uniqColIdx,
}
/*
- * We have a new tuple different from the previous saved tuple (if
- * any). Save it and return it. We must copy it because the source
- * subplan won't guarantee that this source tuple is still accessible
- * after fetching the next source tuple.
+ * We have a new tuple different from the previous saved tuple (if any).
+ * Save it and return it. We must copy it because the source subplan
+ * won't guarantee that this source tuple is still accessible after
+ * fetching the next source tuple.
*/
return ExecCopySlot(resultTupleSlot, slot);
}
/*
* Miscellaneous initialization
*
- * Unique nodes have no ExprContext initialization because they never
- * call ExecQual or ExecProject. But they do need a per-tuple memory
- * context anyway for calling execTuplesMatch.
+ * Unique nodes have no ExprContext initialization because they never call
+ * ExecQual or ExecProject. But they do need a per-tuple memory context
+ * anyway for calling execTuplesMatch.
*/
uniquestate->tempContext =
AllocSetContextCreate(CurrentMemoryContext,
outerPlanState(uniquestate) = ExecInitNode(outerPlan(node), estate);
/*
- * unique nodes do no projections, so initialize projection info for
- * this node appropriately
+ * unique nodes do no projections, so initialize projection info for this
+ * node appropriately
*/
ExecAssignResultTypeFromOuterPlan(&uniquestate->ps);
uniquestate->ps.ps_ProjInfo = NULL;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.142 2005/10/01 18:43:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.143 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void _SPI_prepare_plan(const char *src, _SPI_plan *plan);
static int _SPI_execute_plan(_SPI_plan *plan,
- Datum *Values, const char *Nulls,
- Snapshot snapshot, Snapshot crosscheck_snapshot,
- bool read_only, long tcount);
+ Datum *Values, const char *Nulls,
+ Snapshot snapshot, Snapshot crosscheck_snapshot,
+ bool read_only, long tcount);
-static int _SPI_pquery(QueryDesc *queryDesc, long tcount);
+static int _SPI_pquery(QueryDesc *queryDesc, long tcount);
static void _SPI_error_callback(void *arg);
int newdepth;
/*
- * When procedure called by Executor _SPI_curid expected to be equal
- * to _SPI_connected
+ * When procedure called by Executor _SPI_curid expected to be equal to
+ * _SPI_connected
*/
if (_SPI_curid != _SPI_connected)
return SPI_ERROR_CONNECT;
_SPI_current->processed = 0;
_SPI_current->lastoid = InvalidOid;
_SPI_current->tuptable = NULL;
- _SPI_current->procCxt = NULL; /* in case we fail to create 'em */
+ _SPI_current->procCxt = NULL; /* in case we fail to create 'em */
_SPI_current->execCxt = NULL;
_SPI_current->connectSubid = GetCurrentSubTransactionId();
/*
* Create memory contexts for this procedure
*
- * XXX it would be better to use PortalContext as the parent context, but
- * we may not be inside a portal (consider deferred-trigger
- * execution). Perhaps CurTransactionContext would do? For now it
- * doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
+ * XXX it would be better to use PortalContext as the parent context, but we
+ * may not be inside a portal (consider deferred-trigger execution).
+ * Perhaps CurTransactionContext would do? For now it doesn't matter
+ * because we clean up explicitly in AtEOSubXact_SPI().
*/
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Proc",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
_SPI_current->execCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Exec",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
/* ... and switch to procedure's context */
_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);
SPI_tuptable = NULL;
/*
- * After _SPI_begin_call _SPI_connected == _SPI_curid. Now we are
- * closing connection to SPI and returning to upper Executor and so
- * _SPI_connected must be equal to _SPI_curid.
+ * After _SPI_begin_call _SPI_connected == _SPI_curid. Now we are closing
+ * connection to SPI and returning to upper Executor and so _SPI_connected
+ * must be equal to _SPI_curid.
*/
_SPI_connected--;
_SPI_curid--;
AtEOXact_SPI(bool isCommit)
{
/*
- * Note that memory contexts belonging to SPI stack entries will be
- * freed automatically, so we can ignore them here. We just need to
- * restore our static variables to initial state.
+ * Note that memory contexts belonging to SPI stack entries will be freed
+ * automatically, so we can ignore them here. We just need to restore our
+ * static variables to initial state.
*/
if (isCommit && _SPI_connected != -1)
ereport(WARNING,
/*
* Pop the stack entry and reset global variables. Unlike
- * SPI_finish(), we don't risk switching to memory contexts that
- * might be already gone.
+ * SPI_finish(), we don't risk switching to memory contexts that might
+ * be already gone.
*/
_SPI_connected--;
_SPI_curid = _SPI_connected;
mtuple = heap_formtuple(rel->rd_att, v, n);
/*
- * copy the identification info of the old tuple: t_ctid, t_self,
- * and OID (if any)
+ * copy the identification info of the old tuple: t_ctid, t_self, and
+ * OID (if any)
*/
mtuple->t_data->t_ctid = tuple->t_data->t_ctid;
mtuple->t_self = tuple->t_self;
getTypeOutputInfo(typoid, &foutoid, &typisvarlena);
/*
- * If we have a toasted datum, forcibly detoast it here to avoid
- * memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid memory
+ * leakage inside the type's output routine.
*/
if (typisvarlena)
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
char *
SPI_getnspname(Relation rel)
{
- return get_namespace_name(RelationGetNamespace(rel));
+ return get_namespace_name(RelationGetNamespace(rel));
}
void *
portal->cursorOptions |= CURSOR_OPT_NO_SCROLL;
/*
- * Set up the snapshot to use. (PortalStart will do CopySnapshot,
- * so we skip that here.)
+ * Set up the snapshot to use. (PortalStart will do CopySnapshot, so we
+ * skip that here.)
*/
if (read_only)
snapshot = ActiveSnapshot;
tuptable->free = 256;
tuptable->alloced += tuptable->free;
tuptable->vals = (HeapTuple *) repalloc(tuptable->vals,
- tuptable->alloced * sizeof(HeapTuple));
+ tuptable->alloced * sizeof(HeapTuple));
}
tuptable->vals[tuptable->alloced - tuptable->free] =
int nargs = plan->nargs;
/*
- * Increment CommandCounter to see changes made by now. We must do
- * this to be sure of seeing any schema changes made by a just-preceding
- * SPI command. (But we don't bother advancing the snapshot, since the
+ * Increment CommandCounter to see changes made by now. We must do this
+ * to be sure of seeing any schema changes made by a just-preceding SPI
+ * command. (But we don't bother advancing the snapshot, since the
* planner generally operates under SnapshotNow rules anyway.)
*/
CommandCounterIncrement();
/*
* Do parse analysis and rule rewrite for each raw parsetree.
*
- * We save the querytrees from each raw parsetree as a separate
- * sublist. This allows _SPI_execute_plan() to know where the
- * boundaries between original queries fall.
+ * We save the querytrees from each raw parsetree as a separate sublist.
+ * This allows _SPI_execute_plan() to know where the boundaries between
+ * original queries fall.
*/
query_list_list = NIL;
plan_list = NIL;
volatile int res = 0;
volatile uint32 my_processed = 0;
volatile Oid my_lastoid = InvalidOid;
- SPITupleTable * volatile my_tuptable = NULL;
+ SPITupleTable *volatile my_tuptable = NULL;
Snapshot saveActiveSnapshot;
/* Be sure to restore ActiveSnapshot on error exit */
if (read_only && !QueryIsReadOnly(queryTree))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL statement name */
- errmsg("%s is not allowed in a non-volatile function",
- CreateQueryTag(queryTree))));
+ /* translator: %s is a SQL statement name */
+ errmsg("%s is not allowed in a non-volatile function",
+ CreateQueryTag(queryTree))));
+
/*
* If not read-only mode, advance the command counter before
* each command.
}
FreeSnapshot(ActiveSnapshot);
ActiveSnapshot = NULL;
+
/*
* The last canSetTag query sets the auxiliary values returned
* to the caller. Be careful to free any tuptables not
{
case CMD_SELECT:
res = SPI_OK_SELECT;
- if (queryDesc->parsetree->into) /* select into table? */
+ if (queryDesc->parsetree->into) /* select into table? */
{
res = SPI_OK_SELINTO;
- queryDesc->dest = None_Receiver; /* don't output results */
+ queryDesc->dest = None_Receiver; /* don't output results */
}
else if (queryDesc->dest->mydest != SPI)
{
int syntaxerrposition;
/*
- * If there is a syntax error position, convert to internal syntax
- * error; otherwise treat the query as an item of context stack
+ * If there is a syntax error position, convert to internal syntax error;
+ * otherwise treat the query as an item of context stack
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
dest);
/*
- * Think not to combine this store with the preceding function call.
- * If the portal contains calls to functions that use SPI, then
- * SPI_stack is likely to move around while the portal runs. When
- * control returns, _SPI_current will point to the correct stack
- * entry... but the pointer may be different than it was beforehand.
- * So we must be sure to re-fetch the pointer after the function call
- * completes.
+ * Think not to combine this store with the preceding function call. If
+ * the portal contains calls to functions that use SPI, then SPI_stack is
+ * likely to move around while the portal runs. When control returns,
+ * _SPI_current will point to the correct stack entry... but the pointer
+ * may be different than it was beforehand. So we must be sure to re-fetch
+ * the pointer after the function call completes.
*/
_SPI_current->processed = nfetched;
parentcxt = _SPI_current->procCxt;
else if (location == _SPI_CPLAN_TOPCXT)
parentcxt = TopMemoryContext;
- else /* (this case not currently used) */
+ else
+ /* (this case not currently used) */
parentcxt = CurrentMemoryContext;
/*
- * Create a memory context for the plan. We don't expect the plan to
- * be very large, so use smaller-than-default alloc parameters.
+ * Create a memory context for the plan. We don't expect the plan to be
+ * very large, so use smaller-than-default alloc parameters.
*/
plancxt = AllocSetContextCreate(parentcxt,
"SPI Plan",
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/lib/dllist.c,v 1.31 2005/01/18 22:59:32 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/lib/dllist.c,v 1.32 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
e->dle_prev = NULL;
l->dll_head = e;
- if (l->dll_tail == NULL) /* if this is first element added */
+ if (l->dll_tail == NULL) /* if this is first element added */
l->dll_tail = e;
}
e->dle_next = NULL;
l->dll_tail = e;
- if (l->dll_head == NULL) /* if this is first element added */
+ if (l->dll_head == NULL) /* if this is first element added */
l->dll_head = e;
}
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.41 2004/12/31 21:59:48 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.42 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(str != NULL);
/*
- * If there's hardly any space, don't bother trying, just fail to make
- * the caller enlarge the buffer first.
+ * If there's hardly any space, don't bother trying, just fail to make the
+ * caller enlarge the buffer first.
*/
avail = str->maxlen - str->len - 1;
if (avail < 16)
/*
* Assert check here is to catch buggy vsnprintf that overruns the
- * specified buffer length. Solaris 7 in 64-bit mode is an example of
- * a platform with such a bug.
+ * specified buffer length. Solaris 7 in 64-bit mode is an example of a
+ * platform with such a bug.
*/
#ifdef USE_ASSERT_CHECKING
str->data[str->maxlen - 1] = '\0';
Assert(str->data[str->maxlen - 1] == '\0');
/*
- * Note: some versions of vsnprintf return the number of chars
- * actually stored, but at least one returns -1 on failure. Be
- * conservative about believing whether the print worked.
+ * Note: some versions of vsnprintf return the number of chars actually
+ * stored, but at least one returns -1 on failure. Be conservative about
+ * believing whether the print worked.
*/
if (nprinted >= 0 && nprinted < avail - 1)
{
str->len += datalen;
/*
- * Keep a trailing null in place, even though it's probably useless
- * for binary data...
+ * Keep a trailing null in place, even though it's probably useless for
+ * binary data...
*/
str->data[str->len] = '\0';
}
int newlen;
/*
- * Guard against ridiculous "needed" values, which can occur if we're
- * fed bogus data. Without this, we can get an overflow or infinite
- * loop in the following.
+ * Guard against ridiculous "needed" values, which can occur if we're fed
+ * bogus data. Without this, we can get an overflow or infinite loop in
+ * the following.
*/
if (needed < 0 ||
((Size) needed) >= (MaxAllocSize - (Size) str->len))
return; /* got enough space already */
/*
- * We don't want to allocate just a little more space with each
- * append; for efficiency, double the buffer size each time it
- * overflows. Actually, we might need to more than double it if
- * 'needed' is big...
+ * We don't want to allocate just a little more space with each append;
+ * for efficiency, double the buffer size each time it overflows.
+ * Actually, we might need to more than double it if 'needed' is big...
*/
newlen = 2 * str->maxlen;
while (needed > newlen)
newlen = 2 * newlen;
/*
- * Clamp to MaxAllocSize in case we went past it. Note we are
- * assuming here that MaxAllocSize <= INT_MAX/2, else the above loop
- * could overflow. We will still have newlen >= needed.
+ * Clamp to MaxAllocSize in case we went past it. Note we are assuming
+ * here that MaxAllocSize <= INT_MAX/2, else the above loop could
+ * overflow. We will still have newlen >= needed.
*/
if (newlen > (int) MaxAllocSize)
newlen = (int) MaxAllocSize;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.129 2005/10/13 22:55:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.130 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int recv_and_check_password_packet(Port *port);
char *pg_krb_server_keyfile;
-char *pg_krb_srvnam;
+char *pg_krb_srvnam;
bool pg_krb_caseins_users;
char *pg_krb_server_hostname = NULL;
};
static char *pam_passwd = NULL; /* Workaround for Solaris 2.6 brokenness */
-static Port *pam_port_cludge; /* Workaround for passing "Port *port"
- * into pam_passwd_conv_proc */
+static Port *pam_port_cludge; /* Workaround for passing "Port *port" into
+ * pam_passwd_conv_proc */
#endif /* USE_PAM */
#ifdef KRB5
pg_krb5_init(void)
{
krb5_error_code retval;
- char *khostname;
+ char *khostname;
if (pg_krb5_initialised)
return STATUS_OK;
}
/*
- * If no hostname was specified, pg_krb_server_hostname is already
- * NULL. If it's set to blank, force it to NULL.
+ * If no hostname was specified, pg_krb_server_hostname is already NULL.
+ * If it's set to blank, force it to NULL.
*/
khostname = pg_krb_server_hostname;
if (khostname && khostname[0] == '\0')
{
ereport(LOG,
(errmsg("Kerberos sname_to_principal(\"%s\", \"%s\") returned error %d",
- khostname ? khostname : "localhost", pg_krb_srvnam, retval)));
+ khostname ? khostname : "localhost", pg_krb_srvnam, retval)));
com_err("postgres", retval,
- "while getting server principal for server \"%s\" for service \"%s\"",
+ "while getting server principal for server \"%s\" for service \"%s\"",
khostname ? khostname : "localhost", pg_krb_srvnam);
krb5_kt_close(pg_krb5_context, pg_krb5_keytab);
krb5_free_context(pg_krb5_context);
return ret;
}
-
#else
static int
/*
* If we failed due to EOF from client, just quit; there's no point in
- * trying to send a message to the client, and not much point in
- * logging the failure in the postmaster log. (Logging the failure
- * might be desirable, were it not for the fact that libpq closes the
- * connection unceremoniously if challenged for a password when it
- * hasn't got one to send. We'll get a useless log entry for every
- * psql connection under password auth, even if it's perfectly
- * successful, if we log STATUS_EOF events.)
+ * trying to send a message to the client, and not much point in logging
+ * the failure in the postmaster log. (Logging the failure might be
+ * desirable, were it not for the fact that libpq closes the connection
+ * unceremoniously if challenged for a password when it hasn't got one to
+ * send. We'll get a useless log entry for every psql connection under
+ * password auth, even if it's perfectly successful, if we log STATUS_EOF
+ * events.)
*/
if (status == STATUS_EOF)
proc_exit(0);
/*
* Get the authentication method to use for this frontend/database
- * combination. Note: a failure return indicates a problem with the
- * hba config file, not with the request. hba.c should have dropped
- * an error message into the postmaster logfile if it failed.
+ * combination. Note: a failure return indicates a problem with the hba
+ * config file, not with the request. hba.c should have dropped an error
+ * message into the postmaster logfile if it failed.
*/
if (hba_getauthmethod(port) != STATUS_OK)
ereport(FATAL,
/*
* This could have come from an explicit "reject" entry in
* pg_hba.conf, but more likely it means there was no matching
- * entry. Take pity on the poor user and issue a helpful
- * error message. NOTE: this is not a security breach,
- * because all the info reported here is known at the frontend
- * and must be assumed known to bad guys. We're merely helping
- * out the less clueful good guys.
+ * entry. Take pity on the poor user and issue a helpful error
+ * message. NOTE: this is not a security breach, because all the
+ * info reported here is known at the frontend and must be assumed
+ * known to bad guys. We're merely helping out the less clueful
+ * good guys.
*/
{
char hostinfo[NI_MAXHOST];
#ifdef USE_SSL
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
- hostinfo, port->user_name, port->database_name,
- port->ssl ? _("SSL on") : _("SSL off"))));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
+ hostinfo, port->user_name, port->database_name,
+ port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
hostinfo, port->user_name, port->database_name)));
#endif
break;
if (setsockopt(port->sock, 0, LOCAL_CREDS, &on, sizeof(on)) < 0)
ereport(FATAL,
(errcode_for_socket_access(),
- errmsg("could not enable credential reception: %m")));
+ errmsg("could not enable credential reception: %m")));
#endif
sendAuthRequest(port, AUTH_REQ_SCM_CREDS);
pq_endmessage(&buf);
/*
- * Flush message so client will see it, except for AUTH_REQ_OK, which
- * need not be sent until we are ready for queries.
+ * Flush message so client will see it, except for AUTH_REQ_OK, which need
+ * not be sent until we are ready for queries.
*/
if (areq != AUTH_REQ_OK)
pq_flush();
if (!appdata_ptr)
{
/*
- * Workaround for Solaris 2.6 where the PAM library is broken and
- * does not pass appdata_ptr to the conversation routine
+ * Workaround for Solaris 2.6 where the PAM library is broken and does
+ * not pass appdata_ptr to the conversation routine
*/
appdata_ptr = pam_passwd;
}
/*
- * Password wasn't passed to PAM the first time around - let's go ask
- * the client to send a password, which we then stuff into PAM.
+ * Password wasn't passed to PAM the first time around - let's go ask the
+ * client to send a password, which we then stuff into PAM.
*/
if (strlen(appdata_ptr) == 0)
{
{
/*
* If the client just disconnects without offering a password,
- * don't make a log entry. This is legal per protocol spec
- * and in fact commonly done by psql, so complaining just
- * clutters the log.
+ * don't make a log entry. This is legal per protocol spec and in
+ * fact commonly done by psql, so complaining just clutters the
+ * log.
*/
if (mtype != EOF)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("expected password response, got message type %d",
- mtype)));
+ errmsg("expected password response, got message type %d",
+ mtype)));
return NULL; /* EOF or bad message type */
}
}
}
/*
- * Apply sanity check: password packet length should agree with length
- * of contained string. Note it is safe to use strlen here because
+ * Apply sanity check: password packet length should agree with length of
+ * contained string. Note it is safe to use strlen here because
* StringInfo is guaranteed to have an appended '\0'.
*/
if (strlen(buf.data) + 1 != buf.len)
/*
* Return the received string. Note we do not attempt to do any
- * character-set conversion on it; since we don't yet know the
- * client's encoding, there wouldn't be much point.
+ * character-set conversion on it; since we don't yet know the client's
+ * encoding, there wouldn't be much point.
*/
return buf.data;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.78 2005/06/13 02:26:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.79 2005/10/15 02:49:17 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
ALLOCSET_DEFAULT_INITSIZE, \
ALLOCSET_DEFAULT_MAXSIZE); \
} while (0)
-
+
static int newLOfd(LargeObjectDesc *lobjCookie);
static void deleteLOfd(int fd);
if ((cookies[fd]->flags & IFS_WRLOCK) == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("large object descriptor %d was not opened for writing",
- fd)));
+ errmsg("large object descriptor %d was not opened for writing",
+ fd)));
Assert(fscxt != NULL);
currentContext = MemoryContextSwitchTo(fscxt);
}
/*
- * We assume we do not need to switch contexts for inv_tell. That is
- * true for now, but is probably more than this module ought to
- * assume...
+ * We assume we do not need to switch contexts for inv_tell. That is true
+ * for now, but is probably more than this module ought to assume...
*/
PG_RETURN_INT32(inv_tell(cookies[fd]));
}
}
/*
- * inv_drop does not need a context switch, indeed it doesn't touch
- * any LO-specific data structures at all. (Again, that's probably
- * more than this module ought to be assuming.)
+ * inv_drop does not need a context switch, indeed it doesn't touch any
+ * LO-specific data structures at all. (Again, that's probably more than
+ * this module ought to be assuming.)
*/
PG_RETURN_INT32(inv_drop(lobjId));
}
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to use server-side lo_import()"),
+ errmsg("must be superuser to use server-side lo_import()"),
errhint("Anyone can use the client-side lo_import() provided by libpq.")));
#endif
/*
- * We don't actually need to switch into fscxt, but create it anyway
- * to ensure that AtEOXact_LargeObject knows there is state to clean up
+ * We don't actually need to switch into fscxt, but create it anyway to
+ * ensure that AtEOXact_LargeObject knows there is state to clean up
*/
CreateFSContext();
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to use server-side lo_export()"),
+ errmsg("must be superuser to use server-side lo_export()"),
errhint("Anyone can use the client-side lo_export() provided by libpq.")));
#endif
/*
- * We don't actually need to switch into fscxt, but create it anyway
- * to ensure that AtEOXact_LargeObject knows there is state to clean up
+ * We don't actually need to switch into fscxt, but create it anyway to
+ * ensure that AtEOXact_LargeObject knows there is state to clean up
*/
CreateFSContext();
/*
* open the file to be written to
*
- * Note: we reduce backend's normal 077 umask to the slightly friendlier
- * 022. This code used to drop it all the way to 0, but creating
- * world-writable export files doesn't seem wise.
+ * Note: we reduce backend's normal 077 umask to the slightly friendlier 022.
+ * This code used to drop it all the way to 0, but creating world-writable
+ * export files doesn't seem wise.
*/
nbytes = VARSIZE(filename) - VARHDRSZ;
if (nbytes >= MAXPGPATH)
currentContext = MemoryContextSwitchTo(fscxt);
/*
- * Close LO fds and clear cookies array so that LO fds are no longer
- * good. On abort we skip the close step.
+ * Close LO fds and clear cookies array so that LO fds are no longer good.
+ * On abort we skip the close step.
*/
for (i = 0; i < cookies_size; i++)
{
else
{
/*
- * Make sure we do not call inv_close twice if it errors
- * out for some reason. Better a leak than a crash.
+ * Make sure we do not call inv_close twice if it errors out
+ * for some reason. Better a leak than a crash.
*/
deleteLOfd(i);
inv_close(lo);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.58 2005/07/04 04:51:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.59 2005/10/15 02:49:17 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
#define ROOT_CERT_FILE "root.crt"
#define SERVER_CERT_FILE "server.crt"
-#define SERVER_PRIVATE_KEY_FILE "server.key"
+#define SERVER_PRIVATE_KEY_FILE "server.key"
static DH *load_dh_file(int keylength);
static DH *load_dh_buffer(const char *, size_t);
case SSL_ERROR_WANT_WRITE:
#ifdef WIN32
pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl),
- (err==SSL_ERROR_WANT_READ) ?
- FD_READ|FD_CLOSE : FD_WRITE|FD_CLOSE);
+ (err == SSL_ERROR_WANT_READ) ?
+ FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE);
#endif
goto rloop;
case SSL_ERROR_SYSCALL:
if (port->ssl->state != SSL_ST_OK)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("SSL failed to send renegotiation request")));
+ errmsg("SSL failed to send renegotiation request")));
port->ssl->state |= SSL_ST_ACCEPT;
SSL_do_handshake(port->ssl);
if (port->ssl->state != SSL_ST_OK)
case SSL_ERROR_WANT_WRITE:
#ifdef WIN32
pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl),
- (err==SSL_ERROR_WANT_READ) ?
- FD_READ|FD_CLOSE : FD_WRITE|FD_CLOSE);
+ (err == SSL_ERROR_WANT_READ) ?
+ FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE);
#endif
goto wloop;
case SSL_ERROR_SYSCALL:
static bool my_bio_initialized = false;
static BIO_METHOD my_bio_methods;
-static int (*std_sock_read) (BIO *h, char *buf, int size);
+static int (*std_sock_read) (BIO *h, char *buf, int size);
static int
my_sock_read(BIO *h, char *buf, int size)
{
- int res;
+ int res;
prepare_for_client_read();
static int
my_SSL_set_fd(SSL *s, int fd)
{
- int ret=0;
- BIO *bio=NULL;
+ int ret = 0;
+ BIO *bio = NULL;
- bio=BIO_new(my_BIO_s_socket());
+ bio = BIO_new(my_BIO_s_socket());
if (bio == NULL)
{
- SSLerr(SSL_F_SSL_SET_FD,ERR_R_BUF_LIB);
+ SSLerr(SSL_F_SSL_SET_FD, ERR_R_BUF_LIB);
goto err;
}
- BIO_set_fd(bio,fd,BIO_NOCLOSE);
- SSL_set_bio(s,bio,bio);
- ret=1;
+ BIO_set_fd(bio, fd, BIO_NOCLOSE);
+ SSL_set_bio(s, bio, bio);
+ ret = 1;
err:
- return(ret);
+ return (ret);
}
/*
(codes & DH_CHECK_P_NOT_SAFE_PRIME))
{
elog(LOG,
- "DH error (%s): neither suitable generator or safe prime",
+ "DH error (%s): neither suitable generator or safe prime",
fnbuf);
return NULL;
}
if (r == NULL || 8 * DH_size(r) < keylength)
{
ereport(DEBUG2,
- (errmsg_internal("DH: generating parameters (%d bits)....",
- keylength)));
+ (errmsg_internal("DH: generating parameters (%d bits)....",
+ keylength)));
r = DH_generate_parameters(keylength, DH_GENERATOR_2, NULL, NULL);
}
SSL_FILETYPE_PEM))
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("could not load server certificate file \"%s\": %s",
- SERVER_CERT_FILE, SSLerrmessage())));
+ errmsg("could not load server certificate file \"%s\": %s",
+ SERVER_CERT_FILE, SSLerrmessage())));
if (stat(SERVER_PRIVATE_KEY_FILE, &buf) == -1)
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not access private key file \"%s\": %m",
- SERVER_PRIVATE_KEY_FILE)));
+ errmsg("could not access private key file \"%s\": %m",
+ SERVER_PRIVATE_KEY_FILE)));
/*
* Require no public access to key file.
*
- * XXX temporarily suppress check when on Windows, because there may
- * not be proper support for Unix-y file permissions. Need to
- * think of a reasonable check to apply on Windows. (See also the
- * data directory permission check in postmaster.c)
+ * XXX temporarily suppress check when on Windows, because there may not
+ * be proper support for Unix-y file permissions. Need to think of a
+ * reasonable check to apply on Windows. (See also the data directory
+ * permission check in postmaster.c)
*/
#if !defined(WIN32) && !defined(__CYGWIN__)
if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) ||
buf.st_uid != geteuid())
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("unsafe permissions on private key file \"%s\"",
- SERVER_PRIVATE_KEY_FILE),
+ errmsg("unsafe permissions on private key file \"%s\"",
+ SERVER_PRIVATE_KEY_FILE),
errdetail("File must be owned by the database user and must have no permissions for \"group\" or \"other\".")));
#endif
case SSL_ERROR_WANT_WRITE:
#ifdef WIN32
pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl),
- (err==SSL_ERROR_WANT_READ) ?
- FD_READ|FD_CLOSE|FD_ACCEPT : FD_WRITE|FD_CLOSE);
+ (err == SSL_ERROR_WANT_READ) ?
+ FD_READ | FD_CLOSE | FD_ACCEPT : FD_WRITE | FD_CLOSE);
#endif
goto aloop;
case SSL_ERROR_SYSCALL:
else
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("could not accept SSL connection: EOF detected")));
+ errmsg("could not accept SSL connection: EOF detected")));
break;
case SSL_ERROR_SSL:
ereport(COMMERROR,
case SSL_ERROR_ZERO_RETURN:
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("could not accept SSL connection: EOF detected")));
+ errmsg("could not accept SSL connection: EOF detected")));
break;
default:
ereport(COMMERROR,
port->peer_dn, sizeof(port->peer_dn));
port->peer_dn[sizeof(port->peer_dn) - 1] = '\0';
X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer),
- NID_commonName, port->peer_cn, sizeof(port->peer_cn));
+ NID_commonName, port->peer_cn, sizeof(port->peer_cn));
port->peer_cn[sizeof(port->peer_cn) - 1] = '\0';
}
ereport(DEBUG2,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/crypt.c,v 1.65 2005/08/15 02:40:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/crypt.c,v 1.66 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
TimestampTz vuntil;
vuntil = DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in,
- CStringGetDatum(valuntil),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(-1)));
+ CStringGetDatum(valuntil),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(-1)));
if (vuntil < GetCurrentTimestamp())
retval = STATUS_ERROR;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.147 2005/08/11 21:11:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.148 2005/10/15 02:49:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int role_length;
static void tokenize_file(const char *filename, FILE *file,
- List **lines, List **line_nums);
+ List **lines, List **line_nums);
static char *tokenize_inc_file(const char *outer_filename,
- const char *inc_filename);
+ const char *inc_filename);
/*
* isblank() exists in the ISO C99 spec, but it's not very portable yet,
}
/*
- * Build a token in buf of next characters up to EOF, EOL, unquoted
- * comma, or unquoted whitespace.
+ * Build a token in buf of next characters up to EOF, EOL, unquoted comma,
+ * or unquoted whitespace.
*/
while (c != EOF && c != '\n' &&
(!pg_isblank(c) || in_quote == true))
*buf = '\0';
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("authentication file token too long, skipping: \"%s\"",
- start_buf)));
+ errmsg("authentication file token too long, skipping: \"%s\"",
+ start_buf)));
/* Discard remainder of line */
while ((c = getc(fp)) != EOF && c != '\n')
;
}
/*
- * Put back the char right after the token (critical in case it is
- * EOL, since we need to detect end-of-line at next call).
+ * Put back the char right after the token (critical in case it is EOL,
+ * since we need to detect end-of-line at next call).
*/
if (c != EOF)
ungetc(c, fp);
foreach(token, token_list)
{
- int oldlen = strlen(comma_str);
- int needed;
+ int oldlen = strlen(comma_str);
+ int needed;
needed = oldlen + strlen(lfirst(token)) + 1;
if (oldlen > 0)
/*
* Lookup a role name in the pg_auth file
*/
-List **
+List **
get_role_line(const char *role)
{
/* On some versions of Solaris, bsearch of zero items dumps core */
return true;
/*
- * skip over the role name, password, valuntil, examine all the
- * membership entries
+ * skip over the role name, password, valuntil, examine all the membership
+ * entries
*/
if (list_length(*line) < 4)
return false;
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid IP address \"%s\" in file \"%s\" line %d: %s",
- token, HbaFileName, line_num,
- gai_strerror(ret))));
+ errmsg("invalid IP address \"%s\" in file \"%s\" line %d: %s",
+ token, HbaFileName, line_num,
+ gai_strerror(ret))));
if (cidr_slash)
*cidr_slash = '/';
if (gai_result)
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid IP mask \"%s\" in file \"%s\" line %d: %s",
- token, HbaFileName, line_num,
- gai_strerror(ret))));
+ errmsg("invalid IP mask \"%s\" in file \"%s\" line %d: %s",
+ token, HbaFileName, line_num,
+ gai_strerror(ret))));
if (gai_result)
freeaddrinfo_all(hints.ai_family, gai_result);
goto hba_other_error;
if (addr.ss_family != port->raddr.addr.ss_family)
{
/*
- * Wrong address family. We allow only one case: if the file
- * has IPv4 and the port is IPv6, promote the file address to
- * IPv6 and try to match that way.
+ * Wrong address family. We allow only one case: if the file has
+ * IPv4 and the port is IPv6, promote the file address to IPv6 and
+ * try to match that way.
*/
#ifdef HAVE_IPV6
if (addr.ss_family == AF_INET &&
if (line_item)
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid entry in file \"%s\" at line %d, token \"%s\"",
- HbaFileName, line_num,
- (char *) lfirst(line_item))));
+ errmsg("invalid entry in file \"%s\" at line %d, token \"%s\"",
+ HbaFileName, line_num,
+ (char *) lfirst(line_item))));
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("missing field in file \"%s\" at end of line %d",
- HbaFileName, line_num)));
+ errmsg("missing field in file \"%s\" at end of line %d",
+ HbaFileName, line_num)));
/* Come here if suitable message already logged */
hba_other_error:
/* Discard any old data */
if (role_lines || role_line_nums)
free_lines(&role_lines, &role_line_nums);
- if (role_sorted)
+ if (role_sorted)
pfree(role_sorted);
role_sorted = NULL;
role_length = 0;
role_length = list_length(role_lines);
if (role_length)
{
- int i = 0;
- ListCell *line;
+ int i = 0;
+ ListCell *line;
/* We assume the flat file was written already-sorted */
role_sorted = palloc(role_length * sizeof(List *));
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("cannot use Ident authentication without usermap field")));
+ errmsg("cannot use Ident authentication without usermap field")));
found_entry = false;
}
else if (strcmp(usermap_name, "sameuser\n") == 0 ||
interpret_ident_response(const char *ident_response,
char *ident_user)
{
- const char *cursor = ident_response; /* Cursor into
- * *ident_response */
+ const char *cursor = ident_response; /* Cursor into *ident_response */
/*
- * Ident's response, in the telnet tradition, should end in crlf
- * (\r\n).
+ * Ident's response, in the telnet tradition, should end in crlf (\r\n).
*/
if (strlen(ident_response) < 2)
return false;
else
{
/*
- * It's a USERID response. Good. "cursor" should be
- * pointing to the colon that precedes the operating
- * system type.
+ * It's a USERID response. Good. "cursor" should be pointing
+ * to the colon that precedes the operating system type.
*/
if (*cursor != ':')
return false;
const SockAddr local_addr,
char *ident_user)
{
- int sock_fd, /* File descriptor for socket on which we
- * talk to Ident */
- rc; /* Return code from a locally called
- * function */
+ int sock_fd, /* File descriptor for socket on which we talk
+ * to Ident */
+ rc; /* Return code from a locally called function */
bool ident_return;
char remote_addr_s[NI_MAXHOST];
char remote_port[NI_MAXSERV];
hints;
/*
- * Might look a little weird to first convert it to text and then back
- * to sockaddr, but it's protocol independent.
+ * Might look a little weird to first convert it to text and then back to
+ * sockaddr, but it's protocol independent.
*/
getnameinfo_all(&remote_addr.addr, remote_addr.salen,
remote_addr_s, sizeof(remote_addr_s),
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not create socket for Ident connection: %m")));
+ errmsg("could not create socket for Ident connection: %m")));
ident_return = false;
goto ident_inet_done;
}
/*
- * Bind to the address which the client originally contacted,
- * otherwise the ident server won't be able to match up the right
- * connection. This is necessary if the PostgreSQL server is running
- * on an IP alias.
+ * Bind to the address which the client originally contacted, otherwise
+ * the ident server won't be able to match up the right connection. This
+ * is necessary if the PostgreSQL server is running on an IP alias.
*/
rc = bind(sock_fd, la->ai_addr, la->ai_addrlen);
if (rc != 0)
ident_return = interpret_ident_response(ident_response, ident_user);
if (!ident_return)
ereport(LOG,
- (errmsg("invalidly formatted response from Ident server: \"%s\"",
- ident_response)));
+ (errmsg("invalidly formatted response from Ident server: \"%s\"",
+ ident_response)));
ident_inet_done:
if (sock_fd >= 0)
StrNCpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
return true;
-
#elif defined(SO_PEERCRED)
/* Linux style: use getsockopt(SO_PEERCRED) */
struct ucred peercred;
StrNCpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
return true;
-
#elif defined(HAVE_STRUCT_CMSGCRED) || defined(HAVE_STRUCT_FCRED) || (defined(HAVE_STRUCT_SOCKCRED) && defined(LOCAL_CREDS))
struct msghdr msg;
/*
* The one character which is received here is not meaningful; its
- * purposes is only to make sure that recvmsg() blocks long enough for
- * the other side to send its credentials.
+ * purposes is only to make sure that recvmsg() blocks long enough for the
+ * other side to send its credentials.
*/
iov.iov_base = &buf;
iov.iov_len = 1;
StrNCpy(ident_user, pw->pw_name, IDENT_USERNAME_MAX + 1);
return true;
-
#else
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/md5.c,v 1.28 2005/02/23 22:46:17 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/md5.c,v 1.29 2005/10/15 02:49:18 momjian Exp $
*/
bool ret;
/*
- * Place salt at the end because it may be known by users trying to
- * crack the MD5 output.
+ * Place salt at the end because it may be known by users trying to crack
+ * the MD5 output.
*/
strcpy(crypt_buf, passwd);
memcpy(crypt_buf + passwd_len, salt, salt_len);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.180 2005/09/24 17:53:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.181 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define PQ_BUFFER_SIZE 8192
static char PqSendBuffer[PQ_BUFFER_SIZE];
-static int PqSendPointer; /* Next index to store a byte in
- * PqSendBuffer */
+static int PqSendPointer; /* Next index to store a byte in PqSendBuffer */
static char PqRecvBuffer[PQ_BUFFER_SIZE];
-static int PqRecvPointer; /* Next index to read a byte from
- * PqRecvBuffer */
+static int PqRecvPointer; /* Next index to read a byte from PqRecvBuffer */
static int PqRecvLength; /* End of data available in PqRecvBuffer */
/*
static void pq_close(int code, Datum arg);
static int internal_putbytes(const char *s, size_t len);
static int internal_flush(void);
+
#ifdef HAVE_UNIX_SOCKETS
static int Lock_AF_UNIX(unsigned short portNumber, char *unixSocketName);
static int Setup_AF_UNIX(void);
secure_close(MyProcPort);
/*
- * Formerly we did an explicit close() here, but it seems better
- * to leave the socket open until the process dies. This allows
- * clients to perform a "synchronous close" if they care --- wait
- * till the transport layer reports connection closure, and you
- * can be sure the backend has exited.
+ * Formerly we did an explicit close() here, but it seems better to
+ * leave the socket open until the process dies. This allows clients
+ * to perform a "synchronous close" if they care --- wait till the
+ * transport layer reports connection closure, and you can be sure the
+ * backend has exited.
*
* We do set sock to -1 to prevent any further I/O, though.
*/
hostName, service, gai_strerror(ret))));
else
ereport(LOG,
- (errmsg("could not translate service \"%s\" to address: %s",
- service, gai_strerror(ret))));
+ (errmsg("could not translate service \"%s\" to address: %s",
+ service, gai_strerror(ret))));
if (addrs)
freeaddrinfo_all(hint.ai_family, addrs);
return STATUS_ERROR;
if (!IS_AF_UNIX(family) && IS_AF_UNIX(addr->ai_family))
{
/*
- * Only set up a unix domain socket when they really asked for
- * it. The service/port is different in that case.
+ * Only set up a unix domain socket when they really asked for it.
+ * The service/port is different in that case.
*/
continue;
}
/*
* Note: This might fail on some OS's, like Linux older than
- * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and
- * map ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all
- * ipv4 connections.
+ * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map
+ * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
+ * connections.
*/
err = bind(fd, addr->ai_addr, addr->ai_addrlen);
if (err < 0)
errmsg("could not bind %s socket: %m",
familyDesc),
(IS_AF_UNIX(addr->ai_family)) ?
- errhint("Is another postmaster already running on port %d?"
- " If not, remove socket file \"%s\" and retry.",
- (int) portNumber, sock_path) :
- errhint("Is another postmaster already running on port %d?"
- " If not, wait a few seconds and retry.",
- (int) portNumber)));
+ errhint("Is another postmaster already running on port %d?"
+ " If not, remove socket file \"%s\" and retry.",
+ (int) portNumber, sock_path) :
+ errhint("Is another postmaster already running on port %d?"
+ " If not, wait a few seconds and retry.",
+ (int) portNumber)));
closesocket(fd);
continue;
}
#endif
/*
- * Select appropriate accept-queue length limit. PG_SOMAXCONN is
- * only intended to provide a clamp on the request on platforms
- * where an overly large request provokes a kernel error (are
- * there any?).
+ * Select appropriate accept-queue length limit. PG_SOMAXCONN is only
+ * intended to provide a clamp on the request on platforms where an
+ * overly large request provokes a kernel error (are there any?).
*/
maxconn = MaxBackends * 2;
if (maxconn > PG_SOMAXCONN)
/*
* Fix socket ownership/permission if requested. Note we must do this
- * before we listen() to avoid a window where unwanted connections
- * could get accepted.
+ * before we listen() to avoid a window where unwanted connections could
+ * get accepted.
*/
Assert(Unix_socket_group);
if (Unix_socket_group[0] != '\0')
}
/*
- * Also apply the current keepalive parameters. If we fail to set
- * a parameter, don't error out, because these aren't universally
+ * Also apply the current keepalive parameters. If we fail to set a
+ * parameter, don't error out, because these aren't universally
* supported. (Note: you might think we need to reset the GUC
- * variables to 0 in such a case, but it's not necessary because
- * the show hooks for these variables report the truth anyway.)
+ * variables to 0 in such a case, but it's not necessary because the
+ * show hooks for these variables report the truth anyway.)
*/
(void) pq_setkeepalivesidle(tcp_keepalives_idle, port);
(void) pq_setkeepalivesinterval(tcp_keepalives_interval, port);
if (sock_path[0] != '\0')
{
/*
- * utime() is POSIX standard, utimes() is a common alternative. If
- * we have neither, there's no way to affect the mod or access
- * time of the socket :-(
+ * utime() is POSIX standard, utimes() is a common alternative. If we
+ * have neither, there's no way to affect the mod or access time of
+ * the socket :-(
*
* In either path, we ignore errors; there's no point in complaining.
*/
continue; /* Ok if interrupted */
/*
- * Careful: an ereport() that tries to write to the client
- * would cause recursion to here, leading to stack overflow
- * and core dump! This message must go *only* to the
- * postmaster log.
+ * Careful: an ereport() that tries to write to the client would
+ * cause recursion to here, leading to stack overflow and core
+ * dump! This message must go *only* to the postmaster log.
*/
ereport(COMMERROR,
(errcode_for_socket_access(),
if (r == 0)
{
/*
- * EOF detected. We used to write a log message here, but
- * it's better to expect the ultimate caller to do that.
+ * EOF detected. We used to write a log message here, but it's
+ * better to expect the ultimate caller to do that.
*/
return EOF;
}
if (len > 0)
{
/*
- * Allocate space for message. If we run out of room (ridiculously
+ * Allocate space for message. If we run out of room (ridiculously
* large message), we will elog(ERROR), but we want to discard the
* message body so as not to lose communication sync.
*/
continue; /* Ok if we were interrupted */
/*
- * Careful: an ereport() that tries to write to the client
- * would cause recursion to here, leading to stack overflow
- * and core dump! This message must go *only* to the
- * postmaster log.
+ * Careful: an ereport() that tries to write to the client would
+ * cause recursion to here, leading to stack overflow and core
+ * dump! This message must go *only* to the postmaster log.
*
* If a client disconnects while we're in the midst of output, we
- * might write quite a bit of data before we get to a safe
- * query abort point. So, suppress duplicate log messages.
+ * might write quite a bit of data before we get to a safe query
+ * abort point. So, suppress duplicate log messages.
*/
if (errno != last_reported_send_errno)
{
if (port->default_keepalives_idle == 0)
{
- socklen_t size = sizeof(port->default_keepalives_idle);
+ socklen_t size = sizeof(port->default_keepalives_idle);
if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPIDLE,
(char *) &port->default_keepalives_idle,
&size) < 0)
{
elog(LOG, "getsockopt(TCP_KEEPIDLE) failed: %m");
- port->default_keepalives_idle = -1; /* don't know */
+ port->default_keepalives_idle = -1; /* don't know */
}
}
if (pq_getkeepalivesidle(port) < 0)
{
if (idle == 0)
- return STATUS_OK; /* default is set but unknown */
+ return STATUS_OK; /* default is set but unknown */
else
return STATUS_ERROR;
}
if (port->default_keepalives_interval == 0)
{
- socklen_t size = sizeof(port->default_keepalives_interval);
+ socklen_t size = sizeof(port->default_keepalives_interval);
if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPINTVL,
(char *) &port->default_keepalives_interval,
&size) < 0)
{
elog(LOG, "getsockopt(TCP_KEEPINTVL) failed: %m");
- port->default_keepalives_interval = -1; /* don't know */
+ port->default_keepalives_interval = -1; /* don't know */
}
}
if (pq_getkeepalivesinterval(port) < 0)
{
if (interval == 0)
- return STATUS_OK; /* default is set but unknown */
+ return STATUS_OK; /* default is set but unknown */
else
return STATUS_ERROR;
}
if (port->default_keepalives_count == 0)
{
- socklen_t size = sizeof(port->default_keepalives_count);
+ socklen_t size = sizeof(port->default_keepalives_count);
if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPCNT,
(char *) &port->default_keepalives_count,
&size) < 0)
{
elog(LOG, "getsockopt(TCP_KEEPCNT) failed: %m");
- port->default_keepalives_count = -1; /* don't know */
+ port->default_keepalives_count = -1; /* don't know */
}
}
if (pq_getkeepalivescount(port) < 0)
{
if (count == 0)
- return STATUS_OK; /* default is set but unknown */
+ return STATUS_OK; /* default is set but unknown */
else
return STATUS_ERROR;
}
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/pqformat.c,v 1.39 2005/09/24 17:53:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqformat.c,v 1.40 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* We stash the message type into the buffer's cursor field, expecting
- * that the pq_sendXXX routines won't touch it. We could
- * alternatively make it the first byte of the buffer contents, but
- * this seems easier.
+ * that the pq_sendXXX routines won't touch it. We could alternatively
+ * make it the first byte of the buffer contents, but this seems easier.
*/
buf->cursor = msgtype;
}
str = &msg->data[msg->cursor];
/*
- * It's safe to use strlen() here because a StringInfo is guaranteed
- * to have a trailing null byte. But check we found a null inside the
+ * It's safe to use strlen() here because a StringInfo is guaranteed to
+ * have a trailing null byte. But check we found a null inside the
* message.
*/
slen = strlen(str);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.40 2005/02/14 23:02:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.41 2005/10/15 02:49:18 momjian Exp $
*
* NOTES
* This shouldn't be in libpq, but the monitor and some other
sigset_t UnBlockSig,
BlockSig,
AuthBlockSig;
-
#else
int UnBlockSig,
BlockSig,
sigfillset(&AuthBlockSig);
/*
- * Unmark those signals that should never be blocked. Some of these
- * signal names don't exist on all platforms. Most do, but might as
- * well ifdef them all for consistency...
+ * Unmark those signals that should never be blocked. Some of these signal
+ * names don't exist on all platforms. Most do, but might as well ifdef
+ * them all for consistency...
*/
#ifdef SIGTRAP
sigdelset(&BlockSig, SIGTRAP);
UnBlockSig = 0;
BlockSig = sigmask(SIGQUIT) |
sigmask(SIGTERM) | sigmask(SIGALRM) |
- /* common signals between two */
+ /* common signals between two */
sigmask(SIGHUP) |
sigmask(SIGINT) | sigmask(SIGUSR1) |
sigmask(SIGUSR2) | sigmask(SIGCHLD) |
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.95 2005/10/13 15:37:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.96 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *pw_name_persist;
/*
- * Place platform-specific startup hacks here. This is the right
- * place to put code that must be executed early in launch of either a
- * postmaster, a standalone backend, or a standalone bootstrap run.
- * Note that this code will NOT be executed when a backend or
- * sub-bootstrap run is forked by the postmaster.
+ * Place platform-specific startup hacks here. This is the right place to
+ * put code that must be executed early in launch of either a postmaster,
+ * a standalone backend, or a standalone bootstrap run. Note that this
+ * code will NOT be executed when a backend or sub-bootstrap run is forked
+ * by the postmaster.
*
- * XXX The need for code here is proof that the platform in question is
- * too brain-dead to provide a standard C execution environment
- * without help. Avoid adding more here, if you can.
+ * XXX The need for code here is proof that the platform in question is too
+ * brain-dead to provide a standard C execution environment without help.
+ * Avoid adding more here, if you can.
*/
#if defined(__alpha) /* no __alpha__ ? */
#endif
/*
- * On some platforms, unaligned memory accesses result in a kernel
- * trap; the default kernel behavior is to emulate the memory
- * access, but this results in a significant performance
- * penalty. We ought to fix PG not to make such unaligned memory
- * accesses, so this code disables the kernel emulation: unaligned
- * accesses will result in SIGBUS instead.
+ * On some platforms, unaligned memory accesses result in a kernel trap;
+ * the default kernel behavior is to emulate the memory access, but this
+ * results in a significant performance penalty. We ought to fix PG not to
+ * make such unaligned memory accesses, so this code disables the kernel
+ * emulation: unaligned accesses will result in SIGBUS instead.
*/
#ifdef NOFIXADE
#endif
/*
- * Not-quite-so-platform-specific startup environment checks. Still
- * best to minimize these.
+ * Not-quite-so-platform-specific startup environment checks. Still best
+ * to minimize these.
*/
/*
- * Remember the physical location of the initially given argv[] array
- * for possible use by ps display. On some platforms, the argv[]
- * storage must be overwritten in order to set the process title for
- * ps. In such cases save_ps_display_args makes and returns a new copy
- * of the argv[] array.
+ * Remember the physical location of the initially given argv[] array for
+ * possible use by ps display. On some platforms, the argv[] storage must
+ * be overwritten in order to set the process title for ps. In such cases
+ * save_ps_display_args makes and returns a new copy of the argv[] array.
*
- * save_ps_display_args may also move the environment strings to make
- * extra room. Therefore this should be done as early as possible
- * during startup, to avoid entanglements with code that might save a
- * getenv() result pointer.
+ * save_ps_display_args may also move the environment strings to make extra
+ * room. Therefore this should be done as early as possible during
+ * startup, to avoid entanglements with code that might save a getenv()
+ * result pointer.
*/
argv = save_ps_display_args(argc, argv);
/*
* Set up locale information from environment. Note that LC_CTYPE and
* LC_COLLATE will be overridden later from pg_control if we are in an
- * already-initialized database. We set them here so that they will
- * be available to fill pg_control during initdb. LC_MESSAGES will
- * get set later during GUC option processing, but we set it here to
- * allow startup error messages to be localized.
+ * already-initialized database. We set them here so that they will be
+ * available to fill pg_control during initdb. LC_MESSAGES will get set
+ * later during GUC option processing, but we set it here to allow startup
+ * error messages to be localized.
*/
set_pglocale_pgservice(argv[0], "postgres");
#ifdef WIN32
/*
- * Windows uses codepages rather than the environment, so we work
- * around that by querying the environment explicitly first for
- * LC_COLLATE and LC_CTYPE. We have to do this because initdb passes
- * those values in the environment. If there is nothing there we fall
- * back on the codepage.
+ * Windows uses codepages rather than the environment, so we work around
+ * that by querying the environment explicitly first for LC_COLLATE and
+ * LC_CTYPE. We have to do this because initdb passes those values in the
+ * environment. If there is nothing there we fall back on the codepage.
*/
if ((env_locale = getenv("LC_COLLATE")) != NULL)
#endif
/*
- * We keep these set to "C" always, except transiently in pg_locale.c;
- * see that file for explanations.
+ * We keep these set to "C" always, except transiently in pg_locale.c; see
+ * that file for explanations.
*/
setlocale(LC_MONETARY, "C");
setlocale(LC_NUMERIC, "C");
setlocale(LC_TIME, "C");
/*
- * Skip permission checks if we're just trying to do --help or
- * --version; otherwise root will get unhelpful failure messages from
- * initdb.
+ * Skip permission checks if we're just trying to do --help or --version;
+ * otherwise root will get unhelpful failure messages from initdb.
*/
if (!(argc > 1
&& (strcmp(argv[1], "--help") == 0 ||
write_stderr("\"root\" execution of the PostgreSQL server is not permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
"possible system security compromise. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "more information on how to properly start the server.\n");
exit(1);
}
#endif /* !__BEOS__ */
/*
- * Also make sure that real and effective uids are the same.
- * Executing Postgres as a setuid program from a root shell is a
- * security hole, since on many platforms a nefarious subroutine
- * could setuid back to root if real uid is root. (Since nobody
- * actually uses Postgres as a setuid program, trying to actively
- * fix this situation seems more trouble than it's worth; we'll
- * just expend the effort to check for it.)
+ * Also make sure that real and effective uids are the same. Executing
+ * Postgres as a setuid program from a root shell is a security hole,
+ * since on many platforms a nefarious subroutine could setuid back to
+ * root if real uid is root. (Since nobody actually uses Postgres as
+ * a setuid program, trying to actively fix this situation seems more
+ * trouble than it's worth; we'll just expend the effort to check for
+ * it.)
*/
if (getuid() != geteuid())
{
"permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
"possible system security compromises. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "more information on how to properly start the server.\n");
exit(1);
}
#endif /* !WIN32 */
/*
* Now dispatch to one of PostmasterMain, PostgresMain, GucInfoMain,
- * SubPostmasterMain, or BootstrapMain depending on the program name
- * (and possibly first argument) we were called with. The lack of
- * consistency here is historical.
+ * SubPostmasterMain, or BootstrapMain depending on the program name (and
+ * possibly first argument) we were called with. The lack of consistency
+ * here is historical.
*/
if (strcmp(get_progname(argv[0]), "postmaster") == 0)
{
/*
* If the first argument begins with "-fork", then invoke
- * SubPostmasterMain. This is used for forking postmaster child
- * processes on systems where we can't simply fork.
+ * SubPostmasterMain. This is used for forking postmaster child processes
+ * on systems where we can't simply fork.
*/
#ifdef EXEC_BACKEND
if (argc > 1 && strncmp(argv[1], "-fork", 5) == 0)
#endif
#ifdef WIN32
+
/*
* Start our win32 signal implementation
*
- * SubPostmasterMain() will do this for itself, but the remaining
- * modes need it here
+ * SubPostmasterMain() will do this for itself, but the remaining modes need
+ * it here
*/
pgwin32_signal_initialize();
#endif
exit(GucInfoMain());
/*
- * Otherwise we're a standalone backend. Invoke PostgresMain,
- * specifying current userid as the "authenticated" Postgres user
- * name.
+ * Otherwise we're a standalone backend. Invoke PostgresMain, specifying
+ * current userid as the "authenticated" Postgres user name.
*/
#ifndef WIN32
pw = getpwuid(geteuid());
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/bitmapset.c,v 1.9 2005/06/15 16:24:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/bitmapset.c,v 1.10 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* Note: we must ensure that any two bitmapsets that are bms_equal() will
* hash to the same value; in practice this means that trailing all-zero
- * words cannot affect the result. The circular-shift-and-XOR hash method
+ * words cannot affect the result. The circular-shift-and-XOR hash method
* used here has this property, so long as we work from back to front.
*
* Note: you might wonder why we bother with the circular shift; at first
* multiword bitmapsets is "a JOIN b JOIN c JOIN d ...", which gives rise
* to rangetables in which base tables and JOIN nodes alternate; so
* bitmapsets of base table RT indexes tend to use only odd-numbered or only
- * even-numbered bits. A straight longitudinal XOR would preserve this
+ * even-numbered bits. A straight longitudinal XOR would preserve this
* property, leading to a much smaller set of possible outputs than if
* we include a shift.
*/
if (a == NULL || a->nwords <= 0)
return 0; /* All empty sets hash to 0 */
- for (wordnum = a->nwords; --wordnum > 0; )
+ for (wordnum = a->nwords; --wordnum > 0;)
{
result ^= a->words[wordnum];
if (result & ((bitmapword) 1 << (BITS_PER_BITMAPWORD - 1)))
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.315 2005/08/01 20:31:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.316 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static BitmapAnd *
_copyBitmapAnd(BitmapAnd *from)
{
- BitmapAnd *newnode = makeNode(BitmapAnd);
+ BitmapAnd *newnode = makeNode(BitmapAnd);
/*
* copy node superclass fields
static BitmapOr *
_copyBitmapOr(BitmapOr *from)
{
- BitmapOr *newnode = makeNode(BitmapOr);
+ BitmapOr *newnode = makeNode(BitmapOr);
/*
* copy node superclass fields
static BitmapIndexScan *
_copyBitmapIndexScan(BitmapIndexScan *from)
{
- BitmapIndexScan *newnode = makeNode(BitmapIndexScan);
+ BitmapIndexScan *newnode = makeNode(BitmapIndexScan);
/*
* copy node superclass fields
static BitmapHeapScan *
_copyBitmapHeapScan(BitmapHeapScan *from)
{
- BitmapHeapScan *newnode = makeNode(BitmapHeapScan);
+ BitmapHeapScan *newnode = makeNode(BitmapHeapScan);
/*
* copy node superclass fields
COPY_SCALAR_FIELD(right_sortop);
/*
- * Do not copy pathkeys, since they'd not be canonical in a copied
- * query
+ * Do not copy pathkeys, since they'd not be canonical in a copied query
*/
newnode->left_pathkey = NIL;
newnode->right_pathkey = NIL;
static GrantRoleStmt *
_copyGrantRoleStmt(GrantRoleStmt *from)
{
- GrantRoleStmt *newnode = makeNode(GrantRoleStmt);
+ GrantRoleStmt *newnode = makeNode(GrantRoleStmt);
COPY_NODE_FIELD(granted_roles);
COPY_NODE_FIELD(grantee_roles);
break;
/*
- * Lists of integers and OIDs don't need to be deep-copied, so
- * we perform a shallow copy via list_copy()
+ * Lists of integers and OIDs don't need to be deep-copied, so we
+ * perform a shallow copy via list_copy()
*/
case T_IntList:
case T_OidList:
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.252 2005/08/01 20:31:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.253 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* We treat all NULL constants of the same type as equal. Someday this
- * might need to change? But datumIsEqual doesn't work on nulls,
- * so...
+ * might need to change? But datumIsEqual doesn't work on nulls, so...
*/
if (a->constisnull)
return true;
COMPARE_SCALAR_FIELD(funcretset);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->funcformat != b->funcformat &&
a->funcformat != COERCE_DONTCARE &&
/*
* Special-case opfuncid: it is allowable for it to differ if one node
- * contains zero and the other doesn't. This just means that the one
- * node isn't as far along in the parse/plan pipeline and hasn't had
- * the opfuncid cache filled yet.
+ * contains zero and the other doesn't. This just means that the one node
+ * isn't as far along in the parse/plan pipeline and hasn't had the
+ * opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
/*
* Special-case opfuncid: it is allowable for it to differ if one node
- * contains zero and the other doesn't. This just means that the one
- * node isn't as far along in the parse/plan pipeline and hasn't had
- * the opfuncid cache filled yet.
+ * contains zero and the other doesn't. This just means that the one node
+ * isn't as far along in the parse/plan pipeline and hasn't had the
+ * opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
/*
* Special-case opfuncid: it is allowable for it to differ if one node
- * contains zero and the other doesn't. This just means that the one
- * node isn't as far along in the parse/plan pipeline and hasn't had
- * the opfuncid cache filled yet.
+ * contains zero and the other doesn't. This just means that the one node
+ * isn't as far along in the parse/plan pipeline and hasn't had the
+ * opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
COMPARE_SCALAR_FIELD(resulttypmod);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->relabelformat != b->relabelformat &&
a->relabelformat != COERCE_DONTCARE &&
COMPARE_SCALAR_FIELD(resulttype);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->convertformat != b->convertformat &&
a->convertformat != COERCE_DONTCARE &&
COMPARE_SCALAR_FIELD(row_typeid);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->row_format != b->row_format &&
a->row_format != COERCE_DONTCARE &&
/*
* Special-case opfuncid: it is allowable for it to differ if one node
- * contains zero and the other doesn't. This just means that the one
- * node isn't as far along in the parse/plan pipeline and hasn't had
- * the opfuncid cache filled yet.
+ * contains zero and the other doesn't. This just means that the one node
+ * isn't as far along in the parse/plan pipeline and hasn't had the
+ * opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
COMPARE_SCALAR_FIELD(resulttypmod);
/*
- * Special-case COERCE_DONTCARE, so that planner can build coercion
- * nodes that are equal() to both explicit and implicit coercions.
+ * Special-case COERCE_DONTCARE, so that planner can build coercion nodes
+ * that are equal() to both explicit and implicit coercions.
*/
if (a->coercionformat != b->coercionformat &&
a->coercionformat != COERCE_DONTCARE &&
COMPARE_BITMAPSET_FIELD(required_relids);
/*
- * We ignore all the remaining fields, since they may not be set yet,
- * and should be derivable from the clause anyway.
+ * We ignore all the remaining fields, since they may not be set yet, and
+ * should be derivable from the clause anyway.
*/
return true;
ListCell *item_b;
/*
- * Try to reject by simple scalar checks before grovelling through all
- * the list elements...
+ * Try to reject by simple scalar checks before grovelling through all the
+ * list elements...
*/
COMPARE_SCALAR_FIELD(type);
COMPARE_SCALAR_FIELD(length);
/*
- * We place the switch outside the loop for the sake of efficiency;
- * this may not be worth doing...
+ * We place the switch outside the loop for the sake of efficiency; this
+ * may not be worth doing...
*/
switch (a->type)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.65 2005/07/28 20:26:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.66 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(list->head->next == list->tail);
Assert(list->tail->next == NULL);
}
-
#else
#define check_list_invariants(l)
#endif /* USE_ASSERT_CHECKING */
Assert(prev != NULL ? lnext(prev) == cell : list_head(list) == cell);
/*
- * If we're about to delete the last node from the list, free the
- * whole list instead and return NIL, which is the only valid
- * representation of a zero-length list.
+ * If we're about to delete the last node from the list, free the whole
+ * list instead and return NIL, which is the only valid representation of
+ * a zero-length list.
*/
if (list->length == 1)
{
}
/*
- * Otherwise, adjust the necessary list links, deallocate the
- * particular node we have just removed, and return the list we were
- * given.
+ * Otherwise, adjust the necessary list links, deallocate the particular
+ * node we have just removed, and return the list we were given.
*/
list->length--;
* via equal().
*
* This is almost the same functionality as list_union(), but list1 is
- * modified in-place rather than being copied. Note also that list2's cells
+ * modified in-place rather than being copied. Note also that list2's cells
* are not inserted in list1, so the analogy to list_concat() isn't perfect.
*/
List *
newlist->length = oldlist->length;
/*
- * Copy over the data in the first cell; new_list() has already
- * allocated the head cell itself
+ * Copy over the data in the first cell; new_list() has already allocated
+ * the head cell itself
*/
newlist->head->data = oldlist->head->data;
oldlist_cur = oldlist_cur->next;
/*
- * Copy over the data in the first remaining cell; new_list() has
- * already allocated the head cell itself
+ * Copy over the data in the first remaining cell; new_list() has already
+ * allocated the head cell itself
*/
newlist->head->data = oldlist_cur->data;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.47 2005/04/06 16:34:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.48 2005/10/15 02:49:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
var->varlevelsup = varlevelsup;
/*
- * Since few if any routines ever create Var nodes with
- * varnoold/varoattno different from varno/varattno, we don't provide
- * separate arguments for them, but just initialize them to the given
- * varno/varattno. This reduces code clutter and chance of error for
- * most callers.
+ * Since few if any routines ever create Var nodes with varnoold/varoattno
+ * different from varno/varattno, we don't provide separate arguments for
+ * them, but just initialize them to the given varno/varattno. This
+ * reduces code clutter and chance of error for most callers.
*/
var->varnoold = varno;
var->varoattno = varattno;
tle->resname = resname;
/*
- * We always set these fields to 0. If the caller wants to change them
- * he must do so explicitly. Few callers do that, so omitting these
+ * We always set these fields to 0. If the caller wants to change them he
+ * must do so explicitly. Few callers do that, so omitting these
* arguments reduces the chance of error.
*/
tle->ressortgroupref = 0;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.260 2005/08/27 22:13:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.261 2005/10/15 02:49:18 momjian Exp $
*
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
}
/*
- * Look for characters or patterns that are treated specially by
- * read.c (either in pg_strtok() or in nodeRead()), and therefore need
- * a protective backslash.
+ * Look for characters or patterns that are treated specially by read.c
+ * (either in pg_strtok() or in nodeRead()), and therefore need a
+ * protective backslash.
*/
/* These characters only need to be quoted at the start of the string */
if (*s == '<' ||
{
/*
* For the sake of backward compatibility, we emit a slightly
- * different whitespace format for lists of nodes vs. other types
- * of lists. XXX: is this necessary?
+ * different whitespace format for lists of nodes vs. other types of
+ * lists. XXX: is this necessary?
*/
if (IsA(node, List))
{
/*
* Hack to work around missing outfuncs routines for a lot of the
* utility-statement node types. (The only one we actually *need* for
- * rules support is NotifyStmt.) Someday we ought to support 'em all,
- * but for the meantime do this to avoid getting lots of warnings when
- * running with debug_print_parse on.
+ * rules support is NotifyStmt.) Someday we ought to support 'em all, but
+ * for the meantime do this to avoid getting lots of warnings when running
+ * with debug_print_parse on.
*/
if (node->utilityStmt)
{
case T_Float:
/*
- * We assume the value is a valid numeric literal and so does
- * not need quoting.
+ * We assume the value is a valid numeric literal and so does not
+ * need quoting.
*/
appendStringInfoString(str, value->val.str);
break;
default:
/*
- * This should be an ERROR, but it's too useful to be able
- * to dump structures that _outNode only understands part
- * of.
+ * This should be an ERROR, but it's too useful to be able to
+ * dump structures that _outNode only understands part of.
*/
elog(WARNING, "could not dump unrecognized node type: %d",
(int) nodeTag(obj));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.76 2005/05/01 18:56:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.77 2005/10/15 02:49:19 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
if (IsA(p, BitmapAnd))
{
ListCell *l;
- BitmapAnd *bitmapandplan = (BitmapAnd *) p;
+ BitmapAnd *bitmapandplan = (BitmapAnd *) p;
foreach(l, bitmapandplan->bitmapplans)
{
if (IsA(p, BitmapOr))
{
ListCell *l;
- BitmapOr *bitmaporplan = (BitmapOr *) p;
+ BitmapOr *bitmaporplan = (BitmapOr *) p;
foreach(l, bitmaporplan->bitmapplans)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.46 2004/12/31 21:59:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.47 2005/10/15 02:49:19 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
void *retval;
/*
- * We save and restore the pre-existing state of pg_strtok. This makes
- * the world safe for re-entrant invocation of stringToNode, without
- * incurring a lot of notational overhead by having to pass the
- * next-character pointer around through all the readfuncs.c code.
+ * We save and restore the pre-existing state of pg_strtok. This makes the
+ * world safe for re-entrant invocation of stringToNode, without incurring
+ * a lot of notational overhead by having to pass the next-character
+ * pointer around through all the readfuncs.c code.
*/
save_strtok = pg_strtok_ptr;
if (*numptr == '+' || *numptr == '-')
numptr++, numlen--;
if ((numlen > 0 && isdigit((unsigned char) *numptr)) ||
- (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1])))
+ (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1])))
{
/*
* Yes. Figure out whether it is integral or float; this requires
- * both a syntax check and a range check. strtol() can do both for
- * us. We know the token will end at a character that strtol will
- * stop at, so we do not need to modify the string.
+ * both a syntax check and a range check. strtol() can do both for us.
+ * We know the token will end at a character that strtol will stop at,
+ * so we do not need to modify the string.
*/
long val;
char *endptr;
case T_Integer:
/*
- * we know that the token terminates on a char atol will stop
- * at
+ * we know that the token terminates on a char atol will stop at
*/
result = (Node *) makeInteger(atol(token));
break;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/readfuncs.c,v 1.181 2005/08/01 20:31:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/readfuncs.c,v 1.182 2005/10/15 02:49:19 momjian Exp $
*
* NOTES
* Path and Plan nodes do not have any readfuncs support, because we
READ_OID_FIELD(opfuncid);
/*
- * The opfuncid is stored in the textual format primarily for
- * debugging and documentation reasons. We want to always read it as
- * zero to force it to be re-looked-up in the pg_operator entry. This
- * ensures that stored rules don't have hidden dependencies on
- * operators' functions. (We don't currently support an ALTER OPERATOR
- * command, but might someday.)
+ * The opfuncid is stored in the textual format primarily for debugging
+ * and documentation reasons. We want to always read it as zero to force
+ * it to be re-looked-up in the pg_operator entry. This ensures that
+ * stored rules don't have hidden dependencies on operators' functions.
+ * (We don't currently support an ALTER OPERATOR command, but might
+ * someday.)
*/
local_node->opfuncid = InvalidOid;
READ_OID_FIELD(opfuncid);
/*
- * The opfuncid is stored in the textual format primarily for
- * debugging and documentation reasons. We want to always read it as
- * zero to force it to be re-looked-up in the pg_operator entry. This
- * ensures that stored rules don't have hidden dependencies on
- * operators' functions. (We don't currently support an ALTER OPERATOR
- * command, but might someday.)
+ * The opfuncid is stored in the textual format primarily for debugging
+ * and documentation reasons. We want to always read it as zero to force
+ * it to be re-looked-up in the pg_operator entry. This ensures that
+ * stored rules don't have hidden dependencies on operators' functions.
+ * (We don't currently support an ALTER OPERATOR command, but might
+ * someday.)
*/
local_node->opfuncid = InvalidOid;
READ_OID_FIELD(opfuncid);
/*
- * The opfuncid is stored in the textual format primarily for
- * debugging and documentation reasons. We want to always read it as
- * zero to force it to be re-looked-up in the pg_operator entry. This
- * ensures that stored rules don't have hidden dependencies on
- * operators' functions. (We don't currently support an ALTER OPERATOR
- * command, but might someday.)
+ * The opfuncid is stored in the textual format primarily for debugging
+ * and documentation reasons. We want to always read it as zero to force
+ * it to be re-looked-up in the pg_operator entry. This ensures that
+ * stored rules don't have hidden dependencies on operators' functions.
+ * (We don't currently support an ALTER OPERATOR command, but might
+ * someday.)
*/
local_node->opfuncid = InvalidOid;
READ_OID_FIELD(opfuncid);
/*
- * The opfuncid is stored in the textual format primarily for
- * debugging and documentation reasons. We want to always read it as
- * zero to force it to be re-looked-up in the pg_operator entry. This
- * ensures that stored rules don't have hidden dependencies on
- * operators' functions. (We don't currently support an ALTER OPERATOR
- * command, but might someday.)
+ * The opfuncid is stored in the textual format primarily for debugging
+ * and documentation reasons. We want to always read it as zero to force
+ * it to be re-looked-up in the pg_operator entry. This ensures that
+ * stored rules don't have hidden dependencies on operators' functions.
+ * (We don't currently support an ALTER OPERATOR command, but might
+ * someday.)
*/
local_node->opfuncid = InvalidOid;
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.7 2005/09/02 19:02:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.8 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* The maximum number of tuples per page is not large (typically 256 with
* 8K pages, or 1024 with 32K pages). So there's not much point in making
- * the per-page bitmaps variable size. We just legislate that the size
+ * the per-page bitmaps variable size. We just legislate that the size
* is this:
*/
#define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage
* for that page in the page table.
*
* We actually store both exact pages and lossy chunks in the same hash
- * table, using identical data structures. (This is because dynahash.c's
+ * table, using identical data structures. (This is because dynahash.c's
* memory management doesn't allow space to be transferred easily from one
* hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the
- * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
+ * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
* also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer
* remainder operations. So, define it like this:
*/
#define BITNUM(x) ((x) % BITS_PER_BITMAPWORD)
/* number of active words for an exact page: */
-#define WORDS_PER_PAGE ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1)
+#define WORDS_PER_PAGE ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1)
/* number of active words for a lossy chunk: */
#define WORDS_PER_CHUNK ((PAGES_PER_CHUNK - 1) / BITS_PER_BITMAPWORD + 1)
*/
typedef struct PagetableEntry
{
- BlockNumber blockno; /* page number (hashtable key) */
+ BlockNumber blockno; /* page number (hashtable key) */
bool ischunk; /* T = lossy storage, F = exact */
bitmapword words[Max(WORDS_PER_PAGE, WORDS_PER_CHUNK)];
} PagetableEntry;
/* Local function prototypes */
static void tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage);
static bool tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage,
- const TIDBitmap *b);
+ const TIDBitmap *b);
static const PagetableEntry *tbm_find_pageentry(const TIDBitmap *tbm,
- BlockNumber pageno);
+ BlockNumber pageno);
static PagetableEntry *tbm_get_pageentry(TIDBitmap *tbm, BlockNumber pageno);
static bool tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno);
static void tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno);
long nbuckets;
/*
- * Create the TIDBitmap struct, with enough trailing space to serve
- * the needs of the TBMIterateResult sub-struct.
+ * Create the TIDBitmap struct, with enough trailing space to serve the
+ * needs of the TBMIterateResult sub-struct.
*/
tbm = (TIDBitmap *) palloc(sizeof(TIDBitmap) +
MAX_TUPLES_PER_PAGE * sizeof(OffsetNumber));
tbm->status = TBM_EMPTY;
/*
- * Estimate number of hashtable entries we can have within maxbytes.
- * This estimates the hash overhead at MAXALIGN(sizeof(HASHELEMENT))
- * plus a pointer per hash entry, which is crude but good enough for
- * our purpose. Also count an extra Pointer per entry for the arrays
- * created during iteration readout.
+ * Estimate number of hashtable entries we can have within maxbytes. This
+ * estimates the hash overhead at MAXALIGN(sizeof(HASHELEMENT)) plus a
+ * pointer per hash entry, which is crude but good enough for our purpose.
+ * Also count an extra Pointer per entry for the arrays created during
+ * iteration readout.
*/
nbuckets = maxbytes /
(MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(PagetableEntry))
+ sizeof(Pointer) + sizeof(Pointer));
- nbuckets = Min(nbuckets, INT_MAX-1); /* safety limit */
- nbuckets = Max(nbuckets, 16); /* sanity limit */
+ nbuckets = Min(nbuckets, INT_MAX - 1); /* safety limit */
+ nbuckets = Max(nbuckets, 16); /* sanity limit */
tbm->maxentries = (int) nbuckets;
return tbm;
tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage)
{
PagetableEntry *apage;
- int wordnum;
+ int wordnum;
if (bpage->ischunk)
{
if (w != 0)
{
- BlockNumber pg;
+ BlockNumber pg;
pg = bpage->blockno + (wordnum * BITS_PER_BITMAPWORD);
while (w != 0)
tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b)
{
const PagetableEntry *bpage;
- int wordnum;
+ int wordnum;
if (apage->ischunk)
{
/* Scan each bit in chunk, try to clear */
- bool candelete = true;
+ bool candelete = true;
for (wordnum = 0; wordnum < WORDS_PER_PAGE; wordnum++)
{
if (w != 0)
{
bitmapword neww = w;
- BlockNumber pg;
- int bitnum;
+ BlockNumber pg;
+ int bitnum;
pg = apage->blockno + (wordnum * BITS_PER_BITMAPWORD);
bitnum = 0;
else if (tbm_page_is_lossy(b, apage->blockno))
{
/*
- * When the page is lossy in b, we have to mark it lossy in a too.
- * We know that no bits need be set in bitmap a, but we do not know
- * which ones should be cleared, and we have no API for "at most
- * these tuples need be checked". (Perhaps it's worth adding that?)
+ * When the page is lossy in b, we have to mark it lossy in a too. We
+ * know that no bits need be set in bitmap a, but we do not know which
+ * ones should be cleared, and we have no API for "at most these
+ * tuples need be checked". (Perhaps it's worth adding that?)
*/
tbm_mark_page_lossy(a, apage->blockno);
/*
- * Note: tbm_mark_page_lossy will have removed apage from a, and
- * may have inserted a new lossy chunk instead. We can continue the
- * same seq_search scan at the caller level, because it does not
- * matter whether we visit such a new chunk or not: it will have
- * only the bit for apage->blockno set, which is correct.
+ * Note: tbm_mark_page_lossy will have removed apage from a, and may
+ * have inserted a new lossy chunk instead. We can continue the same
+ * seq_search scan at the caller level, because it does not matter
+ * whether we visit such a new chunk or not: it will have only the bit
+ * for apage->blockno set, which is correct.
*
* We must return false here since apage was already deleted.
*/
}
else
{
- bool candelete = true;
+ bool candelete = true;
bpage = tbm_find_pageentry(b, apage->blockno);
if (bpage != NULL)
int nchunks;
tbm->iterating = true;
+
/*
* Reset iteration pointers.
*/
tbm->spageptr = 0;
tbm->schunkptr = 0;
tbm->schunkbit = 0;
+
/*
* Nothing else to do if no entries, nor if we don't have a hashtable.
*/
if (tbm->nentries == 0 || tbm->status != TBM_HASH)
return;
+
/*
* Create and fill the sorted page lists if we didn't already.
*/
TBMIterateResult *output = &(tbm->output);
Assert(tbm->iterating);
+
/*
* If lossy chunk pages remain, make sure we've advanced schunkptr/
* schunkbit to the next set bit.
while (tbm->schunkptr < tbm->nchunks)
{
PagetableEntry *chunk = tbm->schunks[tbm->schunkptr];
- int schunkbit = tbm->schunkbit;
+ int schunkbit = tbm->schunkbit;
while (schunkbit < PAGES_PER_CHUNK)
{
- int wordnum = WORDNUM(schunkbit);
- int bitnum = BITNUM(schunkbit);
+ int wordnum = WORDNUM(schunkbit);
+ int bitnum = BITNUM(schunkbit);
if ((chunk->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0)
break;
tbm->schunkptr++;
tbm->schunkbit = 0;
}
+
/*
* If both chunk and per-page data remain, must output the numerically
* earlier page.
*
* If new, the entry is marked as an exact (non-chunk) entry.
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static PagetableEntry *
HASH_FIND, NULL);
if (page != NULL && page->ischunk)
{
- int wordnum = WORDNUM(bitno);
- int bitnum = BITNUM(bitno);
+ int wordnum = WORDNUM(bitno);
+ int bitnum = BITNUM(bitno);
if ((page->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0)
return true;
/*
* tbm_mark_page_lossy - mark the page number as lossily stored
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static void
chunk_pageno = pageno - bitno;
/*
- * Remove any extant non-lossy entry for the page. If the page is
- * its own chunk header, however, we skip this and handle the case
- * below.
+ * Remove any extant non-lossy entry for the page. If the page is its own
+ * chunk header, however, we skip this and handle the case below.
*/
if (bitno != 0)
{
/*
* XXX Really stupid implementation: this just lossifies pages in
- * essentially random order. We should be paying some attention
- * to the number of bits set in each page, instead. Also it might
- * be a good idea to lossify more than the minimum number of pages
- * during each call.
+ * essentially random order. We should be paying some attention to the
+ * number of bits set in each page, instead. Also it might be a good idea
+ * to lossify more than the minimum number of pages during each call.
*/
Assert(!tbm->iterating);
Assert(tbm->status == TBM_HASH);
{
if (page->ischunk)
continue; /* already a chunk header */
+
/*
- * If the page would become a chunk header, we won't save anything
- * by converting it to lossy, so skip it.
+ * If the page would become a chunk header, we won't save anything by
+ * converting it to lossy, so skip it.
*/
if ((page->blockno % PAGES_PER_CHUNK) == 0)
continue;
return; /* we have done enough */
/*
- * Note: tbm_mark_page_lossy may have inserted a lossy chunk into
- * the hashtable. We can continue the same seq_search scan since
- * we do not care whether we visit lossy chunks or not.
+ * Note: tbm_mark_page_lossy may have inserted a lossy chunk into the
+ * hashtable. We can continue the same seq_search scan since we do
+ * not care whether we visit lossy chunks or not.
*/
}
}
* geqo_erx.c
* edge recombination crossover [ER]
*
-* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.19 2003/11/29 22:39:49 pgsql Exp $
+* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_erx.c,v 1.20 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Edge *edge_table;
/*
- * palloc one extra location so that nodes numbered 1..n can be
- * indexed directly; 0 will not be used
+ * palloc one extra location so that nodes numbered 1..n can be indexed
+ * directly; 0 will not be used
*/
edge_table = (Edge *) palloc((num_gene + 1) * sizeof(Edge));
int i,
index1,
index2;
- int edge_total; /* total number of unique edges in two
- * genes */
+ int edge_total; /* total number of unique edges in two genes */
/* at first clear the edge table's old data */
for (i = 1; i <= num_gene; i++)
for (index1 = 0; index1 < num_gene; index1++)
{
/*
- * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this
- * operaton maps n back to 1
+ * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton
+ * maps n back to 1
*/
index2 = (index1 + 1) % num_gene;
/*
- * edges are bidirectional, i.e. 1->2 is same as 2->1 call
- * gimme_edge twice per edge
+ * edges are bidirectional, i.e. 1->2 is same as 2->1 call gimme_edge
+ * twice per edge
*/
edge_total += gimme_edge(tour1[index1], tour1[index2], edge_table);
*/
/*
- * The test for minimum_count can probably be removed at some
- * point but comments should probably indicate exactly why it is
- * guaranteed that the test will always succeed the first time
- * around. If it can fail then the code is in error
+ * The test for minimum_count can probably be removed at some point
+ * but comments should probably indicate exactly why it is guaranteed
+ * that the test will always succeed the first time around. If it can
+ * fail then the code is in error
*/
/*
- * how many edges remain? how many gene with four total (initial)
- * edges remain?
+ * how many edges remain? how many gene with four total (initial) edges
+ * remain?
*/
for (i = 1; i <= num_gene; i++)
}
/*
- * random decision of the gene with remaining edges and whose
- * total_edges == 4
+ * random decision of the gene with remaining edges and whose total_edges
+ * == 4
*/
if (four_count != 0)
}
/*
- * edge table seems to be empty; this happens sometimes on the last
- * point due to the fact that the first point is removed from the
- * table even though only one of its edges has been determined
+ * edge table seems to be empty; this happens sometimes on the last point
+ * due to the fact that the first point is removed from the table even
+ * though only one of its edges has been determined
*/
else
- { /* occurs only at the last point in the
- * tour; simply look for the point which
- * is not yet used */
+ { /* occurs only at the last point in the tour;
+ * simply look for the point which is not yet
+ * used */
for (i = 1; i <= num_gene; i++)
if (edge_table[i].unused_edges >= 0)
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.76 2005/06/09 04:18:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.77 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
struct HTAB *savehash;
/*
- * Because gimme_tree considers both left- and right-sided trees,
- * there is no difference between a tour (a,b,c,d,...) and a tour
- * (b,a,c,d,...) --- the same join orders will be considered. To avoid
- * redundant cost calculations, we simply reject tours where tour[0] >
- * tour[1], assigning them an artificially bad fitness.
+ * Because gimme_tree considers both left- and right-sided trees, there is
+ * no difference between a tour (a,b,c,d,...) and a tour (b,a,c,d,...) ---
+ * the same join orders will be considered. To avoid redundant cost
+ * calculations, we simply reject tours where tour[0] > tour[1], assigning
+ * them an artificially bad fitness.
*
* init_tour() is aware of this rule and so we should never reject a tour
- * during the initial filling of the pool. It seems difficult to
- * persuade the recombination logic never to break the rule, however.
+ * during the initial filling of the pool. It seems difficult to persuade
+ * the recombination logic never to break the rule, however.
*/
if (num_gene >= 2 && tour[0] > tour[1])
return DBL_MAX;
* Create a private memory context that will hold all temp storage
* allocated inside gimme_tree().
*
- * Since geqo_eval() will be called many times, we can't afford to let
- * all that memory go unreclaimed until end of statement. Note we
- * make the temp context a child of the planner's normal context, so
- * that it will be freed even if we abort via ereport(ERROR).
+ * Since geqo_eval() will be called many times, we can't afford to let all
+ * that memory go unreclaimed until end of statement. Note we make the
+ * temp context a child of the planner's normal context, so that it will
+ * be freed even if we abort via ereport(ERROR).
*/
mycontext = AllocSetContextCreate(CurrentMemoryContext,
"GEQO",
/*
* gimme_tree will add entries to root->join_rel_list, which may or may
* not already contain some entries. The newly added entries will be
- * recycled by the MemoryContextDelete below, so we must ensure that
- * the list is restored to its former state before exiting. We can
- * do this by truncating the list to its original length. NOTE this
- * assumes that any added entries are appended at the end!
+ * recycled by the MemoryContextDelete below, so we must ensure that the
+ * list is restored to its former state before exiting. We can do this by
+ * truncating the list to its original length. NOTE this assumes that any
+ * added entries are appended at the end!
*
- * We also must take care not to mess up the outer join_rel_hash,
- * if there is one. We can do this by just temporarily setting the
- * link to NULL. (If we are dealing with enough join rels, which we
- * very likely are, a new hash table will get built and used locally.)
+ * We also must take care not to mess up the outer join_rel_hash, if there is
+ * one. We can do this by just temporarily setting the link to NULL. (If
+ * we are dealing with enough join rels, which we very likely are, a new
+ * hash table will get built and used locally.)
*/
savelength = list_length(evaldata->root->join_rel_list);
savehash = evaldata->root->join_rel_hash;
* Push each relation onto the stack in the specified order. After
* pushing each relation, see whether the top two stack entries are
* joinable according to the desirable_join() heuristics. If so, join
- * them into one stack entry, and try again to combine with the next
- * stack entry down (if any). When the stack top is no longer
- * joinable, continue to the next input relation. After we have
- * pushed the last input relation, the heuristics are disabled and we
- * force joining all the remaining stack entries.
+ * them into one stack entry, and try again to combine with the next stack
+ * entry down (if any). When the stack top is no longer joinable,
+ * continue to the next input relation. After we have pushed the last
+ * input relation, the heuristics are disabled and we force joining all
+ * the remaining stack entries.
*
* If desirable_join() always returns true, this produces a straight
- * left-to-right join just like the old code. Otherwise we may
- * produce a bushy plan or a left/right-sided plan that really
- * corresponds to some tour other than the one given. To the extent
- * that the heuristics are helpful, however, this will be a better
- * plan than the raw tour.
+ * left-to-right join just like the old code. Otherwise we may produce a
+ * bushy plan or a left/right-sided plan that really corresponds to some
+ * tour other than the one given. To the extent that the heuristics are
+ * helpful, however, this will be a better plan than the raw tour.
*
- * Also, when a join attempt fails (because of IN-clause constraints), we
- * may be able to recover and produce a workable plan, where the old
- * code just had to give up. This case acts the same as a false
- * result from desirable_join().
+ * Also, when a join attempt fails (because of IN-clause constraints), we may
+ * be able to recover and produce a workable plan, where the old code just
+ * had to give up. This case acts the same as a false result from
+ * desirable_join().
*/
for (rel_count = 0; rel_count < num_gene; rel_count++)
{
stack_depth++;
/*
- * While it's feasible, pop the top two stack entries and replace
- * with their join.
+ * While it's feasible, pop the top two stack entries and replace with
+ * their join.
*/
while (stack_depth >= 2)
{
RelOptInfo *inner_rel = stack[stack_depth - 1];
/*
- * Don't pop if heuristics say not to join now. However, once
- * we have exhausted the input, the heuristics can't prevent
- * popping.
+ * Don't pop if heuristics say not to join now. However, once we
+ * have exhausted the input, the heuristics can't prevent popping.
*/
if (rel_count < num_gene - 1 &&
!desirable_join(evaldata->root, outer_rel, inner_rel))
break;
/*
- * Construct a RelOptInfo representing the join of these two
- * input relations. These are always inner joins. Note that
- * we expect the joinrel not to exist in root->join_rel_list
- * yet, and so the paths constructed for it will only include
- * the ones we want.
+ * Construct a RelOptInfo representing the join of these two input
+ * relations. These are always inner joins. Note that we expect
+ * the joinrel not to exist in root->join_rel_list yet, and so the
+ * paths constructed for it will only include the ones we want.
*/
joinrel = make_join_rel(evaldata->root, outer_rel, inner_rel,
JOIN_INNER);
return true;
/*
- * Join if the rels are members of the same IN sub-select. This is
- * needed to improve the odds that we will find a valid solution in a
- * case where an IN sub-select has a clauseless join.
+ * Join if the rels are members of the same IN sub-select. This is needed
+ * to improve the odds that we will find a valid solution in a case where
+ * an IN sub-select has a clauseless join.
*/
foreach(l, root->in_info_list)
{
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.50 2005/06/08 23:02:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.51 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
random_init_pool(pool, &evaldata);
/* sort the pool according to cheapest path as fitness */
- sort_pool(pool); /* we have to do it only one time, since
- * all kids replace the worst individuals
- * in future (-> geqo_pool.c:spread_chromo
- * ) */
+ sort_pool(pool); /* we have to do it only one time, since all
+ * kids replace the worst individuals in
+ * future (-> geqo_pool.c:spread_chromo ) */
#ifdef GEQO_DEBUG
elog(DEBUG1, "GEQO selected %d pool entries, best %.2f, worst %.2f",
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.42 2004/12/31 21:59:58 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.43 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
elog(ERROR, "pool_size is zero");
/*
- * Since the pool may contain multiple occurrences of DBL_MAX, divide
- * by pool->size before summing, not after, to avoid overflow. This
- * loses a little in speed and accuracy, but this routine is only used
- * for debug printouts, so we don't care that much.
+ * Since the pool may contain multiple occurrences of DBL_MAX, divide by
+ * pool->size before summing, not after, to avoid overflow. This loses a
+ * little in speed and accuracy, but this routine is only used for debug
+ * printouts, so we don't care that much.
*/
for (i = 0; i < pool->size; i++)
cumulative += pool->data[i].worth / pool->size;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.26 2004/12/31 21:59:58 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.27 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int bad = 0;
/*
- * We immediately discard any invalid individuals (those that
- * geqo_eval returns DBL_MAX for), thereby not wasting pool space on
- * them.
+ * We immediately discard any invalid individuals (those that geqo_eval
+ * returns DBL_MAX for), thereby not wasting pool space on them.
*
- * If we fail to make any valid individuals after 10000 tries, give up;
- * this probably means something is broken, and we shouldn't just let
- * ourselves get stuck in an infinite loop.
+ * If we fail to make any valid individuals after 10000 tries, give up; this
+ * probably means something is broken, and we shouldn't just let ourselves
+ * get stuck in an infinite loop.
*/
i = 0;
while (i < pool->size)
/*
- * these 2 cases move the search indices since a new location has
- * not yet been found.
+ * these 2 cases move the search indices since a new location has not
+ * yet been found.
*/
else if (chromo->worth < pool->data[mid].worth)
/* now we have index for chromo */
/*
- * move every gene from index on down one position to make room for
- * chromo
+ * move every gene from index on down one position to make room for chromo
*/
/*
* geqo_recombination.c
* misc recombination procedures
*
-* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.14 2004/08/29 05:06:43 momjian Exp $
+* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.15 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Since geqo_eval() will reject tours where tour[0] > tour[1], we may
- * as well switch the two to make it a valid tour.
+ * Since geqo_eval() will reject tours where tour[0] > tour[1], we may as
+ * well switch the two to make it a valid tour.
*/
if (num_gene >= 2 && tour[0] > tour[1])
{
City *city_table;
/*
- * palloc one extra location so that nodes numbered 1..n can be
- * indexed directly; 0 will not be used
+ * palloc one extra location so that nodes numbered 1..n can be indexed
+ * directly; 0 will not be used
*/
city_table = (City *) palloc((num_gene + 1) * sizeof(City));
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.19 2005/06/14 14:21:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_selection.c,v 1.20 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If geqo_rand() returns exactly 1.0 then we will get exactly max from
- * this equation, whereas we need 0 <= index < max. Also it seems possible
- * that roundoff error might deliver values slightly outside the range;
- * in particular avoid passing a value slightly less than 0 to sqrt().
- * If we get a bad value just try again.
+ * this equation, whereas we need 0 <= index < max. Also it seems
+ * possible that roundoff error might deliver values slightly outside the
+ * range; in particular avoid passing a value slightly less than 0 to
+ * sqrt(). If we get a bad value just try again.
*/
- do {
- double sqrtval;
+ do
+ {
+ double sqrtval;
sqrtval = (bias * bias) - 4.0 * (bias - 1.0) * geqo_rand();
if (sqrtval > 0.0)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.136 2005/08/22 17:34:58 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.137 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
bool *differentTypes);
static void subquery_push_qual(Query *subquery,
- RangeTblEntry *rte, Index rti, Node *qual);
+ RangeTblEntry *rte, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
RangeTblEntry *rte, Index rti, Node *qual);
if (brel == NULL)
continue;
- Assert(brel->relid == rti); /* sanity check on array */
+ Assert(brel->relid == rti); /* sanity check on array */
/* ignore RTEs that are "other rels" */
if (brel->reloptkind != RELOPT_BASEREL)
Index rti;
/*
- * Note: because we call expand_inherited_rtentry inside the loop,
- * it's quite possible for the base_rel_array to be enlarged while
- * the loop runs. Hence don't try to optimize the loop.
+ * Note: because we call expand_inherited_rtentry inside the loop, it's
+ * quite possible for the base_rel_array to be enlarged while the loop
+ * runs. Hence don't try to optimize the loop.
*/
for (rti = 1; rti < root->base_rel_array_size; rti++)
{
ListCell *il;
/*
- * XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE;
- * can we do better?
+ * XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE; can
+ * we do better?
*/
if (list_member_int(root->parse->rowMarks, parentRTindex))
ereport(ERROR,
rel->width = 0;
/*
- * Generate access paths for each table in the tree (parent AND
- * children), and pick the cheapest path for each table.
+ * Generate access paths for each table in the tree (parent AND children),
+ * and pick the cheapest path for each table.
*/
foreach(il, inheritlist)
{
childOID = childrte->relid;
/*
- * Make a RelOptInfo for the child so we can do planning.
- * Mark it as an "other rel" since it will not be part of the
- * main join tree.
+ * Make a RelOptInfo for the child so we can do planning. Mark it as
+ * an "other rel" since it will not be part of the main join tree.
*/
childrel = build_other_rel(root, childRTindex);
/*
- * Copy the parent's targetlist and restriction quals to the
- * child, with attribute-number adjustment as needed. We don't
- * bother to copy the join quals, since we can't do any joining of
- * the individual tables. Also, we just zap attr_needed rather
- * than trying to adjust it; it won't be looked at in the child.
+ * Copy the parent's targetlist and restriction quals to the child,
+ * with attribute-number adjustment as needed. We don't bother to
+ * copy the join quals, since we can't do any joining of the
+ * individual tables. Also, we just zap attr_needed rather than
+ * trying to adjust it; it won't be looked at in the child.
*/
childrel->reltargetlist = (List *)
adjust_inherited_attrs((Node *) rel->reltargetlist,
*/
if (constraint_exclusion)
{
- List *constraint_pred;
+ List *constraint_pred;
constraint_pred = get_relation_constraints(childOID, childrel);
+
/*
- * We do not currently enforce that CHECK constraints contain
- * only immutable functions, so it's necessary to check here.
- * We daren't draw conclusions from plan-time evaluation of
+ * We do not currently enforce that CHECK constraints contain only
+ * immutable functions, so it's necessary to check here. We
+ * daren't draw conclusions from plan-time evaluation of
* non-immutable functions.
*/
if (!contain_mutable_functions((Node *) constraint_pred))
subpaths = lappend(subpaths, childrel->cheapest_total_path);
/*
- * Propagate size information from the child back to the parent.
- * For simplicity, we use the largest widths from any child as the
- * parent estimates.
+ * Propagate size information from the child back to the parent. For
+ * simplicity, we use the largest widths from any child as the parent
+ * estimates.
*/
rel->rows += childrel->rows;
if (childrel->width > rel->width)
}
/*
- * Finally, build Append path and install it as the only access path
- * for the parent rel. (Note: this is correct even if we have zero
- * or one live subpath due to constraint exclusion.)
+ * Finally, build Append path and install it as the only access path for
+ * the parent rel. (Note: this is correct even if we have zero or one
+ * live subpath due to constraint exclusion.)
*/
add_path(rel, (Path *) create_append_path(rel, subpaths));
/*
* If there are any restriction clauses that have been attached to the
- * subquery relation, consider pushing them down to become WHERE or
- * HAVING quals of the subquery itself. This transformation is useful
- * because it may allow us to generate a better plan for the subquery
- * than evaluating all the subquery output rows and then filtering them.
+ * subquery relation, consider pushing them down to become WHERE or HAVING
+ * quals of the subquery itself. This transformation is useful because it
+ * may allow us to generate a better plan for the subquery than evaluating
+ * all the subquery output rows and then filtering them.
*
- * There are several cases where we cannot push down clauses.
- * Restrictions involving the subquery are checked by
- * subquery_is_pushdown_safe(). Restrictions on individual clauses
- * are checked by qual_is_pushdown_safe().
+ * There are several cases where we cannot push down clauses. Restrictions
+ * involving the subquery are checked by subquery_is_pushdown_safe().
+ * Restrictions on individual clauses are checked by
+ * qual_is_pushdown_safe().
*
- * Non-pushed-down clauses will get evaluated as qpquals of the
- * SubqueryScan node.
+ * Non-pushed-down clauses will get evaluated as qpquals of the SubqueryScan
+ * node.
*
* XXX Are there any cases where we want to make a policy decision not to
* push down a pushable qual, because it'd result in a worse plan?
pfree(differentTypes);
/*
- * We can safely pass the outer tuple_fraction down to the subquery
- * if the outer level has no joining, aggregation, or sorting to do.
- * Otherwise we'd better tell the subquery to plan for full retrieval.
- * (XXX This could probably be made more intelligent ...)
+ * We can safely pass the outer tuple_fraction down to the subquery if the
+ * outer level has no joining, aggregation, or sorting to do. Otherwise
+ * we'd better tell the subquery to plan for full retrieval. (XXX This
+ * could probably be made more intelligent ...)
*/
if (parse->hasAggs ||
parse->groupClause ||
/*
* Count the number of child jointree nodes. This is the depth of the
- * dynamic-programming algorithm we must employ to consider all ways
- * of joining the child nodes.
+ * dynamic-programming algorithm we must employ to consider all ways of
+ * joining the child nodes.
*/
levels_needed = list_length(from->fromlist);
RelOptInfo *rel;
/*
- * We employ a simple "dynamic programming" algorithm: we first find
- * all ways to build joins of two jointree items, then all ways to
- * build joins of three items (from two-item joins and single items),
- * then four-item joins, and so on until we have considered all ways
- * to join all the items into one rel.
+ * We employ a simple "dynamic programming" algorithm: we first find all
+ * ways to build joins of two jointree items, then all ways to build joins
+ * of three items (from two-item joins and single items), then four-item
+ * joins, and so on until we have considered all ways to join all the
+ * items into one rel.
*
* joinitems[j] is a list of all the j-item rels. Initially we set
* joinitems[1] to represent all the single-jointree-item relations.
return false;
/*
- * Examine all Vars used in clause; since it's a restriction clause,
- * all such Vars must refer to subselect output columns.
+ * Examine all Vars used in clause; since it's a restriction clause, all
+ * such Vars must refer to subselect output columns.
*/
vars = pull_var_clause(qual, false);
foreach(vl, vars)
Assert(var->varno == rti);
/*
- * We use a bitmapset to avoid testing the same attno more than
- * once. (NB: this only works because subquery outputs can't have
- * negative attnos.)
+ * We use a bitmapset to avoid testing the same attno more than once.
+ * (NB: this only works because subquery outputs can't have negative
+ * attnos.)
*/
if (bms_is_member(var->varattno, tested))
continue;
else
{
/*
- * We need to replace Vars in the qual (which must refer to
- * outputs of the subquery) with copies of the subquery's
- * targetlist expressions. Note that at this point, any uplevel
- * Vars in the qual should have been replaced with Params, so they
- * need no work.
+ * We need to replace Vars in the qual (which must refer to outputs of
+ * the subquery) with copies of the subquery's targetlist expressions.
+ * Note that at this point, any uplevel Vars in the qual should have
+ * been replaced with Params, so they need no work.
*
* This step also ensures that when we are pushing into a setop tree,
* each component query gets its own copy of the qual.
CMD_SELECT, 0);
/*
- * Now attach the qual to the proper place: normally WHERE, but
- * if the subquery uses grouping or aggregation, put it in HAVING
- * (since the qual really refers to the group-result rows).
+ * Now attach the qual to the proper place: normally WHERE, but if the
+ * subquery uses grouping or aggregation, put it in HAVING (since the
+ * qual really refers to the group-result rows).
*/
if (subquery->hasAggs || subquery->groupClause || subquery->havingQual)
subquery->havingQual = make_and_qual(subquery->havingQual, qual);
/*
* We need not change the subquery's hasAggs or hasSublinks flags,
- * since we can't be pushing down any aggregates that weren't
- * there before, and we don't push down subselects at all.
+ * since we can't be pushing down any aggregates that weren't there
+ * before, and we don't push down subselects at all.
*/
}
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.74 2005/10/11 16:44:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.75 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* hisel + losel + null_frac - 1.)
*
* If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation
- * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
+ * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
* yields an impossible (negative) result.
*
* A free side-effect is that we can recognize redundant inequalities such
ListCell *l;
/*
- * Initial scan over clauses. Anything that doesn't look like a
- * potential rangequery clause gets multiplied into s1 and forgotten.
- * Anything that does gets inserted into an rqlist entry.
+ * Initial scan over clauses. Anything that doesn't look like a potential
+ * rangequery clause gets multiplied into s1 and forgotten. Anything that
+ * does gets inserted into an rqlist entry.
*/
foreach(l, clauses)
{
rinfo = NULL;
/*
- * See if it looks like a restriction clause with a pseudoconstant
- * on one side. (Anything more complicated than that might not
- * behave in the simple way we are expecting.) Most of the tests
- * here can be done more efficiently with rinfo than without.
+ * See if it looks like a restriction clause with a pseudoconstant on
+ * one side. (Anything more complicated than that might not behave in
+ * the simple way we are expecting.) Most of the tests here can be
+ * done more efficiently with rinfo than without.
*/
if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
{
{
ok = (bms_membership(rinfo->clause_relids) == BMS_SINGLETON) &&
(is_pseudo_constant_clause_relids(lsecond(expr->args),
- rinfo->right_relids) ||
+ rinfo->right_relids) ||
(varonleft = false,
- is_pseudo_constant_clause_relids(linitial(expr->args),
- rinfo->left_relids)));
+ is_pseudo_constant_clause_relids(linitial(expr->args),
+ rinfo->left_relids)));
}
else
{
{
/*
* If it's not a "<" or ">" operator, just merge the
- * selectivity in generically. But if it's the right
- * oprrest, add the clause to rqlist for later processing.
+ * selectivity in generically. But if it's the right oprrest,
+ * add the clause to rqlist for later processing.
*/
switch (get_oprrest(expr->opno))
{
/*
* Exact equality to the default value probably means the
- * selectivity function punted. This is not airtight but
- * should be good enough.
+ * selectivity function punted. This is not airtight but should
+ * be good enough.
*/
if (rqlist->hibound == DEFAULT_INEQ_SEL ||
rqlist->lobound == DEFAULT_INEQ_SEL)
for (rqelem = *rqlist; rqelem; rqelem = rqelem->next)
{
/*
- * We use full equal() here because the "var" might be a function
- * of one or more attributes of the same relation...
+ * We use full equal() here because the "var" might be a function of
+ * one or more attributes of the same relation...
*/
if (!equal(var, rqelem->var))
continue;
rinfo = (RestrictInfo *) clause;
/*
- * If possible, cache the result of the selectivity calculation
- * for the clause. We can cache if varRelid is zero or the clause
- * contains only vars of that relid --- otherwise varRelid will
- * affect the result, so mustn't cache. We also have to be
- * careful about the jointype. It's OK to cache when jointype is
- * JOIN_INNER or one of the outer join types (any given outer-join
- * clause should always be examined with the same jointype, so
- * result won't change). It's not OK to cache when jointype is one
- * of the special types associated with IN processing, because the
- * same clause may be examined with different jointypes and the
- * result should vary.
+ * If possible, cache the result of the selectivity calculation for
+ * the clause. We can cache if varRelid is zero or the clause
+ * contains only vars of that relid --- otherwise varRelid will affect
+ * the result, so mustn't cache. We also have to be careful about the
+ * jointype. It's OK to cache when jointype is JOIN_INNER or one of
+ * the outer join types (any given outer-join clause should always be
+ * examined with the same jointype, so result won't change). It's not
+ * OK to cache when jointype is one of the special types associated
+ * with IN processing, because the same clause may be examined with
+ * different jointypes and the result should vary.
*/
if (varRelid == 0 ||
bms_is_subset_singleton(rinfo->clause_relids, varRelid))
Var *var = (Var *) clause;
/*
- * We probably shouldn't ever see an uplevel Var here, but if we
- * do, return the default selectivity...
+ * We probably shouldn't ever see an uplevel Var here, but if we do,
+ * return the default selectivity...
*/
if (var->varlevelsup == 0 &&
(varRelid == 0 || varRelid == (int) var->varno))
if (rte->rtekind == RTE_SUBQUERY)
{
/*
- * XXX not smart about subquery references... any way to
- * do better?
+ * XXX not smart about subquery references... any way to do
+ * better?
*/
s1 = 0.5;
}
else
{
/*
- * A Var at the top of a clause must be a bool Var. This
- * is equivalent to the clause reln.attribute = 't', so we
+ * A Var at the top of a clause must be a bool Var. This is
+ * equivalent to the clause reln.attribute = 't', so we
* compute the selectivity as if that is what we have.
*/
s1 = restriction_selectivity(root,
BooleanEqualOperator,
list_make2(var,
- makeBoolConst(true,
- false)),
+ makeBoolConst(true,
+ false)),
varRelid);
}
}
{
/* inverse of the selectivity of the underlying clause */
s1 = 1.0 - clause_selectivity(root,
- (Node *) get_notclausearg((Expr *) clause),
+ (Node *) get_notclausearg((Expr *) clause),
varRelid,
jointype);
}
{
/*
* If we are considering a nestloop join then all clauses are
- * restriction clauses, since we are only interested in the
- * one relation.
+ * restriction clauses, since we are only interested in the one
+ * relation.
*/
is_join_clause = false;
}
else
{
/*
- * Otherwise, it's a join if there's more than one relation
- * used. We can optimize this calculation if an rinfo was
- * passed.
+ * Otherwise, it's a join if there's more than one relation used.
+ * We can optimize this calculation if an rinfo was passed.
*/
if (rinfo)
is_join_clause = (bms_membership(rinfo->clause_relids) ==
else if (is_funcclause(clause))
{
/*
- * This is not an operator, so we guess at the selectivity. THIS
- * IS A HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
+ * This is not an operator, so we guess at the selectivity. THIS IS A
+ * HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
* SELECTIVITIES THEMSELVES. -- JMH 7/9/92
*/
s1 = (Selectivity) 0.3333333;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.148 2005/10/05 17:19:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.149 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/*
* Force estimate to be at least one row, to make explain output look
- * better and to avoid possible divide-by-zero when interpolating
- * costs. Make it an integer, too.
+ * better and to avoid possible divide-by-zero when interpolating costs.
+ * Make it an integer, too.
*/
if (nrows < 1.0)
nrows = 1.0;
/*
* disk costs
*
- * The cost of reading a page sequentially is 1.0, by definition. Note
- * that the Unix kernel will typically do some amount of read-ahead
- * optimization, so that this cost is less than the true cost of
- * reading a page from disk. We ignore that issue here, but must take
- * it into account when estimating the cost of non-sequential
- * accesses!
+ * The cost of reading a page sequentially is 1.0, by definition. Note that
+ * the Unix kernel will typically do some amount of read-ahead
+ * optimization, so that this cost is less than the true cost of reading a
+ * page from disk. We ignore that issue here, but must take it into
+ * account when estimating the cost of non-sequential accesses!
*/
run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
startup_cost += disable_cost;
/*
- * Call index-access-method-specific code to estimate the processing
- * cost for scanning the index, as well as the selectivity of the
- * index (ie, the fraction of main-table tuples we will have to
- * retrieve) and its correlation to the main-table tuple order.
+ * Call index-access-method-specific code to estimate the processing cost
+ * for scanning the index, as well as the selectivity of the index (ie,
+ * the fraction of main-table tuples we will have to retrieve) and its
+ * correlation to the main-table tuple order.
*/
OidFunctionCall7(index->amcostestimate,
PointerGetDatum(root),
/*
* Save amcostestimate's results for possible use in bitmap scan planning.
- * We don't bother to save indexStartupCost or indexCorrelation, because
- * a bitmap scan doesn't care about either.
+ * We don't bother to save indexStartupCost or indexCorrelation, because a
+ * bitmap scan doesn't care about either.
*/
path->indextotalcost = indexTotalCost;
path->indexselectivity = indexSelectivity;
}
/*
- * min_IO_cost corresponds to the perfectly correlated case
- * (csquared=1), max_IO_cost to the perfectly uncorrelated case
- * (csquared=0). Note that we just charge random_page_cost per page
- * in the uncorrelated case, rather than using
- * cost_nonsequential_access, since we've already accounted for
- * caching effects by using the Mackert model.
+ * min_IO_cost corresponds to the perfectly correlated case (csquared=1),
+ * max_IO_cost to the perfectly uncorrelated case (csquared=0). Note that
+ * we just charge random_page_cost per page in the uncorrelated case,
+ * rather than using cost_nonsequential_access, since we've already
+ * accounted for caching effects by using the Mackert model.
*/
min_IO_cost = ceil(indexSelectivity * T);
max_IO_cost = pages_fetched * random_page_cost;
/*
- * Now interpolate based on estimated index order correlation to get
- * total disk I/O cost for main table accesses.
+ * Now interpolate based on estimated index order correlation to get total
+ * disk I/O cost for main table accesses.
*/
csquared = indexCorrelation * indexCorrelation;
* Normally the indexquals will be removed from the list of restriction
* clauses that we have to evaluate as qpquals, so we should subtract
* their costs from baserestrictcost. But if we are doing a join then
- * some of the indexquals are join clauses and shouldn't be
- * subtracted. Rather than work out exactly how much to subtract, we
- * don't subtract anything.
+ * some of the indexquals are join clauses and shouldn't be subtracted.
+ * Rather than work out exactly how much to subtract, we don't subtract
+ * anything.
*/
startup_cost += baserel->baserestrictcost.startup;
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
/*
* For small numbers of pages we should charge random_page_cost apiece,
* while if nearly all the table's pages are being read, it's more
- * appropriate to charge 1.0 apiece. The effect is nonlinear, too.
- * For lack of a better idea, interpolate like this to determine the
- * cost per page.
+ * appropriate to charge 1.0 apiece. The effect is nonlinear, too. For
+ * lack of a better idea, interpolate like this to determine the cost per
+ * page.
*/
if (pages_fetched >= 2.0)
cost_per_page = random_page_cost -
/*
* Estimate CPU costs per tuple.
*
- * Often the indexquals don't need to be rechecked at each tuple ...
- * but not always, especially not if there are enough tuples involved
- * that the bitmaps become lossy. For the moment, just assume they
- * will be rechecked always.
+ * Often the indexquals don't need to be rechecked at each tuple ... but not
+ * always, especially not if there are enough tuples involved that the
+ * bitmaps become lossy. For the moment, just assume they will be
+ * rechecked always.
*/
startup_cost += baserel->baserestrictcost.startup;
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
* Estimate the cost of a BitmapAnd node
*
* Note that this considers only the costs of index scanning and bitmap
- * creation, not the eventual heap access. In that sense the object isn't
+ * creation, not the eventual heap access. In that sense the object isn't
* truly a Path, but it has enough path-like properties (costs in particular)
* to warrant treating it as one.
*/
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
Cost totalCost;
- Selectivity selec;
+ Selectivity selec;
ListCell *l;
/*
- * We estimate AND selectivity on the assumption that the inputs
- * are independent. This is probably often wrong, but we don't
- * have the info to do better.
+ * We estimate AND selectivity on the assumption that the inputs are
+ * independent. This is probably often wrong, but we don't have the info
+ * to do better.
*
* The runtime cost of the BitmapAnd itself is estimated at 100x
- * cpu_operator_cost for each tbm_intersect needed. Probably too
- * small, definitely too simplistic?
+ * cpu_operator_cost for each tbm_intersect needed. Probably too small,
+ * definitely too simplistic?
*/
totalCost = 0.0;
selec = 1.0;
foreach(l, path->bitmapquals)
{
- Path *subpath = (Path *) lfirst(l);
- Cost subCost;
+ Path *subpath = (Path *) lfirst(l);
+ Cost subCost;
Selectivity subselec;
cost_bitmap_tree_node(subpath, &subCost, &subselec);
cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
{
Cost totalCost;
- Selectivity selec;
+ Selectivity selec;
ListCell *l;
/*
- * We estimate OR selectivity on the assumption that the inputs
- * are non-overlapping, since that's often the case in "x IN (list)"
- * type situations. Of course, we clamp to 1.0 at the end.
+ * We estimate OR selectivity on the assumption that the inputs are
+ * non-overlapping, since that's often the case in "x IN (list)" type
+ * situations. Of course, we clamp to 1.0 at the end.
*
* The runtime cost of the BitmapOr itself is estimated at 100x
- * cpu_operator_cost for each tbm_union needed. Probably too
- * small, definitely too simplistic? We are aware that the tbm_unions
- * are optimized out when the inputs are BitmapIndexScans.
+ * cpu_operator_cost for each tbm_union needed. Probably too small,
+ * definitely too simplistic? We are aware that the tbm_unions are
+ * optimized out when the inputs are BitmapIndexScans.
*/
totalCost = 0.0;
selec = 0.0;
foreach(l, path->bitmapquals)
{
- Path *subpath = (Path *) lfirst(l);
- Cost subCost;
+ Path *subpath = (Path *) lfirst(l);
+ Cost subCost;
Selectivity subselec;
cost_bitmap_tree_node(subpath, &subCost, &subselec);
Assert(baserel->rtekind == RTE_SUBQUERY);
/*
- * Cost of path is cost of evaluating the subplan, plus cost of
- * evaluating any restriction clauses that will be attached to the
- * SubqueryScan node, plus cpu_tuple_cost to account for selection and
- * projection overhead.
+ * Cost of path is cost of evaluating the subplan, plus cost of evaluating
+ * any restriction clauses that will be attached to the SubqueryScan node,
+ * plus cpu_tuple_cost to account for selection and projection overhead.
*/
path->startup_cost = baserel->subplan->startup_cost;
path->total_cost = baserel->subplan->total_cost;
/*
* For now, estimate function's cost at one operator eval per function
- * call. Someday we should revive the function cost estimate columns
- * in pg_proc...
+ * call. Someday we should revive the function cost estimate columns in
+ * pg_proc...
*/
cpu_per_tuple = cpu_operator_cost;
startup_cost += disable_cost;
/*
- * We want to be sure the cost of a sort is never estimated as zero,
- * even if passed-in tuple count is zero. Besides, mustn't do
- * log(0)...
+ * We want to be sure the cost of a sort is never estimated as zero, even
+ * if passed-in tuple count is zero. Besides, mustn't do log(0)...
*/
if (tuples < 2.0)
tuples = 2.0;
}
/*
- * Also charge a small amount (arbitrarily set equal to operator cost)
- * per extracted tuple.
+ * Also charge a small amount (arbitrarily set equal to operator cost) per
+ * extracted tuple.
*/
run_cost += cpu_operator_cost * tuples;
/*
* Charge a very small amount per inserted tuple, to reflect bookkeeping
- * costs. We use cpu_tuple_cost/10 for this. This is needed to break
- * the tie that would otherwise exist between nestloop with A outer,
+ * costs. We use cpu_tuple_cost/10 for this. This is needed to break the
+ * tie that would otherwise exist between nestloop with A outer,
* materialized B inner and nestloop with B outer, materialized A inner.
* The extra cost ensures we'll prefer materializing the smaller rel.
*/
startup_cost += cpu_tuple_cost * 0.1 * tuples;
/*
- * Also charge a small amount per extracted tuple. We use
- * cpu_tuple_cost so that it doesn't appear worthwhile to materialize
- * a bare seqscan.
+ * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
+ * so that it doesn't appear worthwhile to materialize a bare seqscan.
*/
run_cost += cpu_tuple_cost * tuples;
Cost total_cost;
/*
- * We charge one cpu_operator_cost per aggregate function per input
- * tuple, and another one per output tuple (corresponding to transfn
- * and finalfn calls respectively). If we are grouping, we charge an
- * additional cpu_operator_cost per grouping column per input tuple
- * for grouping comparisons.
+ * We charge one cpu_operator_cost per aggregate function per input tuple,
+ * and another one per output tuple (corresponding to transfn and finalfn
+ * calls respectively). If we are grouping, we charge an additional
+ * cpu_operator_cost per grouping column per input tuple for grouping
+ * comparisons.
*
* We will produce a single output tuple if not grouping, and a tuple per
* group otherwise. We charge cpu_tuple_cost for each output tuple.
*
- * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
- * input path is already sorted appropriately, AGG_SORTED should be
- * preferred (since it has no risk of memory overflow). This will
- * happen as long as the computed total costs are indeed exactly equal
- * --- but if there's roundoff error we might do the wrong thing. So
- * be sure that the computations below form the same intermediate
- * values in the same order.
+ * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the same
+ * total CPU cost, but AGG_SORTED has lower startup cost. If the input
+ * path is already sorted appropriately, AGG_SORTED should be preferred
+ * (since it has no risk of memory overflow). This will happen as long as
+ * the computed total costs are indeed exactly equal --- but if there's
+ * roundoff error we might do the wrong thing. So be sure that the
+ * computations below form the same intermediate values in the same order.
*/
if (aggstrategy == AGG_PLAIN)
{
total_cost = input_total_cost;
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples.
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+ * all columns get compared at most of the tuples.
*/
total_cost += cpu_operator_cost * input_tuples * numGroupCols;
Selectivity joininfactor;
/*
- * If inner path is an indexscan, be sure to use its estimated output
- * row count, which may be lower than the restriction-clause-only row
- * count of its parent. (We don't include this case in the PATH_ROWS
- * macro because it applies *only* to a nestloop's inner relation.)
+ * If inner path is an indexscan, be sure to use its estimated output row
+ * count, which may be lower than the restriction-clause-only row count of
+ * its parent. (We don't include this case in the PATH_ROWS macro because
+ * it applies *only* to a nestloop's inner relation.)
*/
if (IsA(inner_path, IndexPath))
inner_path_rows = ((IndexPath *) inner_path)->rows;
startup_cost += disable_cost;
/*
- * If we're doing JOIN_IN then we will stop scanning inner tuples for
- * an outer tuple as soon as we have one match. Account for the
- * effects of this by scaling down the cost estimates in proportion to
- * the JOIN_IN selectivity. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop scanning inner tuples for an
+ * outer tuple as soon as we have one match. Account for the effects of
+ * this by scaling down the cost estimates in proportion to the JOIN_IN
+ * selectivity. (This assumes that all the quals attached to the join are
+ * IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(path, root);
* NOTE: clearly, we must pay both outer and inner paths' startup_cost
* before we can start returning tuples, so the join's startup cost is
* their sum. What's not so clear is whether the inner path's
- * startup_cost must be paid again on each rescan of the inner path.
- * This is not true if the inner path is materialized or is a
- * hashjoin, but probably is true otherwise.
+ * startup_cost must be paid again on each rescan of the inner path. This
+ * is not true if the inner path is materialized or is a hashjoin, but
+ * probably is true otherwise.
*/
startup_cost += outer_path->startup_cost + inner_path->startup_cost;
run_cost += outer_path->total_cost - outer_path->startup_cost;
/*
* Compute cost and selectivity of the mergequals and qpquals (other
- * restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result
- * much.
+ * restriction clauses) separately. We use approx_selectivity here for
+ * speed --- in most cases, any errors won't affect the result much.
*
- * Note: it's probably bogus to use the normal selectivity calculation
- * here when either the outer or inner path is a UniquePath.
+ * Note: it's probably bogus to use the normal selectivity calculation here
+ * when either the outer or inner path is a UniquePath.
*/
merge_selec = approx_selectivity(root, mergeclauses,
path->jpath.jointype);
mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
/*
- * When there are equal merge keys in the outer relation, the
- * mergejoin must rescan any matching tuples in the inner relation.
- * This means re-fetching inner tuples. Our cost model for this is
- * that a re-fetch costs the same as an original fetch, which is
- * probably an overestimate; but on the other hand we ignore the
- * bookkeeping costs of mark/restore. Not clear if it's worth
- * developing a more refined model.
+ * When there are equal merge keys in the outer relation, the mergejoin
+ * must rescan any matching tuples in the inner relation. This means
+ * re-fetching inner tuples. Our cost model for this is that a re-fetch
+ * costs the same as an original fetch, which is probably an overestimate;
+ * but on the other hand we ignore the bookkeeping costs of mark/restore.
+ * Not clear if it's worth developing a more refined model.
*
- * The number of re-fetches can be estimated approximately as size of
- * merge join output minus size of inner relation. Assume that the
- * distinct key values are 1, 2, ..., and denote the number of values
- * of each key in the outer relation as m1, m2, ...; in the inner
- * relation, n1, n2, ... Then we have
+ * The number of re-fetches can be estimated approximately as size of merge
+ * join output minus size of inner relation. Assume that the distinct key
+ * values are 1, 2, ..., and denote the number of values of each key in
+ * the outer relation as m1, m2, ...; in the inner relation, n1, n2, ...
+ * Then we have
*
* size of join = m1 * n1 + m2 * n2 + ...
*
- * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
- * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
+ * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 * n1
+ * + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
* relation
*
- * This equation works correctly for outer tuples having no inner match
- * (nk = 0), but not for inner tuples having no outer match (mk = 0);
- * we are effectively subtracting those from the number of rescanned
- * tuples, when we should not. Can we do better without expensive
- * selectivity computations?
+ * This equation works correctly for outer tuples having no inner match (nk =
+ * 0), but not for inner tuples having no outer match (mk = 0); we are
+ * effectively subtracting those from the number of rescanned tuples, when
+ * we should not. Can we do better without expensive selectivity
+ * computations?
*/
if (IsA(outer_path, UniquePath))
rescannedtuples = 0;
* inputs that will actually need to be scanned. We use only the first
* (most significant) merge clause for this purpose.
*
- * Since this calculation is somewhat expensive, and will be the same for
- * all mergejoin paths associated with the merge clause, we cache the
- * results in the RestrictInfo node.
+ * Since this calculation is somewhat expensive, and will be the same for all
+ * mergejoin paths associated with the merge clause, we cache the results
+ * in the RestrictInfo node.
*/
if (mergeclauses && path->jpath.jointype != JOIN_FULL)
{
/*
* Readjust scan selectivities to account for above rounding. This is
- * normally an insignificant effect, but when there are only a few
- * rows in the inputs, failing to do this makes for a large percentage
- * error.
+ * normally an insignificant effect, but when there are only a few rows in
+ * the inputs, failing to do this makes for a large percentage error.
*/
outerscansel = outer_rows / outer_path_rows;
innerscansel = inner_rows / inner_path_rows;
/* CPU costs */
/*
- * If we're doing JOIN_IN then we will stop outputting inner tuples
- * for an outer tuple as soon as we have one match. Account for the
- * effects of this by scaling down the cost estimates in proportion to
- * the expected output size. (This assumes that all the quals
- * attached to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop outputting inner tuples for an
+ * outer tuple as soon as we have one match. Account for the effects of
+ * this by scaling down the cost estimates in proportion to the expected
+ * output size. (This assumes that all the quals attached to the join are
+ * IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(&path->jpath, root);
/*
- * The number of tuple comparisons needed is approximately number of
- * outer rows plus number of inner rows plus number of rescanned
- * tuples (can we refine this?). At each one, we need to evaluate the
- * mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
- * so do NOT include joininfactor.
+ * The number of tuple comparisons needed is approximately number of outer
+ * rows plus number of inner rows plus number of rescanned tuples (can we
+ * refine this?). At each one, we need to evaluate the mergejoin quals.
+ * NOTE: JOIN_IN mode does not save any work here, so do NOT include
+ * joininfactor.
*/
startup_cost += merge_qual_cost.startup;
run_cost += merge_qual_cost.per_tuple *
/*
* For each tuple that gets through the mergejoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
- * since not all of the quals may get evaluated at each tuple.) This
- * work is skipped in JOIN_IN mode, so apply the factor.
+ * clauses that are to be applied at the join. (This is pessimistic since
+ * not all of the quals may get evaluated at each tuple.) This work is
+ * skipped in JOIN_IN mode, so apply the factor.
*/
startup_cost += qp_qual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
double outer_path_rows = PATH_ROWS(outer_path);
double inner_path_rows = PATH_ROWS(inner_path);
double outerbytes = relation_byte_size(outer_path_rows,
- outer_path->parent->width);
+ outer_path->parent->width);
double innerbytes = relation_byte_size(inner_path_rows,
- inner_path->parent->width);
+ inner_path->parent->width);
int num_hashclauses = list_length(hashclauses);
int numbuckets;
int numbatches;
/*
* Compute cost and selectivity of the hashquals and qpquals (other
- * restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result
- * much.
+ * restriction clauses) separately. We use approx_selectivity here for
+ * speed --- in most cases, any errors won't affect the result much.
*
- * Note: it's probably bogus to use the normal selectivity calculation
- * here when either the outer or inner path is a UniquePath.
+ * Note: it's probably bogus to use the normal selectivity calculation here
+ * when either the outer or inner path is a UniquePath.
*/
hash_selec = approx_selectivity(root, hashclauses,
path->jpath.jointype);
startup_cost += inner_path->total_cost;
/*
- * Cost of computing hash function: must do it once per input tuple.
- * We charge one cpu_operator_cost for each column's hash function.
+ * Cost of computing hash function: must do it once per input tuple. We
+ * charge one cpu_operator_cost for each column's hash function.
*
- * XXX when a hashclause is more complex than a single operator, we
- * really should charge the extra eval costs of the left or right
- * side, as appropriate, here. This seems more work than it's worth
- * at the moment.
+ * XXX when a hashclause is more complex than a single operator, we really
+ * should charge the extra eval costs of the left or right side, as
+ * appropriate, here. This seems more work than it's worth at the moment.
*/
startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
inner_path->parent->width,
&numbuckets,
&numbatches);
- virtualbuckets = (double) numbuckets * (double) numbatches;
+ virtualbuckets = (double) numbuckets *(double) numbatches;
/*
- * Determine bucketsize fraction for inner relation. We use the
- * smallest bucketsize estimated for any individual hashclause; this
- * is undoubtedly conservative.
+ * Determine bucketsize fraction for inner relation. We use the smallest
+ * bucketsize estimated for any individual hashclause; this is undoubtedly
+ * conservative.
*
- * BUT: if inner relation has been unique-ified, we can assume it's good
- * for hashing. This is important both because it's the right answer,
- * and because we avoid contaminating the cache with a value that's
- * wrong for non-unique-ified paths.
+ * BUT: if inner relation has been unique-ified, we can assume it's good for
+ * hashing. This is important both because it's the right answer, and
+ * because we avoid contaminating the cache with a value that's wrong for
+ * non-unique-ified paths.
*/
if (IsA(inner_path, UniquePath))
innerbucketsize = 1.0 / virtualbuckets;
Assert(IsA(restrictinfo, RestrictInfo));
/*
- * First we have to figure out which side of the hashjoin
- * clause is the inner side.
+ * First we have to figure out which side of the hashjoin clause
+ * is the inner side.
*
* Since we tend to visit the same clauses over and over when
- * planning a large query, we cache the bucketsize estimate in
- * the RestrictInfo node to avoid repeated lookups of
- * statistics.
+ * planning a large query, we cache the bucketsize estimate in the
+ * RestrictInfo node to avoid repeated lookups of statistics.
*/
if (bms_is_subset(restrictinfo->right_relids,
inner_path->parent->relids))
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- get_rightop(restrictinfo->clause),
+ get_rightop(restrictinfo->clause),
virtualbuckets);
restrictinfo->right_bucketsize = thisbucketsize;
}
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- get_leftop(restrictinfo->clause),
+ get_leftop(restrictinfo->clause),
virtualbuckets);
restrictinfo->left_bucketsize = thisbucketsize;
}
/*
* If inner relation is too big then we will need to "batch" the join,
- * which implies writing and reading most of the tuples to disk an
- * extra time. Charge one cost unit per page of I/O (correct since it
- * should be nice and sequential...). Writing the inner rel counts as
- * startup cost, all the rest as run cost.
+ * which implies writing and reading most of the tuples to disk an extra
+ * time. Charge one cost unit per page of I/O (correct since it should be
+ * nice and sequential...). Writing the inner rel counts as startup cost,
+ * all the rest as run cost.
*/
if (numbatches > 1)
{
/* CPU costs */
/*
- * If we're doing JOIN_IN then we will stop comparing inner tuples to
- * an outer tuple as soon as we have one match. Account for the
- * effects of this by scaling down the cost estimates in proportion to
- * the expected output size. (This assumes that all the quals
- * attached to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop comparing inner tuples to an
+ * outer tuple as soon as we have one match. Account for the effects of
+ * this by scaling down the cost estimates in proportion to the expected
+ * output size. (This assumes that all the quals attached to the join are
+ * IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(&path->jpath, root);
/*
- * The number of tuple comparisons needed is the number of outer
- * tuples times the typical number of tuples in a hash bucket, which
- * is the inner relation size times its bucketsize fraction. At each
- * one, we need to evaluate the hashjoin quals. (Note: charging the
- * full qual eval cost at each tuple is pessimistic, since we don't
- * evaluate the quals unless the hash values match exactly.)
+ * The number of tuple comparisons needed is the number of outer tuples
+ * times the typical number of tuples in a hash bucket, which is the inner
+ * relation size times its bucketsize fraction. At each one, we need to
+ * evaluate the hashjoin quals. (Note: charging the full qual eval cost
+ * at each tuple is pessimistic, since we don't evaluate the quals unless
+ * the hash values match exactly.)
*/
startup_cost += hash_qual_cost.startup;
run_cost += hash_qual_cost.per_tuple *
/*
* For each tuple that gets through the hashjoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
- * since not all of the quals may get evaluated at each tuple.)
+ * clauses that are to be applied at the join. (This is pessimistic since
+ * not all of the quals may get evaluated at each tuple.)
*/
startup_cost += qp_qual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
/*
* Bias against putting larger relation on inside. We don't want an
- * absolute prohibition, though, since larger relation might have
- * better bucketsize --- and we can't trust the size estimates
- * unreservedly, anyway. Instead, inflate the run cost by the square
- * root of the size ratio. (Why square root? No real good reason,
- * but it seems reasonable...)
+ * absolute prohibition, though, since larger relation might have better
+ * bucketsize --- and we can't trust the size estimates unreservedly,
+ * anyway. Instead, inflate the run cost by the square root of the size
+ * ratio. (Why square root? No real good reason, but it seems
+ * reasonable...)
*
* Note: before 7.4 we implemented this by inflating startup cost; but if
- * there's a disable_cost component in the input paths' startup cost,
- * that unfairly penalizes the hash. Probably it'd be better to keep
- * track of disable penalty separately from cost.
+ * there's a disable_cost component in the input paths' startup cost, that
+ * unfairly penalizes the hash. Probably it'd be better to keep track of
+ * disable penalty separately from cost.
*/
if (innerbytes > outerbytes && outerbytes > 0)
run_cost *= sqrt(innerbytes / outerbytes);
return false;
/*
- * Our basic strategy is to charge one cpu_operator_cost for each
- * operator or function node in the given tree. Vars and Consts are
- * charged zero, and so are boolean operators (AND, OR, NOT).
- * Simplistic, but a lot better than no model at all.
+ * Our basic strategy is to charge one cpu_operator_cost for each operator
+ * or function node in the given tree. Vars and Consts are charged zero,
+ * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
+ * better than no model at all.
*
- * Should we try to account for the possibility of short-circuit
- * evaluation of AND/OR?
+ * Should we try to account for the possibility of short-circuit evaluation
+ * of AND/OR?
*/
if (IsA(node, FuncExpr) ||
IsA(node, OpExpr) ||
{
/*
* A subplan node in an expression typically indicates that the
- * subplan will be executed on each evaluation, so charge
- * accordingly. (Sub-selects that can be executed as InitPlans
- * have already been removed from the expression.)
+ * subplan will be executed on each evaluation, so charge accordingly.
+ * (Sub-selects that can be executed as InitPlans have already been
+ * removed from the expression.)
*
- * An exception occurs when we have decided we can implement the
- * subplan by hashing.
+ * An exception occurs when we have decided we can implement the subplan
+ * by hashing.
*
*/
SubPlan *subplan = (SubPlan *) node;
if (subplan->useHashTable)
{
/*
- * If we are using a hash table for the subquery outputs, then
- * the cost of evaluating the query is a one-time cost. We
- * charge one cpu_operator_cost per tuple for the work of
- * loading the hashtable, too.
+ * If we are using a hash table for the subquery outputs, then the
+ * cost of evaluating the query is a one-time cost. We charge one
+ * cpu_operator_cost per tuple for the work of loading the
+ * hashtable, too.
*/
total->startup += plan->total_cost +
cpu_operator_cost * plan->plan_rows;
/*
- * The per-tuple costs include the cost of evaluating the
- * lefthand expressions, plus the cost of probing the
- * hashtable. Recursion into the exprs list will handle the
- * lefthand expressions properly, and will count one
- * cpu_operator_cost for each comparison operator. That is
- * probably too low for the probing cost, but it's hard to
- * make a better estimate, so live with it for now.
+ * The per-tuple costs include the cost of evaluating the lefthand
+ * expressions, plus the cost of probing the hashtable. Recursion
+ * into the exprs list will handle the lefthand expressions
+ * properly, and will count one cpu_operator_cost for each
+ * comparison operator. That is probably too low for the probing
+ * cost, but it's hard to make a better estimate, so live with it
+ * for now.
*/
}
else
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output we
- * will actually need to scan. NOTE: this logic should agree
- * with the estimates used by make_subplan() in
- * plan/subselect.c.
+ * evaluation. We need to estimate how much of the output we will
+ * actually need to scan. NOTE: this logic should agree with the
+ * estimates used by make_subplan() in plan/subselect.c.
*/
Cost plan_run_cost = plan->total_cost - plan->startup_cost;
/*
* Also account for subplan's startup cost. If the subplan is
- * uncorrelated or undirect correlated, AND its topmost node
- * is a Sort or Material node, assume that we'll only need to
- * pay its startup cost once; otherwise assume we pay the
- * startup cost every time.
+ * uncorrelated or undirect correlated, AND its topmost node is a
+ * Sort or Material node, assume that we'll only need to pay its
+ * startup cost once; otherwise assume we pay the startup cost
+ * every time.
*/
if (subplan->parParam == NIL &&
(IsA(plan, Sort) ||
/*
* Compute joinclause selectivity. Note that we are only considering
- * clauses that become restriction clauses at this join level; we are
- * not double-counting them because they were not considered in
- * estimating the sizes of the component rels.
+ * clauses that become restriction clauses at this join level; we are not
+ * double-counting them because they were not considered in estimating the
+ * sizes of the component rels.
*/
selec = clauselist_selectivity(root,
restrictlist,
/*
* Basically, we multiply size of Cartesian product by selectivity.
*
- * If we are doing an outer join, take that into account: the output must
- * be at least as large as the non-nullable input. (Is there any
- * chance of being even smarter?)
+ * If we are doing an outer join, take that into account: the output must be
+ * at least as large as the non-nullable input. (Is there any chance of
+ * being even smarter?)
*
- * For JOIN_IN and variants, the Cartesian product is figured with
- * respect to a unique-ified input, and then we can clamp to the size
- * of the other input.
+ * For JOIN_IN and variants, the Cartesian product is figured with respect to
+ * a unique-ified input, and then we can clamp to the size of the other
+ * input.
*/
switch (jointype)
{
return 1.0;
/*
- * Return 1.0 if the inner side is already known unique. The case
- * where the inner path is already a UniquePath probably cannot happen
- * in current usage, but check it anyway for completeness. The
- * interesting case is where we've determined the inner relation
- * itself is unique, which we can check by looking at the rows
- * estimate for its UniquePath.
+ * Return 1.0 if the inner side is already known unique. The case where
+ * the inner path is already a UniquePath probably cannot happen in
+ * current usage, but check it anyway for completeness. The interesting
+ * case is where we've determined the inner relation itself is unique,
+ * which we can check by looking at the rows estimate for its UniquePath.
*/
if (IsA(path->innerjoinpath, UniquePath))
return 1.0;
/*
* Compute same result set_joinrel_size_estimates would compute for
- * JOIN_INNER. Note that we use the input rels' absolute size
- * estimates, not PATH_ROWS() which might be less; if we used
- * PATH_ROWS() we'd be double-counting the effects of any join clauses
- * used in input scans.
+ * JOIN_INNER. Note that we use the input rels' absolute size estimates,
+ * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
+ * double-counting the effects of any join clauses used in input scans.
*/
selec = clauselist_selectivity(root,
path->joinrestrictinfo,
/*
* Estimate number of rows the function itself will return.
*
- * XXX no idea how to do this yet; but we can at least check whether
- * function returns set or not...
+ * XXX no idea how to do this yet; but we can at least check whether function
+ * returns set or not...
*/
if (expression_returns_set(rte->funcexpr))
rel->tuples = 1000;
ndx = var->varattno - rel->min_attr;
/*
- * The width probably hasn't been cached yet, but may as well
- * check
+ * The width probably hasn't been cached yet, but may as well check
*/
if (rel->attr_widths[ndx] > 0)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.190 2005/09/24 22:54:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.191 2005/10/15 02:49:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static List *find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
- List *clauses, List *outer_clauses,
- bool istoplevel, bool isjoininner,
- Relids outer_relids);
+ List *clauses, List *outer_clauses,
+ bool istoplevel, bool isjoininner,
+ Relids outer_relids);
static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths);
static int bitmap_path_comparator(const void *a, const void *b);
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths);
bool indexkey_on_left);
static Relids indexable_outerrelids(RelOptInfo *rel);
static bool matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel,
- Relids outer_relids);
+ Relids outer_relids);
static List *find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
- Relids outer_relids, bool isouterjoin);
+ Relids outer_relids, bool isouterjoin);
static ScanDirection match_variant_ordering(PlannerInfo *root,
- IndexOptInfo *index,
- List *restrictclauses);
+ IndexOptInfo *index,
+ List *restrictclauses);
static List *identify_ignorable_ordering_cols(PlannerInfo *root,
- IndexOptInfo *index,
- List *restrictclauses);
+ IndexOptInfo *index,
+ List *restrictclauses);
static bool match_index_to_query_keys(PlannerInfo *root,
- IndexOptInfo *index,
- ScanDirection indexscandir,
- List *ignorables);
+ IndexOptInfo *index,
+ ScanDirection indexscandir,
+ List *ignorables);
static bool match_boolean_index_clause(Node *clause, int indexcol,
- IndexOptInfo *index);
+ IndexOptInfo *index);
static bool match_special_index_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static Expr *expand_boolean_index_clause(Node *clause, int indexcol,
- IndexOptInfo *index);
+ IndexOptInfo *index);
static List *expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass);
static List *prefix_quals(Node *leftop, Oid opclass,
Const *prefix, Pattern_Prefix_Status pstatus);
true, false, NULL);
/*
- * We can submit them all to add_path. (This generates access paths for
- * plain IndexScan plans.) However, for the next step we will only want
+ * We can submit them all to add_path. (This generates access paths for
+ * plain IndexScan plans.) However, for the next step we will only want
* the ones that have some selectivity; we must discard anything that was
* generated solely for ordering purposes.
*/
bitindexpaths = list_concat(bitindexpaths, indexpaths);
/*
- * If we found anything usable, generate a BitmapHeapPath for the
- * most promising combination of bitmap index paths.
+ * If we found anything usable, generate a BitmapHeapPath for the most
+ * promising combination of bitmap index paths.
*/
if (bitindexpaths != NIL)
{
bool index_is_ordered;
/*
- * Ignore partial indexes that do not match the query. If a partial
- * index is marked predOK then we know it's OK; otherwise, if we
- * are at top level we know it's not OK (since predOK is exactly
- * whether its predicate could be proven from the toplevel clauses).
- * Otherwise, we have to test whether the added clauses are
- * sufficient to imply the predicate. If so, we could use
- * the index in the current context.
+ * Ignore partial indexes that do not match the query. If a partial
+ * index is marked predOK then we know it's OK; otherwise, if we are
+ * at top level we know it's not OK (since predOK is exactly whether
+ * its predicate could be proven from the toplevel clauses).
+ * Otherwise, we have to test whether the added clauses are sufficient
+ * to imply the predicate. If so, we could use the index in the
+ * current context.
*
- * We set useful_predicate to true iff the predicate was proven
- * using the current set of clauses. This is needed to prevent
- * matching a predOK index to an arm of an OR, which would be
- * a legal but pointlessly inefficient plan. (A better plan will
- * be generated by just scanning the predOK index alone, no OR.)
+ * We set useful_predicate to true iff the predicate was proven using the
+ * current set of clauses. This is needed to prevent matching a
+ * predOK index to an arm of an OR, which would be a legal but
+ * pointlessly inefficient plan. (A better plan will be generated by
+ * just scanning the predOK index alone, no OR.)
*/
useful_predicate = false;
if (index->indpred != NIL)
else
{
if (istoplevel)
- continue; /* no point in trying to prove it */
+ continue; /* no point in trying to prove it */
/* Form all_clauses if not done already */
if (all_clauses == NIL)
outer_clauses);
if (!predicate_implied_by(index->indpred, all_clauses))
- continue; /* can't use it at all */
+ continue; /* can't use it at all */
if (!predicate_implied_by(index->indpred, outer_clauses))
useful_predicate = true;
&found_clause);
/*
- * Not all index AMs support scans with no restriction clauses.
- * We can't generate a scan over an index with amoptionalkey = false
+ * Not all index AMs support scans with no restriction clauses. We
+ * can't generate a scan over an index with amoptionalkey = false
* unless there's at least one restriction clause.
*/
if (restrictclauses == NIL && !index->amoptionalkey)
continue;
/*
- * 2. Compute pathkeys describing index's ordering, if any, then
- * see how many of them are actually useful for this query. This
- * is not relevant unless we are at top level.
+ * 2. Compute pathkeys describing index's ordering, if any, then see
+ * how many of them are actually useful for this query. This is not
+ * relevant unless we are at top level.
*/
index_is_ordered = OidIsValid(index->ordering[0]);
if (istoplevel && index_is_ordered && !isjoininner)
/*
* 3. Generate an indexscan path if there are relevant restriction
* clauses in the current clauses, OR the index ordering is
- * potentially useful for later merging or final output ordering,
- * OR the index has a predicate that was proven by the current
- * clauses.
+ * potentially useful for later merging or final output ordering, OR
+ * the index has a predicate that was proven by the current clauses.
*/
if (found_clause || useful_pathkeys != NIL || useful_predicate)
{
}
/*
- * 4. If the index is ordered, and there is a requested query
- * ordering that we failed to match, consider variant ways of
- * achieving the ordering. Again, this is only interesting
- * at top level.
+ * 4. If the index is ordered, and there is a requested query ordering
+ * that we failed to match, consider variant ways of achieving the
+ * ordering. Again, this is only interesting at top level.
*/
if (istoplevel && index_is_ordered && !isjoininner &&
root->query_pathkeys != NIL &&
pathkeys_useful_for_ordering(root, useful_pathkeys) == 0)
{
- ScanDirection scandir;
+ ScanDirection scandir;
scandir = match_variant_ordering(root, index, restrictclauses);
if (!ScanDirectionIsNoMovement(scandir))
foreach(l, clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- List *pathlist;
- Path *bitmapqual;
- ListCell *j;
+ List *pathlist;
+ Path *bitmapqual;
+ ListCell *j;
Assert(IsA(rinfo, RestrictInfo));
/* Ignore RestrictInfos that aren't ORs */
continue;
/*
- * We must be able to match at least one index to each of the arms
- * of the OR, else we can't use it.
+ * We must be able to match at least one index to each of the arms of
+ * the OR, else we can't use it.
*/
pathlist = NIL;
foreach(j, ((BoolExpr *) rinfo->orclause)->args)
{
- Node *orarg = (Node *) lfirst(j);
- List *indlist;
+ Node *orarg = (Node *) lfirst(j);
+ List *indlist;
/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
{
- List *andargs = ((BoolExpr *) orarg)->args;
+ List *andargs = ((BoolExpr *) orarg)->args;
indlist = find_usable_indexes(root, rel,
andargs,
isjoininner,
outer_relids);
}
+
/*
- * If nothing matched this arm, we can't do anything
- * with this OR clause.
+ * If nothing matched this arm, we can't do anything with this OR
+ * clause.
*/
if (indlist == NIL)
{
pathlist = NIL;
break;
}
+
/*
- * OK, pick the most promising AND combination,
- * and add it to pathlist.
+ * OK, pick the most promising AND combination, and add it to
+ * pathlist.
*/
bitmapqual = choose_bitmap_and(root, rel, indlist);
pathlist = lappend(pathlist, bitmapqual);
}
+
/*
- * If we have a match for every arm, then turn them
- * into a BitmapOrPath, and add to result list.
+ * If we have a match for every arm, then turn them into a
+ * BitmapOrPath, and add to result list.
*/
if (pathlist != NIL)
{
* Given a nonempty list of bitmap paths, AND them into one path.
*
* This is a nontrivial decision since we can legally use any subset of the
- * given path set. We want to choose a good tradeoff between selectivity
+ * given path set. We want to choose a good tradeoff between selectivity
* and cost of computing the bitmap.
*
* The result is either a single one of the inputs, or a BitmapAndPath
int i;
ListCell *l;
- Assert(npaths > 0); /* else caller error */
+ Assert(npaths > 0); /* else caller error */
if (npaths == 1)
return (Path *) linitial(paths); /* easy case */
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
- * OR clauses. As a compromise, we sort the paths by selectivity.
- * We always take the first, and sequentially add on paths that result
- * in a lower estimated cost.
+ * OR clauses. As a compromise, we sort the paths by selectivity. We
+ * always take the first, and sequentially add on paths that result in a
+ * lower estimated cost.
*
- * We also make some effort to detect directly redundant input paths,
- * as can happen if there are multiple possibly usable indexes. For
- * this we look only at plain IndexPath inputs, not at sub-OR clauses.
- * And we consider an index redundant if all its index conditions were
- * already used by earlier indexes. (We could use predicate_implied_by
- * to have a more intelligent, but much more expensive, check --- but in
- * most cases simple pointer equality should suffice, since after all the
- * index conditions are all coming from the same RestrictInfo lists.)
+ * We also make some effort to detect directly redundant input paths, as can
+ * happen if there are multiple possibly usable indexes. For this we look
+ * only at plain IndexPath inputs, not at sub-OR clauses. And we consider
+ * an index redundant if all its index conditions were already used by
+ * earlier indexes. (We could use predicate_implied_by to have a more
+ * intelligent, but much more expensive, check --- but in most cases
+ * simple pointer equality should suffice, since after all the index
+ * conditions are all coming from the same RestrictInfo lists.)
*
- * XXX is there any risk of throwing away a useful partial index here
- * because we don't explicitly look at indpred? At least in simple
- * cases, the partial index will sort before competing non-partial
- * indexes and so it makes the right choice, but perhaps we need to
- * work harder.
+ * XXX is there any risk of throwing away a useful partial index here because
+ * we don't explicitly look at indpred? At least in simple cases, the
+ * partial index will sort before competing non-partial indexes and so it
+ * makes the right choice, but perhaps we need to work harder.
*
* Note: outputting the selected sub-paths in selectivity order is a good
* thing even if we weren't using that as part of the selection method,
qualsofar = list_copy(((IndexPath *) patharray[0])->indexclauses);
else
qualsofar = NIL;
- lastcell = list_head(paths); /* for quick deletions */
+ lastcell = list_head(paths); /* for quick deletions */
for (i = 1; i < npaths; i++)
{
- Path *newpath = patharray[i];
- List *newqual = NIL;
- Cost newcost;
+ Path *newpath = patharray[i];
+ List *newqual = NIL;
+ Cost newcost;
if (IsA(newpath, IndexPath))
{
static int
bitmap_path_comparator(const void *a, const void *b)
{
- Path *pa = *(Path * const *) a;
- Path *pb = *(Path * const *) b;
+ Path *pa = *(Path *const *) a;
+ Path *pb = *(Path *const *) b;
Cost acost;
Cost bcost;
- Selectivity aselec;
- Selectivity bselec;
+ Selectivity aselec;
+ Selectivity bselec;
cost_bitmap_tree_node(pa, &acost, &aselec);
cost_bitmap_tree_node(pb, &bcost, &bselec);
*
* We can use clauses from either the current clauses or outer_clauses lists,
* but *found_clause is set TRUE only if we used at least one clause from
- * the "current clauses" list. See find_usable_indexes() for motivation.
+ * the "current clauses" list. See find_usable_indexes() for motivation.
*
* outer_relids determines what Vars will be allowed on the other side
* of a possible index qual; see match_clause_to_indexcol().
* to the caller-specified outer_relids relations (which had better not
* include the relation whose index is being tested). outer_relids should
* be NULL when checking simple restriction clauses, and the outer side
- * of the join when building a join inner scan. Other than that, the
+ * of the join when building a join inner scan. Other than that, the
* only thing we don't like is volatile functions.
*
* Note: in most cases we already know that the clause as a whole uses
return true;
/*
- * If we didn't find a member of the index's opclass, see whether
- * it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether it
+ * is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, true))
return true;
return true;
/*
- * If we didn't find a member of the index's opclass, see whether
- * it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether it
+ * is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, false))
return true;
/*
* Note: if Postgres tried to optimize queries by forming equivalence
* classes over equi-joined attributes (i.e., if it recognized that a
- * qualification such as "where a.b=c.d and a.b=5" could make use of
- * an index on c.d), then we could use that equivalence class info
- * here with joininfo lists to do more complete tests for the usability
- * of a partial index. For now, the test only uses restriction
- * clauses (those in baserestrictinfo). --Nels, Dec '92
+ * qualification such as "where a.b=c.d and a.b=5" could make use of an
+ * index on c.d), then we could use that equivalence class info here with
+ * joininfo lists to do more complete tests for the usability of a partial
+ * index. For now, the test only uses restriction clauses (those in
+ * baserestrictinfo). --Nels, Dec '92
*
- * XXX as of 7.1, equivalence class info *is* available. Consider
- * improving this code as foreseen by Nels.
+ * XXX as of 7.1, equivalence class info *is* available. Consider improving
+ * this code as foreseen by Nels.
*/
foreach(ilist, rel->indexlist)
/*
* indexable_outerrelids
* Finds all other relids that participate in any indexable join clause
- * for the specified table. Returns a set of relids.
+ * for the specified table. Returns a set of relids.
*/
static Relids
indexable_outerrelids(RelOptInfo *rel)
foreach(l, rel->joininfo)
{
RestrictInfo *joininfo = (RestrictInfo *) lfirst(l);
- Relids other_rels;
+ Relids other_rels;
other_rels = bms_difference(joininfo->required_relids, rel->relids);
if (matches_any_index(joininfo, rel, other_rels))
{
foreach(l, ((BoolExpr *) rinfo->orclause)->args)
{
- Node *orarg = (Node *) lfirst(l);
+ Node *orarg = (Node *) lfirst(l);
/* OR arguments should be ANDs or sub-RestrictInfos */
if (and_clause(orarg))
return NULL;
/*
- * Otherwise, we have to do path selection in the memory context of
- * the given rel, so that any created path can be safely attached to
- * the rel's cache of best inner paths. (This is not currently an
- * issue for normal planning, but it is an issue for GEQO planning.)
+ * Otherwise, we have to do path selection in the memory context of the
+ * given rel, so that any created path can be safely attached to the rel's
+ * cache of best inner paths. (This is not currently an issue for normal
+ * planning, but it is an issue for GEQO planning.)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
/*
- * Intersect the given outer_relids with index_outer_relids to find
- * the set of outer relids actually relevant for this rel. If there
- * are none, again we can fail immediately.
+ * Intersect the given outer_relids with index_outer_relids to find the
+ * set of outer relids actually relevant for this rel. If there are none,
+ * again we can fail immediately.
*/
outer_relids = bms_intersect(rel->index_outer_relids, outer_relids);
if (bms_is_empty(outer_relids))
}
/*
- * Look to see if we already computed the result for this set of
- * relevant outerrels. (We include the isouterjoin status in the
- * cache lookup key for safety. In practice I suspect this is not
- * necessary because it should always be the same for a given
- * innerrel.)
+ * Look to see if we already computed the result for this set of relevant
+ * outerrels. (We include the isouterjoin status in the cache lookup key
+ * for safety. In practice I suspect this is not necessary because it
+ * should always be the same for a given innerrel.)
*/
foreach(l, rel->index_inner_paths)
{
bitindexpaths = list_concat(bitindexpaths, list_copy(indexpaths));
/*
- * If we found anything usable, generate a BitmapHeapPath for the
- * most promising combination of bitmap index paths.
+ * If we found anything usable, generate a BitmapHeapPath for the most
+ * promising combination of bitmap index paths.
*/
if (bitindexpaths != NIL)
{
ListCell *l;
/*
- * We can always use plain restriction clauses for the rel. We
- * scan these first because we want them first in the clause
- * list for the convenience of remove_redundant_join_clauses,
- * which can never remove non-join clauses and hence won't be able
- * to get rid of a non-join clause if it appears after a join
- * clause it is redundant with.
+ * We can always use plain restriction clauses for the rel. We scan these
+ * first because we want them first in the clause list for the convenience
+ * of remove_redundant_join_clauses, which can never remove non-join
+ * clauses and hence won't be able to get rid of a non-join clause if it
+ * appears after a join clause it is redundant with.
*/
foreach(l, rel->baserestrictinfo)
{
*
* If able to match the requested query pathkeys, returns either
* ForwardScanDirection or BackwardScanDirection to indicate the proper index
- * scan direction. If no match, returns NoMovementScanDirection.
+ * scan direction. If no match, returns NoMovementScanDirection.
*/
static ScanDirection
match_variant_ordering(PlannerInfo *root,
* Forget the whole thing if not a btree index; our check for ignorable
* columns assumes we are dealing with btree opclasses. (It'd be possible
* to factor out just the try for backwards indexscan, but considering
- * that we presently have no orderable indexes except btrees anyway,
- * it's hardly worth contorting this code for that case.)
+ * that we presently have no orderable indexes except btrees anyway, it's
+ * hardly worth contorting this code for that case.)
*
* Note: if you remove this, you probably need to put in a check on
* amoptionalkey to prevent possible clauseless scan on an index that
*/
if (index->relam != BTREE_AM_OID)
return NoMovementScanDirection;
+
/*
- * Figure out which index columns can be optionally ignored because
- * they have an equality constraint. This is the same set for either
- * forward or backward scan, so we do it just once.
+ * Figure out which index columns can be optionally ignored because they
+ * have an equality constraint. This is the same set for either forward
+ * or backward scan, so we do it just once.
*/
ignorables = identify_ignorable_ordering_cols(root, index,
restrictclauses);
+
/*
- * Try to match to forward scan, then backward scan. However, we can
- * skip the forward-scan case if there are no ignorable columns,
- * because find_usable_indexes() would have found the match already.
+ * Try to match to forward scan, then backward scan. However, we can skip
+ * the forward-scan case if there are no ignorable columns, because
+ * find_usable_indexes() would have found the match already.
*/
if (ignorables &&
match_index_to_query_keys(root, index, ForwardScanDirection,
List *restrictclauses)
{
List *result = NIL;
- int indexcol = 0; /* note this is 0-based */
+ int indexcol = 0; /* note this is 0-based */
ListCell *l;
/* restrictclauses is either NIL or has a sublist per column */
foreach(l, restrictclauses)
{
- List *sublist = (List *) lfirst(l);
- Oid opclass = index->classlist[indexcol];
- ListCell *l2;
+ List *sublist = (List *) lfirst(l);
+ Oid opclass = index->classlist[indexcol];
+ ListCell *l2;
foreach(l2, sublist)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l2);
OpExpr *clause = (OpExpr *) rinfo->clause;
- Oid clause_op;
- int op_strategy;
- bool varonleft;
- bool ispc;
+ Oid clause_op;
+ int op_strategy;
+ bool varonleft;
+ bool ispc;
/* We know this clause passed match_clause_to_indexcol */
index))
{
/*
- * The clause means either col = TRUE or col = FALSE;
- * we do not care which, it's an equality constraint
- * either way.
+ * The clause means either col = TRUE or col = FALSE; we
+ * do not care which, it's an equality constraint either
+ * way.
*/
- result = lappend_int(result, indexcol+1);
+ result = lappend_int(result, indexcol + 1);
break;
}
}
op_strategy = get_op_opclass_strategy(clause_op, opclass);
/*
- * You might expect to see Assert(op_strategy != 0) here,
- * but you won't: the clause might contain a special indexable
- * operator rather than an ordinary opclass member. Currently
- * none of the special operators are very likely to expand to
- * an equality operator; we do not bother to check, but just
- * assume no match.
+ * You might expect to see Assert(op_strategy != 0) here, but you
+ * won't: the clause might contain a special indexable operator
+ * rather than an ordinary opclass member. Currently none of the
+ * special operators are very likely to expand to an equality
+ * operator; we do not bother to check, but just assume no match.
*/
if (op_strategy != BTEqualStrategyNumber)
continue;
rinfo->left_relids);
if (ispc)
{
- result = lappend_int(result, indexcol+1);
+ result = lappend_int(result, indexcol + 1);
break;
}
}
index_pathkeys = build_index_pathkeys(root, index, indexscandir);
/*
- * Can we match to the query's requested pathkeys? The inner loop
- * skips over ignorable index columns while trying to match.
+ * Can we match to the query's requested pathkeys? The inner loop skips
+ * over ignorable index columns while trying to match.
*/
index_cell = list_head(index_pathkeys);
index_col = 0;
for (;;)
{
- List *isubkey;
+ List *isubkey;
if (index_cell == NULL)
return false;
isubkey = (List *) lfirst(index_cell);
index_cell = lnext(index_cell);
index_col++; /* index_col is now 1-based */
+
/*
* Since we are dealing with canonicalized pathkeys, pointer
* comparison is sufficient to determine a match.
int indkey;
/*
- * Ignore any RelabelType node above the operand. This is needed to
- * be able to apply indexscanning in binary-compatible-operator cases.
- * Note: we can assume there is at most one RelabelType node;
+ * Ignore any RelabelType node above the operand. This is needed to be
+ * able to apply indexscanning in binary-compatible-operator cases. Note:
+ * we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
*/
if (operand && IsA(operand, RelabelType))
else
{
/*
- * Index expression; find the correct expression. (This search
- * could be avoided, at the cost of complicating all the callers
- * of this routine; doesn't seem worth it.)
+ * Index expression; find the correct expression. (This search could
+ * be avoided, at the cost of complicating all the callers of this
+ * routine; doesn't seem worth it.)
*/
ListCell *indexpr_item;
int i;
*
* Another thing that we do with this machinery is to provide special
* smarts for "boolean" indexes (that is, indexes on boolean columns
- * that support boolean equality). We can transform a plain reference
+ * that support boolean equality). We can transform a plain reference
* to the indexkey into "indexkey = true", or "NOT indexkey" into
* "indexkey = false", so as to make the expression indexable using the
* regular index operators. (As of Postgres 8.1, we must do this here
indexcol, index))
return true;
}
+
/*
* Since we only consider clauses at top level of WHERE, we can convert
- * indexkey IS TRUE and indexkey IS FALSE to index searches as well.
- * The different meaning for NULL isn't important.
+ * indexkey IS TRUE and indexkey IS FALSE to index searches as well. The
+ * different meaning for NULL isn't important.
*/
else if (clause && IsA(clause, BooleanTest))
{
- BooleanTest *btest = (BooleanTest *) clause;
+ BooleanTest *btest = (BooleanTest *) clause;
if (btest->booltesttype == IS_TRUE ||
btest->booltesttype == IS_FALSE)
/*
* Currently, all known special operators require the indexkey on the
- * left, but this test could be pushed into the switch statement if
- * some are added that do not...
+ * left, but this test could be pushed into the switch statement if some
+ * are added that do not...
*/
if (!indexkey_on_left)
return false;
case OID_NAME_LIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_BYTEA_LIKE_OP:
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICLIKE_OP:
case OID_NAME_ICLIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_REGEXEQ_OP:
case OID_NAME_REGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICREGEXEQ_OP:
case OID_NAME_ICREGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_INET_SUB_OP:
* want to apply. (A hash index, for example, will not support ">=".)
* Currently, only btree supports the operators we need.
*
- * We insist on the opclass being the specific one we expect, else we'd
- * do the wrong thing if someone were to make a reverse-sort opclass
- * with the same operators.
+ * We insist on the opclass being the specific one we expect, else we'd do
+ * the wrong thing if someone were to make a reverse-sort opclass with the
+ * same operators.
*/
switch (expr_op)
{
/* First check for boolean cases */
if (IsBooleanOpclass(curClass))
{
- Expr *boolqual;
+ Expr *boolqual;
boolqual = expand_boolean_index_clause((Node *) rinfo->clause,
indexcol,
/* NOT clause? */
if (not_clause(clause))
{
- Node *arg = (Node *) get_notclausearg((Expr *) clause);
+ Node *arg = (Node *) get_notclausearg((Expr *) clause);
/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
}
if (clause && IsA(clause, BooleanTest))
{
- BooleanTest *btest = (BooleanTest *) clause;
- Node *arg = (Node *) btest->arg;
+ BooleanTest *btest = (BooleanTest *) clause;
+ Node *arg = (Node *) btest->arg;
/* It must have matched the indexkey */
Assert(match_index_to_operand(arg, indexcol, index));
expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
{
Expr *clause = rinfo->clause;
+
/* we know these will succeed */
Node *leftop = get_leftop(clause);
Node *rightop = get_rightop(clause);
switch (expr_op)
{
/*
- * LIKE and regex operators are not members of any index
- * opclass, so if we find one in an indexqual list we can
- * assume that it was accepted by
- * match_special_index_operator().
+ * LIKE and regex operators are not members of any index opclass,
+ * so if we find one in an indexqual list we can assume that it
+ * was accepted by match_special_index_operator().
*/
case OID_TEXT_LIKE_OP:
case OID_BPCHAR_LIKE_OP:
}
/*
- * If necessary, coerce the prefix constant to the right type. The
- * given prefix constant is either text or bytea type.
+ * If necessary, coerce the prefix constant to the right type. The given
+ * prefix constant is either text or bytea type.
*/
if (prefix_const->consttype != datatype)
{
{
case TEXTOID:
prefix = DatumGetCString(DirectFunctionCall1(textout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
break;
case BYTEAOID:
prefix = DatumGetCString(DirectFunctionCall1(byteaout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
break;
default:
elog(ERROR, "unexpected const type: %u",
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.95 2005/06/05 22:32:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.96 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Find potential mergejoin clauses. We can skip this if we are not
- * interested in doing a mergejoin. However, mergejoin is currently
- * our only way of implementing full outer joins, so override
- * mergejoin disable if it's a full join.
+ * interested in doing a mergejoin. However, mergejoin is currently our
+ * only way of implementing full outer joins, so override mergejoin
+ * disable if it's a full join.
*/
if (enable_mergejoin || jointype == JOIN_FULL)
mergeclause_list = select_mergejoin_clauses(joinrel,
/*
* 3. Consider paths where the inner relation need not be explicitly
- * sorted. This includes mergejoins only (nestloops were already
- * built in match_unsorted_outer).
+ * sorted. This includes mergejoins only (nestloops were already built in
+ * match_unsorted_outer).
*
* Diked out as redundant 2/13/2000 -- tgl. There isn't any really
- * significant difference between the inner and outer side of a
- * mergejoin, so match_unsorted_inner creates no paths that aren't
- * equivalent to those made by match_unsorted_outer when
- * add_paths_to_joinrel() is invoked with the two rels given in the
- * other order.
+ * significant difference between the inner and outer side of a mergejoin,
+ * so match_unsorted_inner creates no paths that aren't equivalent to
+ * those made by match_unsorted_outer when add_paths_to_joinrel() is
+ * invoked with the two rels given in the other order.
*/
match_unsorted_inner(root, joinrel, outerrel, innerrel,
restrictlist, mergeclause_list, jointype);
#endif
/*
- * 4. Consider paths where both outer and inner relations must be
- * hashed before being joined.
+ * 4. Consider paths where both outer and inner relations must be hashed
+ * before being joined.
*/
if (enable_hashjoin)
hash_inner_and_outer(root, joinrel, outerrel, innerrel,
/*
* We only consider the cheapest-total-cost input paths, since we are
* assuming here that a sort is required. We will consider
- * cheapest-startup-cost input paths later, and only if they don't
- * need a sort.
+ * cheapest-startup-cost input paths later, and only if they don't need a
+ * sort.
*
- * If unique-ification is requested, do it and then handle as a plain
- * inner join.
+ * If unique-ification is requested, do it and then handle as a plain inner
+ * join.
*/
outer_path = outerrel->cheapest_total_path;
inner_path = innerrel->cheapest_total_path;
}
/*
- * Each possible ordering of the available mergejoin clauses will
- * generate a differently-sorted result path at essentially the same
- * cost. We have no basis for choosing one over another at this level
- * of joining, but some sort orders may be more useful than others for
- * higher-level mergejoins, so it's worth considering multiple
- * orderings.
+ * Each possible ordering of the available mergejoin clauses will generate
+ * a differently-sorted result path at essentially the same cost. We have
+ * no basis for choosing one over another at this level of joining, but
+ * some sort orders may be more useful than others for higher-level
+ * mergejoins, so it's worth considering multiple orderings.
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
- * redundant. Therefore, what we do is convert the mergeclause list
- * to a list of canonical pathkeys, and then consider different
- * orderings of the pathkeys.
+ * redundant. Therefore, what we do is convert the mergeclause list to a
+ * list of canonical pathkeys, and then consider different orderings of
+ * the pathkeys.
*
* Generating a path for *every* permutation of the pathkeys doesn't seem
* like a winning strategy; the cost in planning time is too high. For
- * now, we generate one path for each pathkey, listing that pathkey
- * first and the rest in random order. This should allow at least a
- * one-clause mergejoin without re-sorting against any other possible
- * mergejoin partner path. But if we've not guessed the right
- * ordering of secondary keys, we may end up evaluating clauses as
- * qpquals when they could have been done as mergeclauses. We need to
- * figure out a better way. (Two possible approaches: look at all the
- * relevant index relations to suggest plausible sort orders, or make
- * just one output path and somehow mark it as having a sort-order
- * that can be rearranged freely.)
+ * now, we generate one path for each pathkey, listing that pathkey first
+ * and the rest in random order. This should allow at least a one-clause
+ * mergejoin without re-sorting against any other possible mergejoin
+ * partner path. But if we've not guessed the right ordering of secondary
+ * keys, we may end up evaluating clauses as qpquals when they could have
+ * been done as mergeclauses. We need to figure out a better way. (Two
+ * possible approaches: look at all the relevant index relations to
+ * suggest plausible sort orders, or make just one output path and somehow
+ * mark it as having a sort-order that can be rearranged freely.)
*/
all_pathkeys = make_pathkeys_for_mergeclauses(root,
mergeclause_list,
/*
* Select mergeclause(s) that match this sort ordering. If we had
- * redundant merge clauses then we will get a subset of the
- * original clause list. There had better be some match,
- * however...
+ * redundant merge clauses then we will get a subset of the original
+ * clause list. There had better be some match, however...
*/
cur_mergeclauses = find_mergeclauses_for_pathkeys(root,
cur_pathkeys,
- mergeclause_list);
+ mergeclause_list);
Assert(cur_mergeclauses != NIL);
/* Forget it if can't use all the clauses in right/full join */
if (useallclauses &&
- list_length(cur_mergeclauses) != list_length(mergeclause_list))
+ list_length(cur_mergeclauses) != list_length(mergeclause_list))
continue;
/*
* Build sort pathkeys for both sides.
*
* Note: it's possible that the cheapest paths will already be sorted
- * properly. create_mergejoin_path will detect that case and
- * suppress an explicit sort step, so we needn't do so here.
+ * properly. create_mergejoin_path will detect that case and suppress
+ * an explicit sort step, so we needn't do so here.
*/
outerkeys = make_pathkeys_for_mergeclauses(root,
cur_mergeclauses,
/*
* Nestloop only supports inner, left, and IN joins. Also, if we are
- * doing a right or full join, we must use *all* the mergeclauses as
- * join clauses, else we will not have a valid plan. (Although these
- * two flags are currently inverses, keep them separate for clarity
- * and possible future changes.)
+ * doing a right or full join, we must use *all* the mergeclauses as join
+ * clauses, else we will not have a valid plan. (Although these two flags
+ * are currently inverses, keep them separate for clarity and possible
+ * future changes.)
*/
switch (jointype)
{
else if (nestjoinOK)
{
/*
- * If the cheapest inner path is a join or seqscan, we should
- * consider materializing it. (This is a heuristic: we could
- * consider it always, but for inner indexscans it's probably a
- * waste of time.)
+ * If the cheapest inner path is a join or seqscan, we should consider
+ * materializing it. (This is a heuristic: we could consider it
+ * always, but for inner indexscans it's probably a waste of time.)
*/
if (!(IsA(inner_cheapest_total, IndexPath) ||
IsA(inner_cheapest_total, BitmapHeapPath) ||
create_material_path(innerrel, inner_cheapest_total);
/*
- * Get the best innerjoin indexpath (if any) for this outer rel.
- * It's the same for all outer paths.
+ * Get the best innerjoin indexpath (if any) for this outer rel. It's
+ * the same for all outer paths.
*/
bestinnerjoin = best_inner_indexscan(root, innerrel,
outerrel->relids, jointype);
int sortkeycnt;
/*
- * If we need to unique-ify the outer path, it's pointless to
- * consider any but the cheapest outer.
+ * If we need to unique-ify the outer path, it's pointless to consider
+ * any but the cheapest outer.
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
}
/*
- * The result will have this sort order (even if it is implemented
- * as a nestloop, and even if some of the mergeclauses are
- * implemented by qpquals rather than as true mergeclauses):
+ * The result will have this sort order (even if it is implemented as
+ * a nestloop, and even if some of the mergeclauses are implemented by
+ * qpquals rather than as true mergeclauses):
*/
merge_pathkeys = build_join_pathkeys(root, joinrel, jointype,
outerpath->pathkeys);
innerrel);
/*
- * Generate a mergejoin on the basis of sorting the cheapest
- * inner. Since a sort will be needed, only cheapest total cost
- * matters. (But create_mergejoin_path will do the right thing if
+ * Generate a mergejoin on the basis of sorting the cheapest inner.
+ * Since a sort will be needed, only cheapest total cost matters.
+ * (But create_mergejoin_path will do the right thing if
* inner_cheapest_total is already correctly sorted.)
*/
add_path(joinrel, (Path *)
continue;
/*
- * Look for presorted inner paths that satisfy the innersortkey
- * list --- or any truncation thereof, if we are allowed to build
- * a mergejoin using a subset of the merge clauses. Here, we
- * consider both cheap startup cost and cheap total cost. Ignore
+ * Look for presorted inner paths that satisfy the innersortkey list
+ * --- or any truncation thereof, if we are allowed to build a
+ * mergejoin using a subset of the merge clauses. Here, we consider
+ * both cheap startup cost and cheap total cost. Ignore
* inner_cheapest_total, since we already made a path with it.
*/
num_sortkeys = list_length(innersortkeys);
/*
* Look for an inner path ordered well enough for the first
- * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is
- * modified destructively, which is why we made a copy...
+ * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
+ * destructively, which is why we made a copy...
*/
trialsortkeys = list_truncate(trialsortkeys, sortkeycnt);
innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist,
if (innerpath != cheapest_total_inner)
{
/*
- * Avoid rebuilding clause list if we already made
- * one; saves memory in big join trees...
+ * Avoid rebuilding clause list if we already made one;
+ * saves memory in big join trees...
*/
if (newclauses == NIL)
{
{
newclauses =
find_mergeclauses_for_pathkeys(root,
- trialsortkeys,
- mergeclauses);
+ trialsortkeys,
+ mergeclauses);
Assert(newclauses != NIL);
}
else
* We need to build only one hashpath for any given pair of outer and
* inner relations; all of the hashable clauses will be used as keys.
*
- * Scan the join's restrictinfo list to find hashjoinable clauses that
- * are usable with this pair of sub-relations.
+ * Scan the join's restrictinfo list to find hashjoinable clauses that are
+ * usable with this pair of sub-relations.
*/
hashclauses = NIL;
foreach(l, restrictlist)
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
- bms_is_subset(restrictinfo->right_relids, outerrel->relids))
+ bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
if (hashclauses)
{
/*
- * We consider both the cheapest-total-cost and
- * cheapest-startup-cost outer paths. There's no need to consider
- * any but the cheapest-total-cost inner path, however.
+ * We consider both the cheapest-total-cost and cheapest-startup-cost
+ * outer paths. There's no need to consider any but the
+ * cheapest-total-cost inner path, however.
*/
Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
Path *cheapest_total_outer = outerrel->cheapest_total_path;
RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
/*
- * If processing an outer join, only use its own join clauses in
- * the merge. For inner joins we need not be so picky.
+ * If processing an outer join, only use its own join clauses in the
+ * merge. For inner joins we need not be so picky.
*
- * Furthermore, if it is a right/full join then *all* the explicit
- * join clauses must be mergejoinable, else the executor will
- * fail. If we are asked for a right join then just return NIL to
- * indicate no mergejoin is possible (we can handle it as a left
- * join instead). If we are asked for a full join then emit an
- * error, because there is no fallback.
+ * Furthermore, if it is a right/full join then *all* the explicit join
+ * clauses must be mergejoinable, else the executor will fail. If we
+ * are asked for a right join then just return NIL to indicate no
+ * mergejoin is possible (we can handle it as a left join instead). If
+ * we are asked for a full join then emit an error, because there is
+ * no fallback.
*/
if (isouterjoin)
{
/*
* Check if clause is usable with these input rels. All the vars
- * needed on each side of the clause must be available from one or
- * the other of the input rels.
+ * needed on each side of the clause must be available from one or the
+ * other of the input rels.
*/
if (bms_is_subset(restrictinfo->left_relids, outerrel->relids) &&
bms_is_subset(restrictinfo->right_relids, innerrel->relids))
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
- bms_is_subset(restrictinfo->right_relids, outerrel->relids))
+ bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.75 2005/07/28 22:27:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.76 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* First, consider left-sided and right-sided plans, in which rels of
- * exactly level-1 member relations are joined against initial
- * relations. We prefer to join using join clauses, but if we find a
- * rel of level-1 members that has no join clauses, we will generate
- * Cartesian-product joins against all initial rels not already
- * contained in it.
+ * exactly level-1 member relations are joined against initial relations.
+ * We prefer to join using join clauses, but if we find a rel of level-1
+ * members that has no join clauses, we will generate Cartesian-product
+ * joins against all initial rels not already contained in it.
*
- * In the first pass (level == 2), we try to join each initial rel to
- * each initial rel that appears later in joinrels[1]. (The
- * mirror-image joins are handled automatically by make_join_rel.) In
- * later passes, we try to join rels of size level-1 from
- * joinrels[level-1] to each initial rel in joinrels[1].
+ * In the first pass (level == 2), we try to join each initial rel to each
+ * initial rel that appears later in joinrels[1]. (The mirror-image joins
+ * are handled automatically by make_join_rel.) In later passes, we try
+ * to join rels of size level-1 from joinrels[level-1] to each initial rel
+ * in joinrels[1].
*/
foreach(r, joinrels[level - 1])
{
if (old_rel->joininfo != NIL)
{
/*
- * Note that if all available join clauses for this rel
- * require more than one other rel, we will fail to make any
- * joins against it here. In most cases that's OK; it'll be
- * considered by "bushy plan" join code in a higher-level pass
- * where we have those other rels collected into a join rel.
+ * Note that if all available join clauses for this rel require
+ * more than one other rel, we will fail to make any joins against
+ * it here. In most cases that's OK; it'll be considered by
+ * "bushy plan" join code in a higher-level pass where we have
+ * those other rels collected into a join rel.
*/
new_rels = make_rels_by_clause_joins(root,
old_rel,
other_rels);
/*
- * An exception occurs when there is a clauseless join inside
- * an IN (sub-SELECT) construct. Here, the members of the
- * subselect all have join clauses (against the stuff outside
- * the IN), but they *must* be joined to each other before we
- * can make use of those join clauses. So do the clauseless
- * join bit.
+ * An exception occurs when there is a clauseless join inside an
+ * IN (sub-SELECT) construct. Here, the members of the subselect
+ * all have join clauses (against the stuff outside the IN), but
+ * they *must* be joined to each other before we can make use of
+ * those join clauses. So do the clauseless join bit.
*
* See also the last-ditch case below.
*/
/*
* At levels above 2 we will generate the same joined relation in
* multiple ways --- for example (a join b) join c is the same
- * RelOptInfo as (b join c) join a, though the second case will
- * add a different set of Paths to it. To avoid making extra work
- * for subsequent passes, do not enter the same RelOptInfo into
- * our output list multiple times.
+ * RelOptInfo as (b join c) join a, though the second case will add a
+ * different set of Paths to it. To avoid making extra work for
+ * subsequent passes, do not enter the same RelOptInfo into our output
+ * list multiple times.
*/
result_rels = list_concat_unique_ptr(result_rels, new_rels);
}
/*
- * Now, consider "bushy plans" in which relations of k initial rels
- * are joined to relations of level-k initial rels, for 2 <= k <=
- * level-2.
+ * Now, consider "bushy plans" in which relations of k initial rels are
+ * joined to relations of level-k initial rels, for 2 <= k <= level-2.
*
* We only consider bushy-plan joins for pairs of rels where there is a
- * suitable join clause, in order to avoid unreasonable growth of
- * planning time.
+ * suitable join clause, in order to avoid unreasonable growth of planning
+ * time.
*/
for (k = 2;; k++)
{
int other_level = level - k;
/*
- * Since make_join_rel(x, y) handles both x,y and y,x cases, we
- * only need to go as far as the halfway point.
+ * Since make_join_rel(x, y) handles both x,y and y,x cases, we only
+ * need to go as far as the halfway point.
*/
if (k > other_level)
break;
{
/*
* OK, we can build a rel of the right level from this
- * pair of rels. Do so if there is at least one
- * usable join clause.
+ * pair of rels. Do so if there is at least one usable
+ * join clause.
*/
if (have_relevant_joinclause(old_rel, new_rel))
{
}
/*
- * Last-ditch effort: if we failed to find any usable joins so far,
- * force a set of cartesian-product joins to be generated. This
- * handles the special case where all the available rels have join
- * clauses but we cannot use any of the joins yet. An example is
+ * Last-ditch effort: if we failed to find any usable joins so far, force
+ * a set of cartesian-product joins to be generated. This handles the
+ * special case where all the available rels have join clauses but we
+ * cannot use any of the joins yet. An example is
*
* SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
*
* The join clause will be usable at level 3, but at level 2 we have no
- * choice but to make cartesian joins. We consider only left-sided
- * and right-sided cartesian joins in this case (no bushy).
+ * choice but to make cartesian joins. We consider only left-sided and
+ * right-sided cartesian joins in this case (no bushy).
*/
if (result_rels == NIL)
{
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);
/*
- * As long as given other_rels are distinct, don't need to
- * test to see if jrel is already part of output list.
+ * As long as given other_rels are distinct, don't need to test to
+ * see if jrel is already part of output list.
*/
if (jrel)
result = lcons(jrel, result);
elog(ERROR, "invalid join order");
/*
- * Since we are only going to consider this one way to do it,
- * we're done generating Paths for this joinrel and can now select
- * the cheapest. In fact we *must* do so now, since next level up
- * will need it!
+ * Since we are only going to consider this one way to do it, we're
+ * done generating Paths for this joinrel and can now select the
+ * cheapest. In fact we *must* do so now, since next level up will
+ * need it!
*/
set_cheapest(rel);
joinrelids = bms_union(rel1->relids, rel2->relids);
/*
- * If we are implementing IN clauses as joins, there are some joins
- * that are illegal. Check to see if the proposed join is trouble. We
- * can skip the work if looking at an outer join, however, because
- * only top-level joins might be affected.
+ * If we are implementing IN clauses as joins, there are some joins that
+ * are illegal. Check to see if the proposed join is trouble. We can skip
+ * the work if looking at an outer join, however, because only top-level
+ * joins might be affected.
*/
if (jointype == JOIN_INNER)
{
/*
* This IN clause is not relevant unless its RHS overlaps the
- * proposed join. (Check this first as a fast path for
- * dismissing most irrelevant INs quickly.)
+ * proposed join. (Check this first as a fast path for dismissing
+ * most irrelevant INs quickly.)
*/
if (!bms_overlap(ininfo->righthand, joinrelids))
continue;
continue;
/*
- * Cannot join if proposed join contains rels not in the RHS
- * *and* contains only part of the RHS. We must build the
- * complete RHS (subselect's join) before it can be joined to
- * rels outside the subselect.
+ * Cannot join if proposed join contains rels not in the RHS *and*
+ * contains only part of the RHS. We must build the complete RHS
+ * (subselect's join) before it can be joined to rels outside the
+ * subselect.
*/
if (!bms_is_subset(ininfo->righthand, joinrelids))
{
}
/*
- * At this point we are considering a join of the IN's RHS to
- * some other rel(s).
+ * At this point we are considering a join of the IN's RHS to some
+ * other rel(s).
*
- * If we already joined IN's RHS to any other rels in either
- * input path, then this join is not constrained (the
- * necessary work was done at the lower level where that join
- * occurred).
+ * If we already joined IN's RHS to any other rels in either input
+ * path, then this join is not constrained (the necessary work was
+ * done at the lower level where that join occurred).
*/
if (bms_is_subset(ininfo->righthand, rel1->relids) &&
!bms_equal(ininfo->righthand, rel1->relids))
* innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles
* RHS/LHS.
*
- * JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS;
- * conversely JOIN_UNIQUE_INNER will work if innerrel is
- * exactly RHS.
+ * JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS; conversely
+ * JOIN_UNIQUE_INNER will work if innerrel is exactly RHS.
*
- * But none of these will work if we already found another IN
- * that needs to trigger here.
+ * But none of these will work if we already found another IN that
+ * needs to trigger here.
*/
if (jointype != JOIN_INNER)
{
}
/*
- * Find or build the join RelOptInfo, and compute the restrictlist
- * that goes with this particular joining.
+ * Find or build the join RelOptInfo, and compute the restrictlist that
+ * goes with this particular joining.
*/
joinrel = build_join_rel(root, joinrelids, rel1, rel2, jointype,
&restrictlist);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.74 2005/07/28 20:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.75 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (restriction_is_or_clause(rinfo))
{
/*
- * Use the generate_bitmap_or_paths() machinery to estimate
- * the value of each OR clause. We can use regular
- * restriction clauses along with the OR clause contents to
- * generate indexquals. We pass outer_relids = NULL so that
- * sub-clauses that are actually joins will be ignored.
+ * Use the generate_bitmap_or_paths() machinery to estimate the
+ * value of each OR clause. We can use regular restriction
+ * clauses along with the OR clause contents to generate
+ * indexquals. We pass outer_relids = NULL so that sub-clauses
+ * that are actually joins will be ignored.
*/
- List *orpaths;
- ListCell *k;
+ List *orpaths;
+ ListCell *k;
orpaths = generate_bitmap_or_paths(root, rel,
list_make1(rinfo),
/* Locate the cheapest OR path */
foreach(k, orpaths)
{
- BitmapOrPath *path = (BitmapOrPath *) lfirst(k);
+ BitmapOrPath *path = (BitmapOrPath *) lfirst(k);
Assert(IsA(path, BitmapOrPath));
if (bestpath == NULL ||
return false;
/*
- * Convert the path's indexclauses structure to a RestrictInfo tree.
- * We include any partial-index predicates so as to get a reasonable
+ * Convert the path's indexclauses structure to a RestrictInfo tree. We
+ * include any partial-index predicates so as to get a reasonable
* representation of what the path is actually scanning.
*/
newrinfos = make_restrictinfo_from_bitmapqual((Path *) bestpath,
rel->baserestrictinfo = list_concat(rel->baserestrictinfo, newrinfos);
/*
- * Adjust the original OR clause's cached selectivity to compensate
- * for the selectivity of the added (but redundant) lower-level qual.
- * This should result in the join rel getting approximately the same
- * rows estimate as it would have gotten without all these
- * shenanigans. (XXX major hack alert ... this depends on the
- * assumption that the selectivity will stay cached ...)
+ * Adjust the original OR clause's cached selectivity to compensate for
+ * the selectivity of the added (but redundant) lower-level qual. This
+ * should result in the join rel getting approximately the same rows
+ * estimate as it would have gotten without all these shenanigans. (XXX
+ * major hack alert ... this depends on the assumption that the
+ * selectivity will stay cached ...)
*/
or_selec = clause_selectivity(root, (Node *) or_rinfo,
0, JOIN_INNER);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.72 2005/08/27 22:13:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.73 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static PathKeyItem *makePathKeyItem(Node *key, Oid sortop, bool checkType);
static void generate_outer_join_implications(PlannerInfo *root,
- List *equi_key_set,
- Relids *relids);
+ List *equi_key_set,
+ Relids *relids);
static void sub_generate_join_implications(PlannerInfo *root,
- List *equi_key_set, Relids *relids,
- Node *item1, Oid sortop1,
- Relids item1_relids);
+ List *equi_key_set, Relids *relids,
+ Node *item1, Oid sortop1,
+ Relids item1_relids);
static void process_implied_const_eq(PlannerInfo *root,
- List *equi_key_set, Relids *relids,
- Node *item1, Oid sortop1,
- Relids item1_relids,
- bool delete_it);
+ List *equi_key_set, Relids *relids,
+ Node *item1, Oid sortop1,
+ Relids item1_relids,
+ bool delete_it);
static List *make_canonical_pathkey(PlannerInfo *root, PathKeyItem *item);
static Var *find_indexkey_var(PlannerInfo *root, RelOptInfo *rel,
AttrNumber varattno);
PathKeyItem *item = makeNode(PathKeyItem);
/*
- * Some callers pass expressions that are not necessarily of the same
- * type as the sort operator expects as input (for example when
- * dealing with an index that uses binary-compatible operators). We
- * must relabel these with the correct type so that the key
- * expressions will be seen as equal() to expressions that have been
- * correctly labeled.
+ * Some callers pass expressions that are not necessarily of the same type
+ * as the sort operator expects as input (for example when dealing with an
+ * index that uses binary-compatible operators). We must relabel these
+ * with the correct type so that the key expressions will be seen as
+ * equal() to expressions that have been correctly labeled.
*/
if (checkType)
{
return;
/*
- * Our plan is to make a two-element set, then sweep through the
- * existing equijoin sets looking for matches to item1 or item2. When
- * we find one, we remove that set from equi_key_list and union it
- * into our new set. When done, we add the new set to the front of
- * equi_key_list.
+ * Our plan is to make a two-element set, then sweep through the existing
+ * equijoin sets looking for matches to item1 or item2. When we find one,
+ * we remove that set from equi_key_list and union it into our new set.
+ * When done, we add the new set to the front of equi_key_list.
*
* It may well be that the two items we're given are already known to be
* equijoin-equivalent, in which case we don't need to change our data
* structure. If we find both of them in the same equivalence set to
* start with, we can quit immediately.
*
- * This is a standard UNION-FIND problem, for which there exist better
- * data structures than simple lists. If this code ever proves to be
- * a bottleneck then it could be sped up --- but for now, simple is
+ * This is a standard UNION-FIND problem, for which there exist better data
+ * structures than simple lists. If this code ever proves to be a
+ * bottleneck then it could be sped up --- but for now, simple is
* beautiful.
*/
newset = NIL;
if (item1here || item2here)
{
/*
- * If find both in same equivalence set, no need to do any
- * more
+ * If find both in same equivalence set, no need to do any more
*/
if (item1here && item2here)
{
int i1;
/*
- * A set containing only two items cannot imply any equalities
- * beyond the one that created the set, so we can skip it ---
- * unless outer joins appear in the query.
+ * A set containing only two items cannot imply any equalities beyond
+ * the one that created the set, so we can skip it --- unless outer
+ * joins appear in the query.
*/
if (nitems < 3 && !root->hasOuterJoins)
continue;
/*
- * Collect info about relids mentioned in each item. For this
- * routine we only really care whether there are any at all in
- * each item, but process_implied_equality() needs the exact sets,
- * so we may as well pull them here.
+ * Collect info about relids mentioned in each item. For this routine
+ * we only really care whether there are any at all in each item, but
+ * process_implied_equality() needs the exact sets, so we may as well
+ * pull them here.
*/
relids = (Relids *) palloc(nitems * sizeof(Relids));
have_consts = false;
* Match each item in the set with all that appear after it (it's
* sufficient to generate A=B, need not process B=A too).
*
- * A set containing only two items cannot imply any equalities
- * beyond the one that created the set, so we can skip this
- * processing in that case.
+ * A set containing only two items cannot imply any equalities beyond the
+ * one that created the set, so we can skip this processing in that
+ * case.
*/
if (nitems >= 3)
{
* the time it gets here, the restriction will look like
* COALESCE(LEFTVAR, RIGHTVAR) = CONSTANT
* and we will have a join clause LEFTVAR = RIGHTVAR that we can match the
- * COALESCE expression to. In this situation we can push LEFTVAR = CONSTANT
+ * COALESCE expression to. In this situation we can push LEFTVAR = CONSTANT
* and RIGHTVAR = CONSTANT into the input relations, since any rows not
* meeting these conditions cannot contribute to the join result.
*
*/
static void
sub_generate_join_implications(PlannerInfo *root,
- List *equi_key_set, Relids *relids,
- Node *item1, Oid sortop1, Relids item1_relids)
+ List *equi_key_set, Relids *relids,
+ Node *item1, Oid sortop1, Relids item1_relids)
{
ListCell *l;
foreach(l, root->left_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Node *leftop = get_leftop(rinfo->clause);
+ Node *leftop = get_leftop(rinfo->clause);
if (equal(leftop, item1) && rinfo->left_sortop == sortop1)
{
/*
- * Match, so find constant member(s) of set and generate
- * implied INNERVAR = CONSTANT
+ * Match, so find constant member(s) of set and generate implied
+ * INNERVAR = CONSTANT
*/
- Node *rightop = get_rightop(rinfo->clause);
+ Node *rightop = get_rightop(rinfo->clause);
process_implied_const_eq(root, equi_key_set, relids,
rightop,
rinfo->right_sortop,
rinfo->right_relids,
false);
+
/*
* We can remove explicit tests of this outer-join qual, too,
- * since we now have tests forcing each of its sides
- * to the same value.
+ * since we now have tests forcing each of its sides to the same
+ * value.
*/
process_implied_equality(root,
leftop, rightop,
rinfo->left_sortop, rinfo->right_sortop,
rinfo->left_relids, rinfo->right_relids,
true);
+
/*
- * And recurse to see if we can deduce anything from
- * INNERVAR = CONSTANT
+ * And recurse to see if we can deduce anything from INNERVAR =
+ * CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
rightop,
foreach(l, root->right_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Node *rightop = get_rightop(rinfo->clause);
+ Node *rightop = get_rightop(rinfo->clause);
if (equal(rightop, item1) && rinfo->right_sortop == sortop1)
{
/*
- * Match, so find constant member(s) of set and generate
- * implied INNERVAR = CONSTANT
+ * Match, so find constant member(s) of set and generate implied
+ * INNERVAR = CONSTANT
*/
- Node *leftop = get_leftop(rinfo->clause);
+ Node *leftop = get_leftop(rinfo->clause);
process_implied_const_eq(root, equi_key_set, relids,
leftop,
rinfo->left_sortop,
rinfo->left_relids,
false);
+
/*
* We can remove explicit tests of this outer-join qual, too,
- * since we now have tests forcing each of its sides
- * to the same value.
+ * since we now have tests forcing each of its sides to the same
+ * value.
*/
process_implied_equality(root,
leftop, rightop,
rinfo->left_sortop, rinfo->right_sortop,
rinfo->left_relids, rinfo->right_relids,
true);
+
/*
- * And recurse to see if we can deduce anything from
- * INNERVAR = CONSTANT
+ * And recurse to see if we can deduce anything from INNERVAR =
+ * CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
leftop,
if (IsA(item1, CoalesceExpr))
{
CoalesceExpr *cexpr = (CoalesceExpr *) item1;
- Node *cfirst;
- Node *csecond;
+ Node *cfirst;
+ Node *csecond;
if (list_length(cexpr->args) != 2)
return;
csecond = (Node *) lsecond(cexpr->args);
/*
- * Examine each mergejoinable full-join clause, looking for a
- * clause of the form "x = y" matching the COALESCE(x,y) expression
+ * Examine each mergejoinable full-join clause, looking for a clause
+ * of the form "x = y" matching the COALESCE(x,y) expression
*/
foreach(l, root->full_join_clauses)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Node *leftop = get_leftop(rinfo->clause);
- Node *rightop = get_rightop(rinfo->clause);
+ Node *leftop = get_leftop(rinfo->clause);
+ Node *rightop = get_rightop(rinfo->clause);
/*
- * We can assume the COALESCE() inputs are in the same order
- * as the join clause, since both were automatically generated
- * in the cases we care about.
+ * We can assume the COALESCE() inputs are in the same order as
+ * the join clause, since both were automatically generated in the
+ * cases we care about.
*
- * XXX currently this may fail to match in cross-type cases
- * because the COALESCE will contain typecast operations while
- * the join clause may not (if there is a cross-type mergejoin
- * operator available for the two column types).
- * Is it OK to strip implicit coercions from the COALESCE
- * arguments? What of the sortops in such cases?
+ * XXX currently this may fail to match in cross-type cases because
+ * the COALESCE will contain typecast operations while the join
+ * clause may not (if there is a cross-type mergejoin operator
+ * available for the two column types). Is it OK to strip implicit
+ * coercions from the COALESCE arguments? What of the sortops in
+ * such cases?
*/
if (equal(leftop, cfirst) &&
equal(rightop, csecond) &&
sortop1,
item1_relids,
true);
+
/*
* We can remove explicit tests of this outer-join qual, too,
- * since we now have tests forcing each of its sides
- * to the same value.
+ * since we now have tests forcing each of its sides to the
+ * same value.
*/
process_implied_equality(root,
leftop, rightop,
rinfo->left_relids,
rinfo->right_relids,
true);
+
/*
- * And recurse to see if we can deduce anything from
- * LEFTVAR = CONSTANT
+ * And recurse to see if we can deduce anything from LEFTVAR =
+ * CONSTANT
*/
sub_generate_join_implications(root, equi_key_set, relids,
leftop,
List *cpathkey;
/*
- * It's sufficient to look at the first entry in the sublist; if
- * there are more entries, they're already part of an equivalence
- * set by definition.
+ * It's sufficient to look at the first entry in the sublist; if there
+ * are more entries, they're already part of an equivalence set by
+ * definition.
*/
Assert(pathkey != NIL);
item = (PathKeyItem *) linitial(pathkey);
cpathkey = make_canonical_pathkey(root, item);
/*
- * Eliminate redundant ordering requests --- ORDER BY A,A is the
- * same as ORDER BY A. We want to check this only after we have
- * canonicalized the keys, so that equivalent-key knowledge is
- * used when deciding if an item is redundant.
+ * Eliminate redundant ordering requests --- ORDER BY A,A is the same
+ * as ORDER BY A. We want to check this only after we have
+ * canonicalized the keys, so that equivalent-key knowledge is used
+ * when deciding if an item is redundant.
*/
new_pathkeys = list_append_unique_ptr(new_pathkeys, cpathkey);
}
List *subkey2 = (List *) lfirst(key2);
/*
- * XXX would like to check that we've been given canonicalized
- * input, but PlannerInfo not accessible here...
+ * XXX would like to check that we've been given canonicalized input,
+ * but PlannerInfo not accessible here...
*/
#ifdef NOT_USED
Assert(list_member_ptr(root->equi_key_list, subkey1));
#endif
/*
- * We will never have two subkeys where one is a subset of the
- * other, because of the canonicalization process. Either they
- * are equal or they ain't. Furthermore, we only need pointer
- * comparison to detect equality.
+ * We will never have two subkeys where one is a subset of the other,
+ * because of the canonicalization process. Either they are equal or
+ * they ain't. Furthermore, we only need pointer comparison to detect
+ * equality.
*/
if (subkey1 != subkey2)
return PATHKEYS_DIFFERENT; /* no need to keep looking */
/*
* If we reached the end of only one list, the other is longer and
- * therefore not a subset. (We assume the additional sublist(s) of
- * the other list are not NIL --- no pathkey list should ever have a
- * NIL sublist.)
+ * therefore not a subset. (We assume the additional sublist(s) of the
+ * other list are not NIL --- no pathkey list should ever have a NIL
+ * sublist.)
*/
if (key1 == NULL && key2 == NULL)
return PATHKEYS_EQUAL;
Path *path = (Path *) lfirst(l);
/*
- * Since cost comparison is a lot cheaper than pathkey comparison,
- * do that first. (XXX is that still true?)
+ * Since cost comparison is a lot cheaper than pathkey comparison, do
+ * that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_path_costs(matched_path, path, cost_criterion) <= 0)
Path *path = (Path *) lfirst(l);
/*
- * Since cost comparison is a lot cheaper than pathkey comparison,
- * do that first.
+ * Since cost comparison is a lot cheaper than pathkey comparison, do
+ * that first.
*/
if (matched_path != NULL &&
- compare_fractional_path_costs(matched_path, path, fraction) <= 0)
+ compare_fractional_path_costs(matched_path, path, fraction) <= 0)
continue;
if (pathkeys_contained_in(pathkeys, path->pathkeys))
cpathkey = make_canonical_pathkey(root, item);
/*
- * Eliminate redundant ordering info; could happen if query is
- * such that index keys are equijoined...
+ * Eliminate redundant ordering info; could happen if query is such
+ * that index keys are equijoined...
*/
retval = list_append_unique_ptr(retval, cpathkey);
/*
* convert_subquery_pathkeys
* Build a pathkeys list that describes the ordering of a subquery's
- * result, in the terms of the outer query. This is essentially a
+ * result, in the terms of the outer query. This is essentially a
* task of conversion.
*
* 'rel': outer query's RelOptInfo for the subquery relation.
/*
* The sub_pathkey could contain multiple elements (representing
- * knowledge that multiple items are effectively equal). Each
- * element might match none, one, or more of the output columns
- * that are visible to the outer query. This means we may have
- * multiple possible representations of the sub_pathkey in the
- * context of the outer query. Ideally we would generate them all
- * and put them all into a pathkey list of the outer query,
- * thereby propagating equality knowledge up to the outer query.
- * Right now we cannot do so, because the outer query's canonical
- * pathkey sets are already frozen when this is called. Instead
- * we prefer the one that has the highest "score" (number of
- * canonical pathkey peers, plus one if it matches the outer
- * query_pathkeys). This is the most likely to be useful in the
- * outer query.
+ * knowledge that multiple items are effectively equal). Each element
+ * might match none, one, or more of the output columns that are
+ * visible to the outer query. This means we may have multiple
+ * possible representations of the sub_pathkey in the context of the
+ * outer query. Ideally we would generate them all and put them all
+ * into a pathkey list of the outer query, thereby propagating
+ * equality knowledge up to the outer query. Right now we cannot do
+ * so, because the outer query's canonical pathkey sets are already
+ * frozen when this is called. Instead we prefer the one that has the
+ * highest "score" (number of canonical pathkey peers, plus one if it
+ * matches the outer query_pathkeys). This is the most likely to be
+ * useful in the outer query.
*/
foreach(j, sub_pathkey)
{
return NIL;
/*
- * This used to be quite a complex bit of code, but now that all
- * pathkey sublists start out life canonicalized, we don't have to do
- * a darn thing here! The inner-rel vars we used to need to add are
- * *already* part of the outer pathkey!
+ * This used to be quite a complex bit of code, but now that all pathkey
+ * sublists start out life canonicalized, we don't have to do a darn thing
+ * here! The inner-rel vars we used to need to add are *already* part of
+ * the outer pathkey!
*
- * We do, however, need to truncate the pathkeys list, since it may
- * contain pathkeys that were useful for forming this joinrel but are
+ * We do, however, need to truncate the pathkeys list, since it may contain
+ * pathkeys that were useful for forming this joinrel but are
* uninteresting to higher levels.
*/
return truncate_useless_pathkeys(root, joinrel, outer_pathkeys);
/*
* We can match a pathkey against either left or right side of any
- * mergejoin clause. (We examine both sides since we aren't told
- * if the given pathkeys are for inner or outer input path; no
- * confusion is possible.) Furthermore, if there are multiple
- * matching clauses, take them all. In plain inner-join scenarios
- * we expect only one match, because redundant-mergeclause
- * elimination will have removed any redundant mergeclauses from
- * the input list. However, in outer-join scenarios there might be
- * multiple matches. An example is
+ * mergejoin clause. (We examine both sides since we aren't told if
+ * the given pathkeys are for inner or outer input path; no confusion
+ * is possible.) Furthermore, if there are multiple matching clauses,
+ * take them all. In plain inner-join scenarios we expect only one
+ * match, because redundant-mergeclause elimination will have removed
+ * any redundant mergeclauses from the input list. However, in
+ * outer-join scenarios there might be multiple matches. An example is
*
- * select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and
- * a.v1 = b.v2;
+ * select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and a.v1 =
+ * b.v2;
*
* Given the pathkeys ((a.v1), (a.v2)) it is okay to return all three
- * clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and
- * indeed we *must* do so or we will be unable to form a valid
- * plan.
+ * clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and indeed
+ * we *must* do so or we will be unable to form a valid plan.
*/
foreach(j, restrictinfos)
{
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can
- * still mergejoin if we found at least one mergeclause.)
+ * sort-key positions in the pathkeys are useless. (But we can still
+ * mergejoin if we found at least one mergeclause.)
*/
if (matched_restrictinfos == NIL)
break;
/*
- * If we did find usable mergeclause(s) for this sort-key
- * position, add them to result list.
+ * If we did find usable mergeclause(s) for this sort-key position,
+ * add them to result list.
*/
mergeclauses = list_concat(mergeclauses, matched_restrictinfos);
}
}
/*
- * When we are given multiple merge clauses, it's possible that
- * some clauses refer to the same vars as earlier clauses. There's
- * no reason for us to specify sort keys like (A,B,A) when (A,B)
- * will do --- and adding redundant sort keys makes add_path think
- * that this sort order is different from ones that are really the
- * same, so don't do it. Since we now have a canonicalized
- * pathkey, a simple ptrMember test is sufficient to detect
- * redundant keys.
+ * When we are given multiple merge clauses, it's possible that some
+ * clauses refer to the same vars as earlier clauses. There's no
+ * reason for us to specify sort keys like (A,B,A) when (A,B) will do
+ * --- and adding redundant sort keys makes add_path think that this
+ * sort order is different from ones that are really the same, so
+ * don't do it. Since we now have a canonicalized pathkey, a simple
+ * ptrMember test is sufficient to detect redundant keys.
*/
pathkeys = list_append_unique_ptr(pathkeys, pathkey);
}
cache_mergeclause_pathkeys(root, restrictinfo);
/*
- * We can compare canonical pathkey sublists by simple
- * pointer equality; see compare_pathkeys.
+ * We can compare canonical pathkey sublists by simple pointer
+ * equality; see compare_pathkeys.
*/
if (pathkey == restrictinfo->left_pathkey ||
pathkey == restrictinfo->right_pathkey)
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can
- * still mergejoin if we found at least one mergeclause.)
+ * sort-key positions in the pathkeys are useless. (But we can still
+ * mergejoin if we found at least one mergeclause.)
*/
if (matched)
useful++;
* WHERE ctid IN (tid1, tid2, ...)
*
* There is currently no special support for joins involving CTID; in
- * particular nothing corresponding to best_inner_indexscan(). Since it's
+ * particular nothing corresponding to best_inner_indexscan(). Since it's
* not very useful to store TIDs of one table in another table, there
* doesn't seem to be enough use-case to justify adding a lot of code
* for that.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.24 2005/08/23 20:49:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.25 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* If it is, return the pseudoconstant subnode; if not, return NULL.
*
- * We check that the CTID Var belongs to relation "varno". That is probably
+ * We check that the CTID Var belongs to relation "varno". That is probably
* redundant considering this is only applied to restriction clauses, but
* let's be safe.
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.200 2005/10/13 00:06:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.201 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *tlist, List *scan_clauses,
List **nonlossy_clauses);
static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
- BitmapHeapPath *best_path,
- List *tlist, List *scan_clauses);
+ BitmapHeapPath *best_path,
+ List *tlist, List *scan_clauses);
static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
- List **qual, List **indexqual);
+ List **qual, List **indexqual);
static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
List *tlist, List *scan_clauses);
static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root, Path *best_path,
List **indexstrategy,
List **indexsubtype);
static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index,
- Oid *opclass);
+ Oid *opclass);
static List *get_switched_clauses(List *clauses, Relids outerrelids);
static void copy_path_costsize(Plan *dest, Path *src);
static void copy_plan_costsize(Plan *dest, Plan *src);
List *indexstrategy, List *indexsubtype,
ScanDirection indexscandir);
static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
- List *indexqual,
- List *indexqualorig,
- List *indexstrategy,
- List *indexsubtype);
+ List *indexqual,
+ List *indexqualorig,
+ List *indexstrategy,
+ List *indexsubtype);
static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
- List *qpqual,
- Plan *lefttree,
- List *bitmapqualorig,
- Index scanrelid);
+ List *qpqual,
+ Plan *lefttree,
+ List *bitmapqualorig,
+ Index scanrelid);
static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
List *tideval);
static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
break;
case T_Material:
plan = (Plan *) create_material_plan(root,
- (MaterialPath *) best_path);
+ (MaterialPath *) best_path);
break;
case T_Unique:
plan = (Plan *) create_unique_plan(root,
Scan *plan;
/*
- * For table scans, rather than using the relation targetlist (which
- * is only those Vars actually needed by the query), we prefer to
- * generate a tlist containing all Vars in order. This will allow the
- * executor to optimize away projection of the table tuples, if
- * possible. (Note that planner.c may replace the tlist we generate
- * here, forcing projection to occur.)
+ * For table scans, rather than using the relation targetlist (which is
+ * only those Vars actually needed by the query), we prefer to generate a
+ * tlist containing all Vars in order. This will allow the executor to
+ * optimize away projection of the table tuples, if possible. (Note that
+ * planner.c may replace the tlist we generate here, forcing projection to
+ * occur.)
*/
if (use_physical_tlist(rel))
{
tlist = build_relation_tlist(rel);
/*
- * Extract the relevant restriction clauses from the parent relation;
- * the executor must apply all these restrictions during the scan.
+ * Extract the relevant restriction clauses from the parent relation; the
+ * executor must apply all these restrictions during the scan.
*/
scan_clauses = rel->baserestrictinfo;
case T_BitmapHeapScan:
plan = (Scan *) create_bitmap_scan_plan(root,
- (BitmapHeapPath *) best_path,
+ (BitmapHeapPath *) best_path,
tlist,
scan_clauses);
break;
int i;
/*
- * OK for subquery and function scans; otherwise, can't do it for
- * anything except real relations.
+ * OK for subquery and function scans; otherwise, can't do it for anything
+ * except real relations.
*/
if (rel->rtekind != RTE_RELATION)
{
return false;
/*
- * Can't do it if any system columns are requested, either. (This
- * could possibly be fixed but would take some fragile assumptions in
- * setrefs.c, I think.)
+ * Can't do it if any system columns are requested, either. (This could
+ * possibly be fixed but would take some fragile assumptions in setrefs.c,
+ * I think.)
*/
for (i = rel->min_attr; i <= 0; i++)
{
#ifdef NOT_USED
/*
- * * Expensive function pullups may have pulled local predicates *
- * into this path node. Put them in the qpqual of the plan node. *
- * JMH, 6/15/92
+ * * Expensive function pullups may have pulled local predicates * into
+ * this path node. Put them in the qpqual of the plan node. * JMH,
+ * 6/15/92
*/
if (get_loc_restrictinfo(best_path) != NIL)
set_qpqual((Plan) plan,
list_concat(get_qpqual((Plan) plan),
- get_actual_clauses(get_loc_restrictinfo(best_path))));
+ get_actual_clauses(get_loc_restrictinfo(best_path))));
#endif
return plan;
ListCell *subpaths;
/*
- * It is possible for the subplans list to contain only one entry,
- * or even no entries. Handle these cases specially.
+ * It is possible for the subplans list to contain only one entry, or even
+ * no entries. Handle these cases specially.
*
- * XXX ideally, if there's just one entry, we'd not bother to generate
- * an Append node but just return the single child. At the moment this
- * does not work because the varno of the child scan plan won't match
- * the parent-rel Vars it'll be asked to emit.
+ * XXX ideally, if there's just one entry, we'd not bother to generate an
+ * Append node but just return the single child. At the moment this does
+ * not work because the varno of the child scan plan won't match the
+ * parent-rel Vars it'll be asked to emit.
*/
if (best_path->subpaths == NIL)
{
if (newitems)
{
/*
- * If the top plan node can't do projections, we need to add a
- * Result node to help it along.
+ * If the top plan node can't do projections, we need to add a Result
+ * node to help it along.
*/
if (!is_projection_capable_plan(subplan))
subplan = (Plan *) make_result(newtlist, NULL, subplan);
}
/*
- * Build control information showing which subplan output columns are
- * to be examined by the grouping step. Unfortunately we can't merge this
+ * Build control information showing which subplan output columns are to
+ * be examined by the grouping step. Unfortunately we can't merge this
* with the previous loop, since we didn't then know which version of the
* subplan tlist we'd end up using.
*/
numGroups = (long) Min(best_path->rows, (double) LONG_MAX);
/*
- * Since the Agg node is going to project anyway, we can give it
- * the minimum output tlist, without any stuff we might have added
- * to the subplan tlist.
+ * Since the Agg node is going to project anyway, we can give it the
+ * minimum output tlist, without any stuff we might have added to the
+ * subplan tlist.
*/
plan = (Plan *) make_agg(root,
build_relation_tlist(best_path->path.parent),
stripped_indexquals = get_actual_clauses(indexquals);
/*
- * The executor needs a copy with the indexkey on the left of each
- * clause and with index attr numbers substituted for table ones. This
- * pass also gets strategy info and looks for "lossy" operators.
+ * The executor needs a copy with the indexkey on the left of each clause
+ * and with index attr numbers substituted for table ones. This pass also
+ * gets strategy info and looks for "lossy" operators.
*/
fix_indexqual_references(indexquals, best_path,
&fixed_indexquals,
/*
* If this is an innerjoin scan, the indexclauses will contain join
- * clauses that are not present in scan_clauses (since the passed-in
- * value is just the rel's baserestrictinfo list). We must add these
- * clauses to scan_clauses to ensure they get checked. In most cases
- * we will remove the join clauses again below, but if a join clause
- * contains a special operator, we need to make sure it gets into the
- * scan_clauses.
+ * clauses that are not present in scan_clauses (since the passed-in value
+ * is just the rel's baserestrictinfo list). We must add these clauses to
+ * scan_clauses to ensure they get checked. In most cases we will remove
+ * the join clauses again below, but if a join clause contains a special
+ * operator, we need to make sure it gets into the scan_clauses.
*
* Note: pointer comparison should be enough to determine RestrictInfo
* matches.
scan_clauses = list_union_ptr(scan_clauses, best_path->indexclauses);
/*
- * The qpqual list must contain all restrictions not automatically
- * handled by the index. All the predicates in the indexquals will be
- * checked (either by the index itself, or by nodeIndexscan.c), but if
- * there are any "special" operators involved then they must be included
- * in qpqual. Also, any lossy index operators must be rechecked in
- * the qpqual. The upshot is that qpqual must contain scan_clauses
- * minus whatever appears in nonlossy_indexquals.
+ * The qpqual list must contain all restrictions not automatically handled
+ * by the index. All the predicates in the indexquals will be checked
+ * (either by the index itself, or by nodeIndexscan.c), but if there are
+ * any "special" operators involved then they must be included in qpqual.
+ * Also, any lossy index operators must be rechecked in the qpqual. The
+ * upshot is that qpqual must contain scan_clauses minus whatever appears
+ * in nonlossy_indexquals.
*
- * In normal cases simple pointer equality checks will be enough to
- * spot duplicate RestrictInfos, so we try that first. In some situations
- * (particularly with OR'd index conditions) we may have scan_clauses
- * that are not equal to, but are logically implied by, the index quals;
- * so we also try a predicate_implied_by() check to see if we can discard
- * quals that way. (predicate_implied_by assumes its first input contains
- * only immutable functions, so we have to check that.) We can also
- * discard quals that are implied by a partial index's predicate.
+ * In normal cases simple pointer equality checks will be enough to spot
+ * duplicate RestrictInfos, so we try that first. In some situations
+ * (particularly with OR'd index conditions) we may have scan_clauses that
+ * are not equal to, but are logically implied by, the index quals; so we
+ * also try a predicate_implied_by() check to see if we can discard quals
+ * that way. (predicate_implied_by assumes its first input contains only
+ * immutable functions, so we have to check that.) We can also discard
+ * quals that are implied by a partial index's predicate.
*
- * While at it, we strip off the RestrictInfos to produce a list of
- * plain expressions.
+ * While at it, we strip off the RestrictInfos to produce a list of plain
+ * expressions.
*/
qpqual = NIL;
foreach(l, scan_clauses)
continue;
if (!contain_mutable_functions((Node *) rinfo->clause))
{
- List *clausel = list_make1(rinfo->clause);
+ List *clausel = list_make1(rinfo->clause);
if (predicate_implied_by(clausel, nonlossy_indexquals))
continue;
scan_clauses = get_actual_clauses(scan_clauses);
/*
- * If this is a innerjoin scan, the indexclauses will contain join
- * clauses that are not present in scan_clauses (since the passed-in
- * value is just the rel's baserestrictinfo list). We must add these
- * clauses to scan_clauses to ensure they get checked. In most cases
- * we will remove the join clauses again below, but if a join clause
- * contains a special operator, we need to make sure it gets into the
- * scan_clauses.
+ * If this is a innerjoin scan, the indexclauses will contain join clauses
+ * that are not present in scan_clauses (since the passed-in value is just
+ * the rel's baserestrictinfo list). We must add these clauses to
+ * scan_clauses to ensure they get checked. In most cases we will remove
+ * the join clauses again below, but if a join clause contains a special
+ * operator, we need to make sure it gets into the scan_clauses.
*/
if (best_path->isjoininner)
{
}
/*
- * The qpqual list must contain all restrictions not automatically
- * handled by the index. All the predicates in the indexquals will be
- * checked (either by the index itself, or by nodeBitmapHeapscan.c),
- * but if there are any "special" or lossy operators involved then they
- * must be added to qpqual. The upshot is that qpquals must contain
- * scan_clauses minus whatever appears in indexquals.
+ * The qpqual list must contain all restrictions not automatically handled
+ * by the index. All the predicates in the indexquals will be checked
+ * (either by the index itself, or by nodeBitmapHeapscan.c), but if there
+ * are any "special" or lossy operators involved then they must be added
+ * to qpqual. The upshot is that qpquals must contain scan_clauses minus
+ * whatever appears in indexquals.
*
* In normal cases simple equal() checks will be enough to spot duplicate
* clauses, so we try that first. In some situations (particularly with
*
* XXX For the moment, we only consider partial index predicates in the
* simple single-index-scan case. Is it worth trying to be smart about
- * more complex cases? Perhaps create_bitmap_subplan should be made to
+ * more complex cases? Perhaps create_bitmap_subplan should be made to
* include predicate info in what it constructs.
*/
qpqual = NIL;
foreach(l, scan_clauses)
{
- Node *clause = (Node *) lfirst(l);
+ Node *clause = (Node *) lfirst(l);
if (list_member(indexquals, clause))
continue;
if (!contain_mutable_functions(clause))
{
- List *clausel = list_make1(clause);
+ List *clausel = list_make1(clause);
if (predicate_implied_by(clausel, indexquals))
continue;
if (IsA(best_path->bitmapqual, IndexPath))
{
- IndexPath *ipath = (IndexPath *) best_path->bitmapqual;
+ IndexPath *ipath = (IndexPath *) best_path->bitmapqual;
if (predicate_implied_by(clausel, ipath->indexinfo->indpred))
continue;
/*
* There may well be redundant quals among the subplans, since a
* top-level WHERE qual might have gotten used to form several
- * different index quals. We don't try exceedingly hard to
- * eliminate redundancies, but we do eliminate obvious duplicates
- * by using list_concat_unique.
+ * different index quals. We don't try exceedingly hard to eliminate
+ * redundancies, but we do eliminate obvious duplicates by using
+ * list_concat_unique.
*/
foreach(l, apath->bitmapquals)
{
- Plan *subplan;
- List *subqual;
- List *subindexqual;
+ Plan *subplan;
+ List *subqual;
+ List *subindexqual;
subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
&subqual, &subindexqual);
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
*/
foreach(l, opath->bitmapquals)
{
- Plan *subplan;
- List *subqual;
- List *subindexqual;
+ Plan *subplan;
+ List *subqual;
+ List *subindexqual;
subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
&subqual, &subindexqual);
plan->plan_rows =
clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
plan->plan_width = 0; /* meaningless */
+
/*
* If there were constant-TRUE subquals, the OR reduces to constant
* TRUE. Also, avoid generating one-element ORs, which could happen
}
else if (IsA(bitmapqual, IndexPath))
{
- IndexPath *ipath = (IndexPath *) bitmapqual;
- IndexScan *iscan;
- List *nonlossy_clauses;
+ IndexPath *ipath = (IndexPath *) bitmapqual;
+ IndexScan *iscan;
+ List *nonlossy_clauses;
/* Use the regular indexscan plan build machinery... */
iscan = create_indexscan_plan(root, ipath, NIL, NIL,
if (IsA(best_path->innerjoinpath, IndexPath))
{
/*
- * An index is being used to reduce the number of tuples scanned
- * in the inner relation. If there are join clauses being used
- * with the index, we may remove those join clauses from the list
- * of clauses that have to be checked as qpquals at the join node.
+ * An index is being used to reduce the number of tuples scanned in
+ * the inner relation. If there are join clauses being used with the
+ * index, we may remove those join clauses from the list of clauses
+ * that have to be checked as qpquals at the join node.
*
* We can also remove any join clauses that are redundant with those
- * being used in the index scan; prior redundancy checks will not
- * have caught this case because the join clauses would never have
- * been put in the same joininfo list.
+ * being used in the index scan; prior redundancy checks will not have
+ * caught this case because the join clauses would never have been put
+ * in the same joininfo list.
*
- * We can skip this if the index path is an ordinary indexpath and
- * not a special innerjoin path.
+ * We can skip this if the index path is an ordinary indexpath and not a
+ * special innerjoin path.
*/
IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath;
select_nonredundant_join_clauses(root,
joinrestrictclauses,
innerpath->indexclauses,
- IS_OUTER_JOIN(best_path->jointype));
+ IS_OUTER_JOIN(best_path->jointype));
}
}
else if (IsA(best_path->innerjoinpath, BitmapHeapPath))
* Same deal for bitmapped index scans.
*
* Note: both here and above, we ignore any implicit index restrictions
- * associated with the use of partial indexes. This is OK because
+ * associated with the use of partial indexes. This is OK because
* we're only trying to prove we can dispense with some join quals;
* failing to prove that doesn't result in an incorrect plan. It is
- * the right way to proceed because adding more quals to the stuff
- * we got from the original query would just make it harder to detect
+ * the right way to proceed because adding more quals to the stuff we
+ * got from the original query would just make it harder to detect
* duplication.
*/
BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath;
select_nonredundant_join_clauses(root,
joinrestrictclauses,
bitmapclauses,
- IS_OUTER_JOIN(best_path->jointype));
+ IS_OUTER_JOIN(best_path->jointype));
}
}
}
/*
- * Remove the mergeclauses from the list of join qual clauses, leaving
- * the list of quals that must be checked as qpquals.
+ * Remove the mergeclauses from the list of join qual clauses, leaving the
+ * list of quals that must be checked as qpquals.
*/
mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
joinclauses = list_difference(joinclauses, mergeclauses);
/*
- * Rearrange mergeclauses, if needed, so that the outer variable is
- * always on the left.
+ * Rearrange mergeclauses, if needed, so that the outer variable is always
+ * on the left.
*/
mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
- best_path->jpath.outerjoinpath->parent->relids);
+ best_path->jpath.outerjoinpath->parent->relids);
/* Sort clauses into best execution order */
/* NB: do NOT reorder the mergeclauses */
/*
* Create explicit sort nodes for the outer and inner join paths if
- * necessary. The sort cost was already accounted for in the path.
- * Make sure there are no excess columns in the inputs if sorting.
+ * necessary. The sort cost was already accounted for in the path. Make
+ * sure there are no excess columns in the inputs if sorting.
*/
if (best_path->outersortkeys)
{
}
/*
- * Remove the hashclauses from the list of join qual clauses, leaving
- * the list of quals that must be checked as qpquals.
+ * Remove the hashclauses from the list of join qual clauses, leaving the
+ * list of quals that must be checked as qpquals.
*/
hashclauses = get_actual_clauses(best_path->path_hashclauses);
joinclauses = list_difference(joinclauses, hashclauses);
/*
- * Rearrange hashclauses, if needed, so that the outer variable is
- * always on the left.
+ * Rearrange hashclauses, if needed, so that the outer variable is always
+ * on the left.
*/
hashclauses = get_switched_clauses(best_path->path_hashclauses,
- best_path->jpath.outerjoinpath->parent->relids);
+ best_path->jpath.outerjoinpath->parent->relids);
/* Sort clauses into best execution order */
joinclauses = order_qual_clauses(root, joinclauses);
/*
* Make a copy that will become the fixed clause.
*
- * We used to try to do a shallow copy here, but that fails if there
- * is a subplan in the arguments of the opclause. So just do a
- * full copy.
+ * We used to try to do a shallow copy here, but that fails if there is a
+ * subplan in the arguments of the opclause. So just do a full copy.
*/
newclause = (OpExpr *) copyObject((Node *) clause);
/*
- * Check to see if the indexkey is on the right; if so, commute
- * the clause. The indexkey should be the side that refers to
- * (only) the base relation.
+ * Check to see if the indexkey is on the right; if so, commute the
+ * clause. The indexkey should be the side that refers to (only) the
+ * base relation.
*/
if (!bms_equal(rinfo->left_relids, index->rel->relids))
CommuteClause(newclause);
/*
- * Now, determine which index attribute this is, change the
- * indexkey operand as needed, and get the index opclass.
+ * Now, determine which index attribute this is, change the indexkey
+ * operand as needed, and get the index opclass.
*/
linitial(newclause->args) =
fix_indexqual_operand(linitial(newclause->args),
*fixed_indexquals = lappend(*fixed_indexquals, newclause);
/*
- * Look up the (possibly commuted) operator in the operator class
- * to get its strategy numbers and the recheck indicator. This
- * also double-checks that we found an operator matching the
- * index.
+ * Look up the (possibly commuted) operator in the operator class to
+ * get its strategy numbers and the recheck indicator. This also
+ * double-checks that we found an operator matching the index.
*/
get_op_opclass_properties(newclause->opno, opclass,
&stratno, &stratsubtype, &recheck);
fix_indexqual_operand(Node *node, IndexOptInfo *index, Oid *opclass)
{
/*
- * We represent index keys by Var nodes having the varno of the base
- * table but varattno equal to the index's attribute number (index
- * column position). This is a bit hokey ... would be cleaner to use
- * a special-purpose node type that could not be mistaken for a
- * regular Var. But it will do for now.
+ * We represent index keys by Var nodes having the varno of the base table
+ * but varattno equal to the index's attribute number (index column
+ * position). This is a bit hokey ... would be cleaner to use a
+ * special-purpose node type that could not be mistaken for a regular Var.
+ * But it will do for now.
*/
Var *result;
int pos;
if (bms_is_subset(restrictinfo->right_relids, outerrelids))
{
/*
- * Duplicate just enough of the structure to allow commuting
- * the clause without changing the original list. Could use
+ * Duplicate just enough of the structure to allow commuting the
+ * clause without changing the original list. Could use
* copyObject, but a complete deep copy is overkill.
*/
OpExpr *temp = makeNode(OpExpr);
Plan *plan = &node->scan.plan;
/*
- * Cost is figured here for the convenience of prepunion.c. Note this
- * is only correct for the case where qpqual is empty; otherwise
- * caller should overwrite cost with a better estimate.
+ * Cost is figured here for the convenience of prepunion.c. Note this is
+ * only correct for the case where qpqual is empty; otherwise caller
+ * should overwrite cost with a better estimate.
*/
copy_plan_costsize(plan, subplan);
plan->total_cost += cpu_tuple_cost * subplan->plan_rows;
ListCell *subnode;
/*
- * Compute cost as sum of subplan costs. We charge nothing extra for
- * the Append itself, which perhaps is too optimistic, but since it
- * doesn't do any selection or projection, it is a pretty cheap node.
+ * Compute cost as sum of subplan costs. We charge nothing extra for the
+ * Append itself, which perhaps is too optimistic, but since it doesn't do
+ * any selection or projection, it is a pretty cheap node.
*/
plan->startup_cost = 0;
plan->total_cost = 0;
copy_plan_costsize(plan, lefttree);
/*
- * For plausibility, make startup & total costs equal total cost of
- * input plan; this only affects EXPLAIN display not decisions.
+ * For plausibility, make startup & total costs equal total cost of input
+ * plan; this only affects EXPLAIN display not decisions.
*/
plan->startup_cost = plan->total_cost;
plan->targetlist = copyObject(lefttree->targetlist);
Oid *sortOperators;
/*
- * We will need at most list_length(pathkeys) sort columns; possibly
- * less
+ * We will need at most list_length(pathkeys) sort columns; possibly less
*/
numsortkeys = list_length(pathkeys);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
/*
* We can sort by any one of the sort key items listed in this
* sublist. For now, we take the first one that corresponds to an
- * available Var in the tlist. If there isn't any, use the first
- * one that is an expression in the input's vars.
+ * available Var in the tlist. If there isn't any, use the first one
+ * that is an expression in the input's vars.
*
- * XXX if we have a choice, is there any way of figuring out which
- * might be cheapest to execute? (For example, int4lt is likely
- * much cheaper to execute than numericlt, but both might appear
- * in the same pathkey sublist...) Not clear that we ever will
- * have a choice in practice, so it may not matter.
+ * XXX if we have a choice, is there any way of figuring out which might
+ * be cheapest to execute? (For example, int4lt is likely much
+ * cheaper to execute than numericlt, but both might appear in the
+ * same pathkey sublist...) Not clear that we ever will have a choice
+ * in practice, so it may not matter.
*/
foreach(j, keysublist)
{
}
/*
- * The column might already be selected as a sort key, if the
- * pathkeys contain duplicate entries. (This can happen in
- * scenarios where multiple mergejoinable clauses mention the same
- * var, for example.) So enter it only once in the sort arrays.
+ * The column might already be selected as a sort key, if the pathkeys
+ * contain duplicate entries. (This can happen in scenarios where
+ * multiple mergejoinable clauses mention the same var, for example.)
+ * So enter it only once in the sort arrays.
*/
numsortkeys = add_sort_column(tle->resno, pathkey->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
}
Assert(numsortkeys > 0);
Oid *sortOperators;
/*
- * We will need at most list_length(sortcls) sort columns; possibly
- * less
+ * We will need at most list_length(sortcls) sort columns; possibly less
*/
numsortkeys = list_length(sortcls);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
* redundantly.
*/
numsortkeys = add_sort_column(tle->resno, sortcl->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
}
Assert(numsortkeys > 0);
Oid *sortOperators;
/*
- * We will need at most list_length(groupcls) sort columns; possibly
- * less
+ * We will need at most list_length(groupcls) sort columns; possibly less
*/
numsortkeys = list_length(groupcls);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
* redundantly.
*/
numsortkeys = add_sort_column(tle->resno, grpcl->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
grpno++;
}
plan->total_cost = agg_path.total_cost;
/*
- * We will produce a single output tuple if not grouping, and a tuple
- * per group otherwise.
+ * We will produce a single output tuple if not grouping, and a tuple per
+ * group otherwise.
*/
if (aggstrategy == AGG_PLAIN)
plan->plan_rows = 1;
plan->plan_rows = numGroups;
/*
- * We also need to account for the cost of evaluation of the qual (ie,
- * the HAVING clause) and the tlist. Note that cost_qual_eval doesn't
- * charge anything for Aggref nodes; this is okay since they are
- * really comparable to Vars.
+ * We also need to account for the cost of evaluation of the qual (ie, the
+ * HAVING clause) and the tlist. Note that cost_qual_eval doesn't charge
+ * anything for Aggref nodes; this is okay since they are really
+ * comparable to Vars.
*
- * See notes in grouping_planner about why this routine and make_group
- * are the only ones in this file that worry about tlist eval cost.
+ * See notes in grouping_planner about why this routine and make_group are
+ * the only ones in this file that worry about tlist eval cost.
*/
if (qual)
{
plan->plan_rows = numGroups;
/*
- * We also need to account for the cost of evaluation of the qual (ie,
- * the HAVING clause) and the tlist.
+ * We also need to account for the cost of evaluation of the qual (ie, the
+ * HAVING clause) and the tlist.
*
- * XXX this double-counts the cost of evaluation of any expressions used
- * for grouping, since in reality those will have been evaluated at a
- * lower plan level and will only be copied by the Group node. Worth
- * fixing?
+ * XXX this double-counts the cost of evaluation of any expressions used for
+ * grouping, since in reality those will have been evaluated at a lower
+ * plan level and will only be copied by the Group node. Worth fixing?
*
- * See notes in grouping_planner about why this routine and make_agg are
- * the only ones in this file that worry about tlist eval cost.
+ * See notes in grouping_planner about why this routine and make_agg are the
+ * only ones in this file that worry about tlist eval cost.
*/
if (qual)
{
copy_plan_costsize(plan, lefttree);
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples. (XXX
- * probably this is an overestimate.)
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+ * all columns get compared at most of the tuples. (XXX probably this is
+ * an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
/*
- * plan->plan_rows is left as a copy of the input subplan's plan_rows;
- * ie, we assume the filter removes nothing. The caller must alter
- * this if he has a better idea.
+ * plan->plan_rows is left as a copy of the input subplan's plan_rows; ie,
+ * we assume the filter removes nothing. The caller must alter this if he
+ * has a better idea.
*/
plan->targetlist = copyObject(lefttree->targetlist);
plan->righttree = NULL;
/*
- * convert SortClause list into array of attr indexes, as wanted by
- * exec
+ * convert SortClause list into array of attr indexes, as wanted by exec
*/
Assert(numCols > 0);
uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
copy_plan_costsize(plan, lefttree);
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples.
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+ * all columns get compared at most of the tuples.
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
plan->righttree = NULL;
/*
- * convert SortClause list into array of attr indexes, as wanted by
- * exec
+ * convert SortClause list into array of attr indexes, as wanted by exec
*/
Assert(numCols > 0);
dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
* building a subquery then it's important to report correct info to the
* outer planner.
*
- * When the offset or count couldn't be estimated, use 10% of the
- * estimated number of rows emitted from the subplan.
+ * When the offset or count couldn't be estimated, use 10% of the estimated
+ * number of rows emitted from the subplan.
*/
if (offset_est != 0)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.109 2005/09/28 21:17:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.110 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
result = bms_add_members(result,
distribute_quals_to_rels(root,
lfirst(l),
- below_outer_join));
+ below_outer_join));
}
/*
ListCell *qual;
/*
- * Order of operations here is subtle and critical. First we
- * recurse to handle sub-JOINs. Their join quals will be placed
- * without regard for whether this level is an outer join, which
- * is correct. Then we place our own join quals, which are
- * restricted by lower outer joins in any case, and are forced to
- * this level if this is an outer join and they mention the outer
- * side. Finally, if this is an outer join, we mark baserels
- * contained within the inner side(s) with our own rel set; this
- * will prevent quals above us in the join tree that use those
- * rels from being pushed down below this level. (It's okay for
- * upper quals to be pushed down to the outer side, however.)
+ * Order of operations here is subtle and critical. First we recurse
+ * to handle sub-JOINs. Their join quals will be placed without
+ * regard for whether this level is an outer join, which is correct.
+ * Then we place our own join quals, which are restricted by lower
+ * outer joins in any case, and are forced to this level if this is an
+ * outer join and they mention the outer side. Finally, if this is an
+ * outer join, we mark baserels contained within the inner side(s)
+ * with our own rel set; this will prevent quals above us in the join
+ * tree that use those rels from being pushed down below this level.
+ * (It's okay for upper quals to be pushed down to the outer side,
+ * however.)
*/
switch (j->jointype)
{
case JOIN_UNION:
/*
- * This is where we fail if upper levels of planner
- * haven't rewritten UNION JOIN as an Append ...
+ * This is where we fail if upper levels of planner haven't
+ * rewritten UNION JOIN as an Append ...
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("UNION JOIN is not implemented")));
- nonnullable_rels = NULL; /* keep compiler quiet */
+ nonnullable_rels = NULL; /* keep compiler quiet */
nullable_rels = NULL;
break;
default:
elog(ERROR, "unrecognized join type: %d",
(int) j->jointype);
- nonnullable_rels = NULL; /* keep compiler quiet */
+ nonnullable_rels = NULL; /* keep compiler quiet */
nullable_rels = NULL;
break;
}
RelOptInfo *rel = find_base_rel(root, relno);
/*
- * Since we do this bottom-up, any outer-rels previously marked
- * should be within the new outer join set.
+ * Since we do this bottom-up, any outer-rels previously marked should
+ * be within the new outer join set.
*/
Assert(bms_is_subset(rel->outerjoinset, outerrels));
/*
* Presently the executor cannot support FOR UPDATE/SHARE marking of
* rels appearing on the nullable side of an outer join. (It's
- * somewhat unclear what that would mean, anyway: what should we
- * mark when a result row is generated from no element of the
- * nullable relation?) So, complain if target rel is FOR UPDATE/SHARE.
- * It's sufficient to make this check once per rel, so do it only
- * if rel wasn't already known nullable.
+ * somewhat unclear what that would mean, anyway: what should we mark
+ * when a result row is generated from no element of the nullable
+ * relation?) So, complain if target rel is FOR UPDATE/SHARE. It's
+ * sufficient to make this check once per rel, so do it only if rel
+ * wasn't already known nullable.
*/
if (rel->outerjoinset == NULL)
{
/*
* If the clause is variable-free, we force it to be evaluated at its
* original syntactic level. Note that this should not happen for
- * top-level clauses, because query_planner() special-cases them. But
- * it will happen for variable-free JOIN/ON clauses. We don't have to
- * be real smart about such a case, we just have to be correct.
+ * top-level clauses, because query_planner() special-cases them. But it
+ * will happen for variable-free JOIN/ON clauses. We don't have to be
+ * real smart about such a case, we just have to be correct.
*/
if (bms_is_empty(relids))
relids = qualscope;
/*
* If the qual came from implied-equality deduction, we always
* evaluate the qual at its natural semantic level. It is the
- * responsibility of the deducer not to create any quals that
- * should be delayed by outer-join rules.
+ * responsibility of the deducer not to create any quals that should
+ * be delayed by outer-join rules.
*/
Assert(bms_equal(relids, qualscope));
/* Needn't feed it back for more deductions */
else if (bms_overlap(relids, outerjoin_nonnullable))
{
/*
- * The qual is attached to an outer join and mentions (some of
- * the) rels on the nonnullable side. Force the qual to be
- * evaluated exactly at the level of joining corresponding to the
- * outer join. We cannot let it get pushed down into the
- * nonnullable side, since then we'd produce no output rows,
- * rather than the intended single null-extended row, for any
- * nonnullable-side rows failing the qual.
+ * The qual is attached to an outer join and mentions (some of the)
+ * rels on the nonnullable side. Force the qual to be evaluated
+ * exactly at the level of joining corresponding to the outer join. We
+ * cannot let it get pushed down into the nonnullable side, since then
+ * we'd produce no output rows, rather than the intended single
+ * null-extended row, for any nonnullable-side rows failing the qual.
*
- * Note: an outer-join qual that mentions only nullable-side rels can
- * be pushed down into the nullable side without changing the join
+ * Note: an outer-join qual that mentions only nullable-side rels can be
+ * pushed down into the nullable side without changing the join
* result, so we treat it the same as an ordinary inner-join qual,
* except for not setting maybe_equijoin (see below).
*/
relids = qualscope;
+
/*
- * We can't use such a clause to deduce equijoin (the left and
- * right sides might be unequal above the join because one of
- * them has gone to NULL) ... but we might be able to use it
- * for more limited purposes. Note: for the current uses of
- * deductions from an outer-join clause, it seems safe to make
- * the deductions even when the clause is below a higher-level
- * outer join; so we do not check below_outer_join here.
+ * We can't use such a clause to deduce equijoin (the left and right
+ * sides might be unequal above the join because one of them has gone
+ * to NULL) ... but we might be able to use it for more limited
+ * purposes. Note: for the current uses of deductions from an
+ * outer-join clause, it seems safe to make the deductions even when
+ * the clause is below a higher-level outer join; so we do not check
+ * below_outer_join here.
*/
maybe_equijoin = false;
maybe_outer_join = true;
else
{
/*
- * For a non-outer-join qual, we can evaluate the qual as soon as
- * (1) we have all the rels it mentions, and (2) we are at or
- * above any outer joins that can null any of these rels and are
- * below the syntactic location of the given qual. To enforce the
- * latter, scan the base rels listed in relids, and merge their
- * outer-join sets into the clause's own reference list. At the
- * time we are called, the outerjoinset of each baserel will show
- * exactly those outer joins that are below the qual in the join
- * tree.
+ * For a non-outer-join qual, we can evaluate the qual as soon as (1)
+ * we have all the rels it mentions, and (2) we are at or above any
+ * outer joins that can null any of these rels and are below the
+ * syntactic location of the given qual. To enforce the latter, scan
+ * the base rels listed in relids, and merge their outer-join sets
+ * into the clause's own reference list. At the time we are called,
+ * the outerjoinset of each baserel will show exactly those outer
+ * joins that are below the qual in the join tree.
*/
Relids addrelids = NULL;
Relids tmprelids;
if (bms_is_subset(addrelids, relids))
{
/*
- * Qual is not delayed by any lower outer-join restriction.
- * If it is not itself below or within an outer join, we
- * can consider it "valid everywhere", so consider feeding
- * it to the equijoin machinery. (If it is within an outer
- * join, we can't consider it "valid everywhere": once the
- * contained variables have gone to NULL, we'd be asserting
- * things like NULL = NULL, which is not true.)
+ * Qual is not delayed by any lower outer-join restriction. If it
+ * is not itself below or within an outer join, we can consider it
+ * "valid everywhere", so consider feeding it to the equijoin
+ * machinery. (If it is within an outer join, we can't consider
+ * it "valid everywhere": once the contained variables have gone
+ * to NULL, we'd be asserting things like NULL = NULL, which is
+ * not true.)
*/
if (!below_outer_join && outerjoin_nonnullable == NULL)
maybe_equijoin = true;
Assert(bms_is_subset(relids, qualscope));
/*
- * Because application of the qual will be delayed by outer
- * join, we mustn't assume its vars are equal everywhere.
+ * Because application of the qual will be delayed by outer join,
+ * we mustn't assume its vars are equal everywhere.
*/
maybe_equijoin = false;
}
}
/*
- * Mark the qual as "pushed down" if it can be applied at a level
- * below its original syntactic level. This allows us to distinguish
- * original JOIN/ON quals from higher-level quals pushed down to the
- * same joinrel. A qual originating from WHERE is always considered
- * "pushed down".
+ * Mark the qual as "pushed down" if it can be applied at a level below
+ * its original syntactic level. This allows us to distinguish original
+ * JOIN/ON quals from higher-level quals pushed down to the same joinrel.
+ * A qual originating from WHERE is always considered "pushed down".
*/
if (!is_pushed_down)
is_pushed_down = !bms_equal(relids, qualscope);
rel = find_base_rel(root, bms_singleton_member(relids));
/*
- * Check for a "mergejoinable" clause even though it's not a
- * join clause. This is so that we can recognize that "a.x =
- * a.y" makes x and y eligible to be considered equal, even
- * when they belong to the same rel. Without this, we would
- * not recognize that "a.x = a.y AND a.x = b.z AND a.y = c.q"
- * allows us to consider z and q equal after their rels are
- * joined.
+ * Check for a "mergejoinable" clause even though it's not a join
+ * clause. This is so that we can recognize that "a.x = a.y"
+ * makes x and y eligible to be considered equal, even when they
+ * belong to the same rel. Without this, we would not recognize
+ * that "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to
+ * consider z and q equal after their rels are joined.
*/
check_mergejoinable(restrictinfo);
/*
- * If the clause was deduced from implied equality, check to
- * see whether it is redundant with restriction clauses we
- * already have for this rel. Note we cannot apply this check
- * to user-written clauses, since we haven't found the
- * canonical pathkey sets yet while processing user clauses.
- * (NB: no comparable check is done in the join-clause case;
- * redundancy will be detected when the join clause is moved
- * into a join rel's restriction list.)
+ * If the clause was deduced from implied equality, check to see
+ * whether it is redundant with restriction clauses we already
+ * have for this rel. Note we cannot apply this check to
+ * user-written clauses, since we haven't found the canonical
+ * pathkey sets yet while processing user clauses. (NB: no
+ * comparable check is done in the join-clause case; redundancy
+ * will be detected when the join clause is moved into a join
+ * rel's restriction list.)
*/
if (!is_deduced ||
!qual_is_redundant(root, restrictinfo,
case BMS_MULTIPLE:
/*
- * 'clause' is a join clause, since there is more than one rel
- * in the relid set.
+ * 'clause' is a join clause, since there is more than one rel in
+ * the relid set.
*/
/*
* Check for hash or mergejoinable operators.
*
- * We don't bother setting the hashjoin info if we're not going
- * to need it. We do want to know about mergejoinable ops in
- * all cases, however, because we use mergejoinable ops for
- * other purposes such as detecting redundant clauses.
+ * We don't bother setting the hashjoin info if we're not going to
+ * need it. We do want to know about mergejoinable ops in all
+ * cases, however, because we use mergejoinable ops for other
+ * purposes such as detecting redundant clauses.
*/
check_mergejoinable(restrictinfo);
if (enable_hashjoin)
/*
* Add vars used in the join clause to targetlists of their
- * relations, so that they will be emitted by the plan nodes
- * that scan those relations (else they won't be available at
- * the join node!).
+ * relations, so that they will be emitted by the plan nodes that
+ * scan those relations (else they won't be available at the join
+ * node!).
*/
vars = pull_var_clause(clause, false);
add_vars_to_targetlist(root, vars, relids);
default:
/*
- * 'clause' references no rels, and therefore we have no place
- * to attach it. Shouldn't get here if callers are working
- * properly.
+ * 'clause' references no rels, and therefore we have no place to
+ * attach it. Shouldn't get here if callers are working properly.
*/
elog(ERROR, "cannot cope with variable-free clause");
break;
}
/*
- * If the clause has a mergejoinable operator, we may be able to
- * deduce more things from it under the principle of transitivity.
+ * If the clause has a mergejoinable operator, we may be able to deduce
+ * more things from it under the principle of transitivity.
*
* If it is not an outer-join qualification nor bubbled up due to an outer
* join, then the two sides represent equivalent PathKeyItems for path
/*
* If the exprs involve a single rel, we need to look at that rel's
- * baserestrictinfo list. If multiple rels, we can scan the joininfo
- * list of any of 'em.
+ * baserestrictinfo list. If multiple rels, we can scan the joininfo list
+ * of any of 'em.
*/
if (membership == BMS_SINGLETON)
{
}
/*
- * Scan to see if equality is already known. If so, we're done in the
- * add case, and done after removing it in the delete case.
+ * Scan to see if equality is already known. If so, we're done in the add
+ * case, and done after removing it in the delete case.
*/
foreach(itm, restrictlist)
{
{
/* delete it from local restrictinfo list */
rel1->baserestrictinfo = list_delete_ptr(rel1->baserestrictinfo,
- restrictinfo);
+ restrictinfo);
}
else
{
return;
/*
- * This equality is new information, so construct a clause
- * representing it to add to the query data structures.
+ * This equality is new information, so construct a clause representing it
+ * to add to the query data structures.
*/
ltype = exprType(item1);
rtype = exprType(item2);
if (!HeapTupleIsValid(eq_operator))
{
/*
- * Would it be safe to just not add the equality to the query if
- * we have no suitable equality operator for the combination of
+ * Would it be safe to just not add the equality to the query if we
+ * have no suitable equality operator for the combination of
* datatypes? NO, because sortkey selection may screw up anyway.
*/
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for types %s and %s",
- format_type_be(ltype), format_type_be(rtype))));
+ errmsg("could not identify an equality operator for types %s and %s",
+ format_type_be(ltype), format_type_be(rtype))));
}
pgopform = (Form_pg_operator) GETSTRUCT(eq_operator);
/*
* Push the new clause into all the appropriate restrictinfo lists.
*
- * Note: we mark the qual "pushed down" to ensure that it can never be
- * taken for an original JOIN/ON clause.
+ * Note: we mark the qual "pushed down" to ensure that it can never be taken
+ * for an original JOIN/ON clause.
*/
distribute_qual_to_rels(root, (Node *) clause,
true, true, false, NULL, relids);
return false;
/*
- * Scan existing quals to find those referencing same pathkeys.
- * Usually there will be few, if any, so build a list of just the
- * interesting ones.
+ * Scan existing quals to find those referencing same pathkeys. Usually
+ * there will be few, if any, so build a list of just the interesting
+ * ones.
*/
oldquals = NIL;
foreach(olditem, restrictlist)
/*
* Now, we want to develop a list of exprs that are known equal to the
- * left side of the new qual. We traverse the old-quals list
- * repeatedly to transitively expand the exprs list. If at any point
- * we find we can reach the right-side expr of the new qual, we are
- * done. We give up when we can't expand the equalexprs list any
- * more.
+ * left side of the new qual. We traverse the old-quals list repeatedly
+ * to transitively expand the exprs list. If at any point we find we can
+ * reach the right-side expr of the new qual, we are done. We give up
+ * when we can't expand the equalexprs list any more.
*/
equalexprs = list_make1(newleft);
do
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.9 2005/09/21 19:15:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.10 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool find_minmax_aggs_walker(Node *node, List **context);
static bool build_minmax_path(PlannerInfo *root, RelOptInfo *rel,
- MinMaxAggInfo *info);
+ MinMaxAggInfo *info);
static ScanDirection match_agg_to_index_col(MinMaxAggInfo *info,
- IndexOptInfo *index, int indexcol);
+ IndexOptInfo *index, int indexcol);
static void make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info,
- List *constant_quals);
-static Node *replace_aggs_with_params_mutator(Node *node, List **context);
+ List *constant_quals);
+static Node *replace_aggs_with_params_mutator(Node *node, List **context);
static Oid fetch_agg_sort_op(Oid aggfnoid);
* generic scan-all-the-rows plan.
*
* We are passed the preprocessed tlist, and the best path
- * devised for computing the input of a standard Agg node. If we are able
+ * devised for computing the input of a standard Agg node. If we are able
* to optimize all the aggregates, and the result is estimated to be cheaper
* than the generic aggregate method, then generate and return a Plan that
* does it that way. Otherwise, return NULL.
if (!parse->hasAggs)
return NULL;
- Assert(!parse->setOperations); /* shouldn't get here if a setop */
- Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */
+ Assert(!parse->setOperations); /* shouldn't get here if a setop */
+ Assert(parse->rowMarks == NIL); /* nor if FOR UPDATE */
/*
* Reject unoptimizable cases.
*
- * We don't handle GROUP BY, because our current implementations of
- * grouping require looking at all the rows anyway, and so there's not
- * much point in optimizing MIN/MAX.
+ * We don't handle GROUP BY, because our current implementations of grouping
+ * require looking at all the rows anyway, and so there's not much point
+ * in optimizing MIN/MAX.
*/
if (parse->groupClause)
return NULL;
/*
- * We also restrict the query to reference exactly one table, since
- * join conditions can't be handled reasonably. (We could perhaps
- * handle a query containing cartesian-product joins, but it hardly
- * seems worth the trouble.)
+ * We also restrict the query to reference exactly one table, since join
+ * conditions can't be handled reasonably. (We could perhaps handle a
+ * query containing cartesian-product joins, but it hardly seems worth the
+ * trouble.)
*/
Assert(parse->jointree != NULL && IsA(parse->jointree, FromExpr));
if (list_length(parse->jointree->fromlist) != 1)
rel = find_base_rel(root, rtr->rtindex);
/*
- * Also reject cases with subplans or volatile functions in WHERE.
- * This may be overly paranoid, but it's not entirely clear if the
+ * Also reject cases with subplans or volatile functions in WHERE. This
+ * may be overly paranoid, but it's not entirely clear if the
* transformation is safe then.
*/
if (contain_subplans(parse->jointree->quals) ||
return NULL;
/*
- * Since this optimization is not applicable all that often, we want
- * to fall out before doing very much work if possible. Therefore
- * we do the work in several passes. The first pass scans the tlist
- * and HAVING qual to find all the aggregates and verify that
- * each of them is a MIN/MAX aggregate. If that succeeds, the second
- * pass looks at each aggregate to see if it is optimizable; if so
- * we make an IndexPath describing how we would scan it. (We do not
- * try to optimize if only some aggs are optimizable, since that means
- * we'll have to scan all the rows anyway.) If that succeeds, we have
- * enough info to compare costs against the generic implementation.
- * Only if that test passes do we build a Plan.
+ * Since this optimization is not applicable all that often, we want to
+ * fall out before doing very much work if possible. Therefore we do the
+ * work in several passes. The first pass scans the tlist and HAVING qual
+ * to find all the aggregates and verify that each of them is a MIN/MAX
+ * aggregate. If that succeeds, the second pass looks at each aggregate
+ * to see if it is optimizable; if so we make an IndexPath describing how
+ * we would scan it. (We do not try to optimize if only some aggs are
+ * optimizable, since that means we'll have to scan all the rows anyway.)
+ * If that succeeds, we have enough info to compare costs against the
+ * generic implementation. Only if that test passes do we build a Plan.
*/
/* Pass 1: find all the aggregates */
/*
* Make the cost comparison.
*
- * Note that we don't include evaluation cost of the tlist here;
- * this is OK since it isn't included in best_path's cost either,
- * and should be the same in either case.
+ * Note that we don't include evaluation cost of the tlist here; this is OK
+ * since it isn't included in best_path's cost either, and should be the
+ * same in either case.
*/
cost_agg(&agg_p, root, AGG_PLAIN, list_length(aggs_list),
0, 0,
return NULL; /* too expensive */
/*
- * OK, we are going to generate an optimized plan. The first thing we
- * need to do is look for any non-variable WHERE clauses that query_planner
- * might have removed from the basic plan. (Normal WHERE clauses will
- * be properly incorporated into the sub-plans by create_plan.) If there
- * are any, they will be in a gating Result node atop the best_path.
- * They have to be incorporated into a gating Result in each sub-plan
- * in order to produce the semantically correct result.
+ * OK, we are going to generate an optimized plan. The first thing we
+ * need to do is look for any non-variable WHERE clauses that
+ * query_planner might have removed from the basic plan. (Normal WHERE
+ * clauses will be properly incorporated into the sub-plans by
+ * create_plan.) If there are any, they will be in a gating Result node
+ * atop the best_path. They have to be incorporated into a gating Result
+ * in each sub-plan in order to produce the semantically correct result.
*/
if (IsA(best_path, ResultPath))
{
*context = lappend(*context, info);
/*
- * We need not recurse into the argument, since it can't contain
- * any aggregates.
+ * We need not recurse into the argument, since it can't contain any
+ * aggregates.
*/
return false;
}
/*
* Look for a match to one of the index columns. (In a stupidly
- * designed index, there could be multiple matches, but we only
- * care about the first one.)
+ * designed index, there could be multiple matches, but we only care
+ * about the first one.)
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
{
/*
* If the match is not at the first index column, we have to verify
* that there are "x = something" restrictions on all the earlier
- * index columns. Since we'll need the restrictclauses list anyway
- * to build the path, it's convenient to extract that first and then
- * look through it for the equality restrictions.
+ * index columns. Since we'll need the restrictclauses list anyway to
+ * build the path, it's convenient to extract that first and then look
+ * through it for the equality restrictions.
*/
restrictclauses = group_clauses_by_indexkey(index,
- index->rel->baserestrictinfo,
+ index->rel->baserestrictinfo,
NIL,
NULL,
&found_clause);
continue; /* definitely haven't got enough */
for (prevcol = 0; prevcol < indexcol; prevcol++)
{
- List *rinfos = (List *) list_nth(restrictclauses, prevcol);
- ListCell *ll;
+ List *rinfos = (List *) list_nth(restrictclauses, prevcol);
+ ListCell *ll;
foreach(ll, rinfos)
{
NullTest *ntest;
/*
- * Generate a suitably modified query. Much of the work here is
- * probably unnecessary in the normal case, but we want to make it look
- * good if someone tries to EXPLAIN the result.
+ * Generate a suitably modified query. Much of the work here is probably
+ * unnecessary in the normal case, but we want to make it look good if
+ * someone tries to EXPLAIN the result.
*/
memcpy(&subroot, root, sizeof(PlannerInfo));
subroot.parse = subparse = (Query *) copyObject(root->parse);
false, true);
/*
- * Generate the plan for the subquery. We already have a Path for
- * the basic indexscan, but we have to convert it to a Plan and
- * attach a LIMIT node above it. We might need a gating Result, too,
- * to handle any non-variable qual clauses.
+ * Generate the plan for the subquery. We already have a Path for the
+ * basic indexscan, but we have to convert it to a Plan and attach a LIMIT
+ * node above it. We might need a gating Result, too, to handle any
+ * non-variable qual clauses.
*
- * Also we must add a "WHERE foo IS NOT NULL" restriction to the
- * indexscan, to be sure we don't return a NULL, which'd be contrary
- * to the standard behavior of MIN/MAX. XXX ideally this should be
- * done earlier, so that the selectivity of the restriction could be
- * included in our cost estimates. But that looks painful, and in
- * most cases the fraction of NULLs isn't high enough to change the
- * decision.
+ * Also we must add a "WHERE foo IS NOT NULL" restriction to the indexscan,
+ * to be sure we don't return a NULL, which'd be contrary to the standard
+ * behavior of MIN/MAX. XXX ideally this should be done earlier, so that
+ * the selectivity of the restriction could be included in our cost
+ * estimates. But that looks painful, and in most cases the fraction of
+ * NULLs isn't high enough to change the decision.
*/
plan = create_plan(&subroot, (Path *) info->path);
copyObject(constant_quals),
plan);
- plan = (Plan *) make_limit(plan,
+ plan = (Plan *) make_limit(plan,
subparse->limitOffset,
subparse->limitCount,
0, 1);
* Replace original aggregate calls with subplan output Params
*/
static Node *
-replace_aggs_with_params_mutator(Node *node, List **context)
+replace_aggs_with_params_mutator(Node *node, List **context)
{
if (node == NULL)
return NULL;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.88 2005/09/28 21:17:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.89 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* does not use grouping
*
* Note: the PlannerInfo node also includes a query_pathkeys field, which is
- * both an input and an output of query_planner(). The input value signals
+ * both an input and an output of query_planner(). The input value signals
* query_planner that the indicated sort order is wanted in the final output
* plan. But this value has not yet been "canonicalized", since the needed
* info does not get computed until we scan the qual clauses. We canonicalize
if (parse->jointree->fromlist == NIL)
{
*cheapest_path = (Path *) create_result_path(NULL, NULL,
- (List *) parse->jointree->quals);
+ (List *) parse->jointree->quals);
*sorted_path = NULL;
return;
}
/*
* Pull out any non-variable WHERE clauses so these can be put in a
* toplevel "Result" node, where they will gate execution of the whole
- * plan (the Result will not invoke its descendant plan unless the
- * quals are true). Note that any *really* non-variable quals will
- * have been optimized away by eval_const_expressions(). What we're
- * mostly interested in here is quals that depend only on outer-level
- * vars, although if the qual reduces to "WHERE FALSE" this path will
- * also be taken.
+ * plan (the Result will not invoke its descendant plan unless the quals
+ * are true). Note that any *really* non-variable quals will have been
+ * optimized away by eval_const_expressions(). What we're mostly
+ * interested in here is quals that depend only on outer-level vars,
+ * although if the qual reduces to "WHERE FALSE" this path will also be
+ * taken.
*/
parse->jointree->quals = (Node *)
pull_constant_clauses((List *) parse->jointree->quals,
&constant_quals);
/*
- * Init planner lists to empty. We create the base_rel_array with a
- * size that will be sufficient if no pullups or inheritance additions
- * happen ... otherwise it will be enlarged as needed.
+ * Init planner lists to empty. We create the base_rel_array with a size
+ * that will be sufficient if no pullups or inheritance additions happen
+ * ... otherwise it will be enlarged as needed.
*
* NOTE: in_info_list was set up by subquery_planner, do not touch here
*/
add_base_rels_to_query(root, (Node *) parse->jointree);
/*
- * Examine the targetlist and qualifications, adding entries to
- * baserel targetlists for all referenced Vars. Restrict and join
- * clauses are added to appropriate lists belonging to the mentioned
- * relations. We also build lists of equijoined keys for pathkey
- * construction.
+ * Examine the targetlist and qualifications, adding entries to baserel
+ * targetlists for all referenced Vars. Restrict and join clauses are
+ * added to appropriate lists belonging to the mentioned relations. We
+ * also build lists of equijoined keys for pathkey construction.
*
- * Note: all subplan nodes will have "flat" (var-only) tlists. This
- * implies that all expression evaluations are done at the root of the
- * plan tree. Once upon a time there was code to try to push
- * expensive function calls down to lower plan nodes, but that's dead
- * code and has been for a long time...
+ * Note: all subplan nodes will have "flat" (var-only) tlists. This implies
+ * that all expression evaluations are done at the root of the plan tree.
+ * Once upon a time there was code to try to push expensive function calls
+ * down to lower plan nodes, but that's dead code and has been for a long
+ * time...
*/
build_base_rel_tlists(root, tlist);
(void) distribute_quals_to_rels(root, (Node *) parse->jointree, false);
/*
- * Use the completed lists of equijoined keys to deduce any implied
- * but unstated equalities (for example, A=B and B=C imply A=C).
+ * Use the completed lists of equijoined keys to deduce any implied but
+ * unstated equalities (for example, A=B and B=C imply A=C).
*/
generate_implied_equalities(root);
/*
- * We should now have all the pathkey equivalence sets built, so it's
- * now possible to convert the requested query_pathkeys to canonical
- * form. Also canonicalize the groupClause and sortClause pathkeys
- * for use later.
+ * We should now have all the pathkey equivalence sets built, so it's now
+ * possible to convert the requested query_pathkeys to canonical form.
+ * Also canonicalize the groupClause and sortClause pathkeys for use
+ * later.
*/
root->query_pathkeys = canonicalize_pathkeys(root, root->query_pathkeys);
root->group_pathkeys = canonicalize_pathkeys(root, root->group_pathkeys);
elog(ERROR, "failed to construct the join relation");
/*
- * If there's grouping going on, estimate the number of result groups.
- * We couldn't do this any earlier because it depends on relation size
+ * If there's grouping going on, estimate the number of result groups. We
+ * couldn't do this any earlier because it depends on relation size
* estimates that were set up above.
*
- * Then convert tuple_fraction to fractional form if it is absolute,
- * and adjust it based on the knowledge that grouping_planner will be
- * doing grouping or aggregation work with our result.
+ * Then convert tuple_fraction to fractional form if it is absolute, and
+ * adjust it based on the knowledge that grouping_planner will be doing
+ * grouping or aggregation work with our result.
*
* This introduces some undesirable coupling between this code and
* grouping_planner, but the alternatives seem even uglier; we couldn't
final_rel->rows);
/*
- * In GROUP BY mode, an absolute LIMIT is relative to the number
- * of groups not the number of tuples. If the caller gave us
- * a fraction, keep it as-is. (In both cases, we are effectively
- * assuming that all the groups are about the same size.)
+ * In GROUP BY mode, an absolute LIMIT is relative to the number of
+ * groups not the number of tuples. If the caller gave us a fraction,
+ * keep it as-is. (In both cases, we are effectively assuming that
+ * all the groups are about the same size.)
*/
if (tuple_fraction >= 1.0)
tuple_fraction /= *num_groups;
/*
* If both GROUP BY and ORDER BY are specified, we will need two
- * levels of sort --- and, therefore, certainly need to read all
- * the tuples --- unless ORDER BY is a subset of GROUP BY.
+ * levels of sort --- and, therefore, certainly need to read all the
+ * tuples --- unless ORDER BY is a subset of GROUP BY.
*/
if (parse->groupClause && parse->sortClause &&
!pathkeys_contained_in(root->sort_pathkeys, root->group_pathkeys))
else if (parse->hasAggs || root->hasHavingQual)
{
/*
- * Ungrouped aggregate will certainly want to read all the tuples,
- * and it will deliver a single result row (so leave *num_groups 1).
+ * Ungrouped aggregate will certainly want to read all the tuples, and
+ * it will deliver a single result row (so leave *num_groups 1).
*/
tuple_fraction = 0.0;
}
{
/*
* Since there was no grouping or aggregation, it's reasonable to
- * assume the UNIQUE filter has effects comparable to GROUP BY.
- * Return the estimated number of output rows for use by caller.
- * (If DISTINCT is used with grouping, we ignore its effects for
- * rowcount estimation purposes; this amounts to assuming the grouped
- * rows are distinct already.)
+ * assume the UNIQUE filter has effects comparable to GROUP BY. Return
+ * the estimated number of output rows for use by caller. (If DISTINCT
+ * is used with grouping, we ignore its effects for rowcount
+ * estimation purposes; this amounts to assuming the grouped rows are
+ * distinct already.)
*/
List *distinctExprs;
else
{
/*
- * Plain non-grouped, non-aggregated query: an absolute tuple
- * fraction can be divided by the number of tuples.
+ * Plain non-grouped, non-aggregated query: an absolute tuple fraction
+ * can be divided by the number of tuples.
*/
if (tuple_fraction >= 1.0)
tuple_fraction /= final_rel->rows;
}
/*
- * Pick out the cheapest-total path and the cheapest presorted path
- * for the requested pathkeys (if there is one). We should take the
- * tuple fraction into account when selecting the cheapest presorted
- * path, but not when selecting the cheapest-total path, since if we
- * have to sort then we'll have to fetch all the tuples. (But there's
- * a special case: if query_pathkeys is NIL, meaning order doesn't
- * matter, then the "cheapest presorted" path will be the cheapest
- * overall for the tuple fraction.)
+ * Pick out the cheapest-total path and the cheapest presorted path for
+ * the requested pathkeys (if there is one). We should take the tuple
+ * fraction into account when selecting the cheapest presorted path, but
+ * not when selecting the cheapest-total path, since if we have to sort
+ * then we'll have to fetch all the tuples. (But there's a special case:
+ * if query_pathkeys is NIL, meaning order doesn't matter, then the
+ * "cheapest presorted" path will be the cheapest overall for the tuple
+ * fraction.)
*
- * The cheapest-total path is also the one to use if grouping_planner
- * decides to use hashed aggregation, so we return it separately even
- * if this routine thinks the presorted path is the winner.
+ * The cheapest-total path is also the one to use if grouping_planner decides
+ * to use hashed aggregation, so we return it separately even if this
+ * routine thinks the presorted path is the winner.
*/
cheapestpath = final_rel->cheapest_total_path;
/*
* Forget about the presorted path if it would be cheaper to sort the
- * cheapest-total path. Here we need consider only the behavior at
- * the tuple fraction point.
+ * cheapest-total path. Here we need consider only the behavior at the
+ * tuple fraction point.
*/
if (sortedpath)
{
}
/*
- * If we have constant quals, add a toplevel Result step to process
- * them.
+ * If we have constant quals, add a toplevel Result step to process them.
*/
if (constant_quals)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.193 2005/09/24 22:54:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.194 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Plan *inheritance_planner(PlannerInfo *root, List *inheritlist);
static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction);
static double preprocess_limit(PlannerInfo *root,
- double tuple_fraction,
- int *offset_est, int *count_est);
+ double tuple_fraction,
+ int *offset_est, int *count_est);
static bool choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
Path *cheapest_path, Path *sorted_path,
double dNumGroups, AggClauseCounts *agg_counts);
* these global state variables must be saved and restored.
*
* Query level and the param list cannot be moved into the per-query
- * PlannerInfo structure since their whole purpose is communication
- * across multiple sub-queries. Also, boundParams is explicitly info
- * from outside the query, and so is likewise better handled as a global
- * variable.
+ * PlannerInfo structure since their whole purpose is communication across
+ * multiple sub-queries. Also, boundParams is explicitly info from outside
+ * the query, and so is likewise better handled as a global variable.
*
- * Note we do NOT save and restore PlannerPlanId: it exists to assign
- * unique IDs to SubPlan nodes, and we want those IDs to be unique for
- * the life of a backend. Also, PlannerInitPlan is saved/restored in
+ * Note we do NOT save and restore PlannerPlanId: it exists to assign unique
+ * IDs to SubPlan nodes, and we want those IDs to be unique for the life
+ * of a backend. Also, PlannerInitPlan is saved/restored in
* subquery_planner, not here.
*/
save_PlannerQueryLevel = PlannerQueryLevel;
if (isCursor)
{
/*
- * We have no real idea how many tuples the user will ultimately
- * FETCH from a cursor, but it seems a good bet that he doesn't
- * want 'em all. Optimize for 10% retrieval (you gotta better
- * number? Should this be a SETtable parameter?)
+ * We have no real idea how many tuples the user will ultimately FETCH
+ * from a cursor, but it seems a good bet that he doesn't want 'em
+ * all. Optimize for 10% retrieval (you gotta better number? Should
+ * this be a SETtable parameter?)
*/
tuple_fraction = 0.10;
}
root->parse = parse;
/*
- * Look for IN clauses at the top level of WHERE, and transform them
- * into joins. Note that this step only handles IN clauses originally
- * at top level of WHERE; if we pull up any subqueries in the next
- * step, their INs are processed just before pulling them up.
+ * Look for IN clauses at the top level of WHERE, and transform them into
+ * joins. Note that this step only handles IN clauses originally at top
+ * level of WHERE; if we pull up any subqueries in the next step, their
+ * INs are processed just before pulling them up.
*/
root->in_info_list = NIL;
if (parse->hasSubLinks)
pull_up_subqueries(root, (Node *) parse->jointree, false);
/*
- * Detect whether any rangetable entries are RTE_JOIN kind; if not, we
- * can avoid the expense of doing flatten_join_alias_vars(). Also
- * check for outer joins --- if none, we can skip reduce_outer_joins()
- * and some other processing. This must be done after we have done
+ * Detect whether any rangetable entries are RTE_JOIN kind; if not, we can
+ * avoid the expense of doing flatten_join_alias_vars(). Also check for
+ * outer joins --- if none, we can skip reduce_outer_joins() and some
+ * other processing. This must be done after we have done
* pull_up_subqueries, of course.
*
* Note: if reduce_outer_joins manages to eliminate all outer joins,
- * root->hasOuterJoins is not reset currently. This is OK since its
+ * root->hasOuterJoins is not reset currently. This is OK since its
* purpose is merely to suppress unnecessary processing in simple cases.
*/
root->hasJoinRTEs = false;
/*
* Set hasHavingQual to remember if HAVING clause is present. Needed
- * because preprocess_expression will reduce a constant-true condition
- * to an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
+ * because preprocess_expression will reduce a constant-true condition to
+ * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
*/
root->hasHavingQual = (parse->havingQual != NULL);
}
/*
- * In some cases we may want to transfer a HAVING clause into WHERE.
- * We cannot do so if the HAVING clause contains aggregates (obviously)
- * or volatile functions (since a HAVING clause is supposed to be executed
+ * In some cases we may want to transfer a HAVING clause into WHERE. We
+ * cannot do so if the HAVING clause contains aggregates (obviously) or
+ * volatile functions (since a HAVING clause is supposed to be executed
* only once per group). Also, it may be that the clause is so expensive
* to execute that we're better off doing it only once per group, despite
* the loss of selectivity. This is hard to estimate short of doing the
* entire planning process twice, so we use a heuristic: clauses
- * containing subplans are left in HAVING. Otherwise, we move or copy
- * the HAVING clause into WHERE, in hopes of eliminating tuples before
+ * containing subplans are left in HAVING. Otherwise, we move or copy the
+ * HAVING clause into WHERE, in hopes of eliminating tuples before
* aggregation instead of after.
*
- * If the query has explicit grouping then we can simply move such a
- * clause into WHERE; any group that fails the clause will not be
- * in the output because none of its tuples will reach the grouping
- * or aggregation stage. Otherwise we must have a degenerate
- * (variable-free) HAVING clause, which we put in WHERE so that
- * query_planner() can use it in a gating Result node, but also keep
- * in HAVING to ensure that we don't emit a bogus aggregated row.
- * (This could be done better, but it seems not worth optimizing.)
+ * If the query has explicit grouping then we can simply move such a clause
+ * into WHERE; any group that fails the clause will not be in the output
+ * because none of its tuples will reach the grouping or aggregation
+ * stage. Otherwise we must have a degenerate (variable-free) HAVING
+ * clause, which we put in WHERE so that query_planner() can use it in a
+ * gating Result node, but also keep in HAVING to ensure that we don't
+ * emit a bogus aggregated row. (This could be done better, but it seems
+ * not worth optimizing.)
*
* Note that both havingQual and parse->jointree->quals are in
- * implicitly-ANDed-list form at this point, even though they are
- * declared as Node *.
+ * implicitly-ANDed-list form at this point, even though they are declared
+ * as Node *.
*/
newHaving = NIL;
foreach(l, (List *) parse->havingQual)
parse->havingQual = (Node *) newHaving;
/*
- * If we have any outer joins, try to reduce them to plain inner
- * joins. This step is most easily done after we've done expression
+ * If we have any outer joins, try to reduce them to plain inner joins.
+ * This step is most easily done after we've done expression
* preprocessing.
*/
if (root->hasOuterJoins)
reduce_outer_joins(root);
/*
- * See if we can simplify the jointree; opportunities for this may
- * come from having pulled up subqueries, or from flattening explicit
- * JOIN syntax. We must do this after flattening JOIN alias
- * variables, since eliminating explicit JOIN nodes from the jointree
- * will cause get_relids_for_join() to fail. But it should happen
- * after reduce_outer_joins, anyway.
+ * See if we can simplify the jointree; opportunities for this may come
+ * from having pulled up subqueries, or from flattening explicit JOIN
+ * syntax. We must do this after flattening JOIN alias variables, since
+ * eliminating explicit JOIN nodes from the jointree will cause
+ * get_relids_for_join() to fail. But it should happen after
+ * reduce_outer_joins, anyway.
*/
parse->jointree = (FromExpr *)
simplify_jointree(root, (Node *) parse->jointree);
/*
- * Do the main planning. If we have an inherited target relation,
- * that needs special processing, else go straight to
- * grouping_planner.
+ * Do the main planning. If we have an inherited target relation, that
+ * needs special processing, else go straight to grouping_planner.
*/
if (parse->resultRelation &&
(lst = expand_inherited_rtentry(root, parse->resultRelation)) != NIL)
/*
* If any subplans were generated, or if we're inside a subplan, build
- * initPlan list and extParam/allParam sets for plan nodes, and attach
- * the initPlans to the top plan node.
+ * initPlan list and extParam/allParam sets for plan nodes, and attach the
+ * initPlans to the top plan node.
*/
if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1)
SS_finalize_plan(plan, parse->rtable);
preprocess_expression(PlannerInfo *root, Node *expr, int kind)
{
/*
- * Fall out quickly if expression is empty. This occurs often enough
- * to be worth checking. Note that null->null is the correct conversion
- * for implicit-AND result format, too.
+ * Fall out quickly if expression is empty. This occurs often enough to
+ * be worth checking. Note that null->null is the correct conversion for
+ * implicit-AND result format, too.
*/
if (expr == NULL)
return NULL;
/*
* If the query has any join RTEs, replace join alias variables with
* base-relation variables. We must do this before sublink processing,
- * else sublinks expanded out from join aliases wouldn't get
- * processed.
+ * else sublinks expanded out from join aliases wouldn't get processed.
*/
if (root->hasJoinRTEs)
expr = flatten_join_alias_vars(root, expr);
* careful to maintain AND/OR flatness --- that is, do not generate a tree
* with AND directly under AND, nor OR directly under OR.
*
- * Because this is a relatively expensive process, we skip it when the
- * query is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()".
- * The expression will only be evaluated once anyway, so no point in
+ * Because this is a relatively expensive process, we skip it when the query
+ * is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". The
+ * expression will only be evaluated once anyway, so no point in
* pre-simplifying; we can't execute it any faster than the executor can,
* and we will waste cycles copying the tree. Notice however that we
- * still must do it for quals (to get AND/OR flatness); and if we are
- * in a subquery we should not assume it will be done only once.
+ * still must do it for quals (to get AND/OR flatness); and if we are in a
+ * subquery we should not assume it will be done only once.
*/
if (root->parse->jointree->fromlist != NIL ||
kind == EXPRKIND_QUAL ||
expr = SS_process_sublinks(expr, (kind == EXPRKIND_QUAL));
/*
- * XXX do not insert anything here unless you have grokked the
- * comments in SS_replace_correlation_vars ...
+ * XXX do not insert anything here unless you have grokked the comments in
+ * SS_replace_correlation_vars ...
*/
/* Replace uplevel vars with Param nodes */
expr = SS_replace_correlation_vars(expr);
/*
- * If it's a qual or havingQual, convert it to implicit-AND format.
- * (We don't want to do this before eval_const_expressions, since the
- * latter would be unable to simplify a top-level AND correctly. Also,
+ * If it's a qual or havingQual, convert it to implicit-AND format. (We
+ * don't want to do this before eval_const_expressions, since the latter
+ * would be unable to simplify a top-level AND correctly. Also,
* SS_process_sublinks expects explicit-AND format.)
*/
if (kind == EXPRKIND_QUAL)
Plan *subplan;
/*
- * Generate modified query with this rel as target. We have to
- * be prepared to translate varnos in in_info_list as well as in
- * the Query proper.
+ * Generate modified query with this rel as target. We have to be
+ * prepared to translate varnos in in_info_list as well as in the
+ * Query proper.
*/
memcpy(&subroot, root, sizeof(PlannerInfo));
subroot.parse = (Query *)
* XXX my goodness this next bit is ugly. Really need to think about
* ways to rein in planner's habit of scribbling on its input.
*
- * Planning of the subquery might have modified the rangetable,
- * either by addition of RTEs due to expansion of inherited source
- * tables, or by changes of the Query structures inside subquery
- * RTEs. We have to ensure that this gets propagated back to the
- * master copy. However, if we aren't done planning yet, we also
- * need to ensure that subsequent calls to grouping_planner have
- * virgin sub-Queries to work from. So, if we are at the last
- * list entry, just copy the subquery rangetable back to the master
- * copy; if we are not, then extend the master copy by adding
- * whatever the subquery added. (We assume these added entries
- * will go untouched by the future grouping_planner calls. We are
- * also effectively assuming that sub-Queries will get planned
- * identically each time, or at least that the impacts on their
- * rangetables will be the same each time. Did I say this is ugly?)
+ * Planning of the subquery might have modified the rangetable, either by
+ * addition of RTEs due to expansion of inherited source tables, or by
+ * changes of the Query structures inside subquery RTEs. We have to
+ * ensure that this gets propagated back to the master copy. However,
+ * if we aren't done planning yet, we also need to ensure that
+ * subsequent calls to grouping_planner have virgin sub-Queries to
+ * work from. So, if we are at the last list entry, just copy the
+ * subquery rangetable back to the master copy; if we are not, then
+ * extend the master copy by adding whatever the subquery added. (We
+ * assume these added entries will go untouched by the future
+ * grouping_planner calls. We are also effectively assuming that
+ * sub-Queries will get planned identically each time, or at least
+ * that the impacts on their rangetables will be the same each time.
+ * Did I say this is ugly?)
*/
if (lnext(l) == NULL)
parse->rtable = subroot.parse->rtable;
else
{
- int subrtlength = list_length(subroot.parse->rtable);
+ int subrtlength = list_length(subroot.parse->rtable);
if (subrtlength > mainrtlength)
{
List *set_sortclauses;
/*
- * If there's a top-level ORDER BY, assume we have to fetch all
- * the tuples. This might seem too simplistic given all the
- * hackery below to possibly avoid the sort ... but a nonzero
- * tuple_fraction is only of use to plan_set_operations() when
- * the setop is UNION ALL, and the result of UNION ALL is always
- * unsorted.
+ * If there's a top-level ORDER BY, assume we have to fetch all the
+ * tuples. This might seem too simplistic given all the hackery below
+ * to possibly avoid the sort ... but a nonzero tuple_fraction is only
+ * of use to plan_set_operations() when the setop is UNION ALL, and
+ * the result of UNION ALL is always unsorted.
*/
if (parse->sortClause)
tuple_fraction = 0.0;
/*
- * Construct the plan for set operations. The result will not
- * need any work except perhaps a top-level sort and/or LIMIT.
+ * Construct the plan for set operations. The result will not need
+ * any work except perhaps a top-level sort and/or LIMIT.
*/
result_plan = plan_set_operations(root, tuple_fraction,
&set_sortclauses);
/*
- * Calculate pathkeys representing the sort order (if any) of the
- * set operation's result. We have to do this before overwriting
- * the sort key information...
+ * Calculate pathkeys representing the sort order (if any) of the set
+ * operation's result. We have to do this before overwriting the sort
+ * key information...
*/
current_pathkeys = make_pathkeys_for_sortclauses(set_sortclauses,
- result_plan->targetlist);
+ result_plan->targetlist);
current_pathkeys = canonicalize_pathkeys(root, current_pathkeys);
/*
- * We should not need to call preprocess_targetlist, since we must
- * be in a SELECT query node. Instead, use the targetlist
- * returned by plan_set_operations (since this tells whether it
- * returned any resjunk columns!), and transfer any sort key
- * information from the original tlist.
+ * We should not need to call preprocess_targetlist, since we must be
+ * in a SELECT query node. Instead, use the targetlist returned by
+ * plan_set_operations (since this tells whether it returned any
+ * resjunk columns!), and transfer any sort key information from the
+ * original tlist.
*/
Assert(parse->commandType == CMD_SELECT);
tlist = preprocess_targetlist(root, tlist);
/*
- * Generate appropriate target list for subplan; may be different
- * from tlist if grouping or aggregation is needed.
+ * Generate appropriate target list for subplan; may be different from
+ * tlist if grouping or aggregation is needed.
*/
sub_tlist = make_subplanTargetList(root, tlist,
- &groupColIdx, &need_tlist_eval);
+ &groupColIdx, &need_tlist_eval);
/*
* Calculate pathkeys that represent grouping/ordering requirements.
* Note: we do not attempt to detect duplicate aggregates here; a
* somewhat-overestimated count is okay for our present purposes.
*
- * Note: think not that we can turn off hasAggs if we find no aggs.
- * It is possible for constant-expression simplification to remove
- * all explicit references to aggs, but we still have to follow
- * the aggregate semantics (eg, producing only one output row).
+ * Note: think not that we can turn off hasAggs if we find no aggs. It is
+ * possible for constant-expression simplification to remove all
+ * explicit references to aggs, but we still have to follow the
+ * aggregate semantics (eg, producing only one output row).
*/
if (parse->hasAggs)
{
/*
* Figure out whether we need a sorted result from query_planner.
*
- * If we have a GROUP BY clause, then we want a result sorted
- * properly for grouping. Otherwise, if there is an ORDER BY
- * clause, we want to sort by the ORDER BY clause. (Note: if we
- * have both, and ORDER BY is a superset of GROUP BY, it would be
- * tempting to request sort by ORDER BY --- but that might just
- * leave us failing to exploit an available sort order at all.
- * Needs more thought...)
+ * If we have a GROUP BY clause, then we want a result sorted properly
+ * for grouping. Otherwise, if there is an ORDER BY clause, we want
+ * to sort by the ORDER BY clause. (Note: if we have both, and ORDER
+ * BY is a superset of GROUP BY, it would be tempting to request sort
+ * by ORDER BY --- but that might just leave us failing to exploit an
+ * available sort order at all. Needs more thought...)
*/
if (parse->groupClause)
root->query_pathkeys = root->group_pathkeys;
root->query_pathkeys = NIL;
/*
- * Generate the best unsorted and presorted paths for this Query
- * (but note there may not be any presorted path). query_planner
- * will also estimate the number of groups in the query, and
- * canonicalize all the pathkeys.
+ * Generate the best unsorted and presorted paths for this Query (but
+ * note there may not be any presorted path). query_planner will also
+ * estimate the number of groups in the query, and canonicalize all
+ * the pathkeys.
*/
query_planner(root, sub_tlist, tuple_fraction,
&cheapest_path, &sorted_path, &dNumGroups);
/*
* Select the best path. If we are doing hashed grouping, we will
- * always read all the input tuples, so use the cheapest-total
- * path. Otherwise, trust query_planner's decision about which to use.
+ * always read all the input tuples, so use the cheapest-total path.
+ * Otherwise, trust query_planner's decision about which to use.
*/
if (use_hashed_grouping || !sorted_path)
best_path = cheapest_path;
best_path = sorted_path;
/*
- * Check to see if it's possible to optimize MIN/MAX aggregates.
- * If so, we will forget all the work we did so far to choose a
- * "regular" path ... but we had to do it anyway to be able to
- * tell which way is cheaper.
+ * Check to see if it's possible to optimize MIN/MAX aggregates. If
+ * so, we will forget all the work we did so far to choose a "regular"
+ * path ... but we had to do it anyway to be able to tell which way is
+ * cheaper.
*/
result_plan = optimize_minmax_aggregates(root,
tlist,
if (result_plan != NULL)
{
/*
- * optimize_minmax_aggregates generated the full plan, with
- * the right tlist, and it has no sort order.
+ * optimize_minmax_aggregates generated the full plan, with the
+ * right tlist, and it has no sort order.
*/
current_pathkeys = NIL;
}
* GROUP BY without aggregation, so insert a group node (plus
* the appropriate sort node, if necessary).
*
- * Add an explicit sort if we couldn't make the path come
- * out the way the GROUP node needs it.
+ * Add an explicit sort if we couldn't make the path come out the
+ * way the GROUP node needs it.
*/
if (!pathkeys_contained_in(group_pathkeys, current_pathkeys))
{
* This is a degenerate case in which we are supposed to emit
* either 0 or 1 row depending on whether HAVING succeeds.
* Furthermore, there cannot be any variables in either HAVING
- * or the targetlist, so we actually do not need the FROM table
- * at all! We can just throw away the plan-so-far and generate
- * a Result node. This is a sufficiently unusual corner case
- * that it's not worth contorting the structure of this routine
- * to avoid having to generate the plan in the first place.
+ * or the targetlist, so we actually do not need the FROM
+ * table at all! We can just throw away the plan-so-far and
+ * generate a Result node. This is a sufficiently unusual
+ * corner case that it's not worth contorting the structure of
+ * this routine to avoid having to generate the plan in the
+ * first place.
*/
result_plan = (Plan *) make_result(tlist,
parse->havingQual,
} /* end of if (setOperations) */
/*
- * If we were not able to make the plan come out in the right order,
- * add an explicit sort step.
+ * If we were not able to make the plan come out in the right order, add
+ * an explicit sort step.
*/
if (parse->sortClause)
{
result_plan = (Plan *) make_unique(result_plan, parse->distinctClause);
/*
- * If there was grouping or aggregation, leave plan_rows as-is
- * (ie, assume the result was already mostly unique). If not,
- * use the number of distinct-groups calculated by query_planner.
+ * If there was grouping or aggregation, leave plan_rows as-is (ie,
+ * assume the result was already mostly unique). If not, use the
+ * number of distinct-groups calculated by query_planner.
*/
if (!parse->groupClause && !root->hasHavingQual && !parse->hasAggs)
result_plan->plan_rows = dNumGroups;
}
/*
- * Return the actual output ordering in query_pathkeys for possible
- * use by an outer query level.
+ * Return the actual output ordering in query_pathkeys for possible use by
+ * an outer query level.
*/
root->query_pathkeys = current_pathkeys;
* preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
*
* We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
- * results back in *count_est and *offset_est. These variables are set to
+ * results back in *count_est and *offset_est. These variables are set to
* 0 if the corresponding clause is not present, and -1 if it's present
* but we couldn't estimate the value for it. (The "0" convention is OK
* for OFFSET but a little bit bogus for LIMIT: effectively we estimate
* be passed to make_limit, which see if you change this code.
*
* The return value is the suitably adjusted tuple_fraction to use for
- * planning the query. This adjustment is not overridable, since it reflects
+ * planning the query. This adjustment is not overridable, since it reflects
* plan actions that grouping_planner() will certainly take, not assumptions
* about context.
*/
if (((Const *) est)->constisnull)
{
/* NULL indicates LIMIT ALL, ie, no limit */
- *count_est = 0; /* treat as not present */
+ *count_est = 0; /* treat as not present */
}
else
{
if (((Const *) est)->constisnull)
{
/* Treat NULL as no offset; the executor will too */
- *offset_est = 0; /* treat as not present */
+ *offset_est = 0; /* treat as not present */
}
else
{
else if (*offset_est != 0 && tuple_fraction > 0.0)
{
/*
- * We have an OFFSET but no LIMIT. This acts entirely differently
- * from the LIMIT case: here, we need to increase rather than
- * decrease the caller's tuple_fraction, because the OFFSET acts
- * to cause more tuples to be fetched instead of fewer. This only
- * matters if we got a tuple_fraction > 0, however.
+ * We have an OFFSET but no LIMIT. This acts entirely differently
+ * from the LIMIT case: here, we need to increase rather than decrease
+ * the caller's tuple_fraction, because the OFFSET acts to cause more
+ * tuples to be fetched instead of fewer. This only matters if we got
+ * a tuple_fraction > 0, however.
*
* As above, use 10% if OFFSET is present but unestimatable.
*/
/*
* If we have absolute counts from both caller and OFFSET, add them
- * together; likewise if they are both fractional. If one is
- * fractional and the other absolute, we want to take the larger,
- * and we heuristically assume that's the fractional one.
+ * together; likewise if they are both fractional. If one is
+ * fractional and the other absolute, we want to take the larger, and
+ * we heuristically assume that's the fractional one.
*/
if (tuple_fraction >= 1.0)
{
/* both fractional, so add them together */
tuple_fraction += limit_fraction;
if (tuple_fraction >= 1.0)
- tuple_fraction = 0.0; /* assume fetch all */
+ tuple_fraction = 0.0; /* assume fetch all */
}
}
}
* Don't do it if it doesn't look like the hashtable will fit into
* work_mem.
*
- * Beware here of the possibility that cheapest_path->parent is NULL.
- * This could happen if user does something silly like
- * SELECT 'foo' GROUP BY 1;
+ * Beware here of the possibility that cheapest_path->parent is NULL. This
+ * could happen if user does something silly like SELECT 'foo' GROUP BY 1;
*/
if (cheapest_path->parent)
{
}
else
{
- cheapest_path_rows = 1; /* assume non-set result */
- cheapest_path_width = 100; /* arbitrary */
+ cheapest_path_rows = 1; /* assume non-set result */
+ cheapest_path_width = 100; /* arbitrary */
}
/* Estimate per-hash-entry space at tuple width... */
return false;
/*
- * See if the estimated cost is no more than doing it the other way.
- * While avoiding the need for sorted input is usually a win, the fact
- * that the output won't be sorted may be a loss; so we need to do an
- * actual cost comparison.
+ * See if the estimated cost is no more than doing it the other way. While
+ * avoiding the need for sorted input is usually a win, the fact that the
+ * output won't be sorted may be a loss; so we need to do an actual cost
+ * comparison.
*
- * We need to consider
- * cheapest_path + hashagg [+ final sort]
- * versus either
- * cheapest_path [+ sort] + group or agg [+ final sort]
- * or
- * presorted_path + group or agg [+ final sort]
- * where brackets indicate a step that may not be needed. We assume
- * query_planner() will have returned a presorted path only if it's a
- * winner compared to cheapest_path for this purpose.
+ * We need to consider cheapest_path + hashagg [+ final sort] versus either
+ * cheapest_path [+ sort] + group or agg [+ final sort] or presorted_path
+ * + group or agg [+ final sort] where brackets indicate a step that may
+ * not be needed. We assume query_planner() will have returned a presorted
+ * path only if it's a winner compared to cheapest_path for this purpose.
*
- * These path variables are dummies that just hold cost fields; we don't
- * make actual Paths for these steps.
+ * These path variables are dummies that just hold cost fields; we don't make
+ * actual Paths for these steps.
*/
cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs,
numGroupCols, dNumGroups,
/*
* Otherwise, start with a "flattened" tlist (having just the vars
- * mentioned in the targetlist and HAVING qual --- but not upper-
- * level Vars; they will be replaced by Params later on).
+ * mentioned in the targetlist and HAVING qual --- but not upper- level
+ * Vars; they will be replaced by Params later on).
*/
sub_tlist = flatten_tlist(tlist);
extravars = pull_var_clause(parse->havingQual, false);
/*
* If grouping, create sub_tlist entries for all GROUP BY expressions
- * (GROUP BY items that are simple Vars should be in the list
- * already), and make an array showing where the group columns are in
- * the sub_tlist.
+ * (GROUP BY items that are simple Vars should be in the list already),
+ * and make an array showing where the group columns are in the sub_tlist.
*/
numCols = list_length(parse->groupClause);
if (numCols > 0)
Assert(orig_tlist_item != NULL);
orig_tle = (TargetEntry *) lfirst(orig_tlist_item);
orig_tlist_item = lnext(orig_tlist_item);
- if (orig_tle->resjunk) /* should not happen */
+ if (orig_tle->resjunk) /* should not happen */
elog(ERROR, "resjunk output columns are not implemented");
Assert(new_tle->resno == orig_tle->resno);
new_tle->ressortgroupref = orig_tle->ressortgroupref;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.114 2005/09/05 18:59:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.115 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int num_vars; /* number of plain Var tlist entries */
bool has_non_vars; /* are there non-plain-Var entries? */
/* array of num_vars entries: */
- tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */
+ tlist_vinfo vars[1]; /* VARIABLE LENGTH ARRAY */
} indexed_tlist; /* VARIABLE LENGTH STRUCT */
typedef struct
static bool fix_expr_references_walker(Node *node, void *context);
static void set_join_references(Join *join, List *rtable);
static void set_inner_join_references(Plan *inner_plan,
- List *rtable,
- indexed_tlist *outer_itlist);
+ List *rtable,
+ indexed_tlist *outer_itlist);
static void set_uppernode_references(Plan *plan, Index subvarno);
static indexed_tlist *build_tlist_index(List *tlist);
static Var *search_indexed_tlist_for_var(Var *var,
- indexed_tlist *itlist,
- Index newvarno);
+ indexed_tlist *itlist,
+ Index newvarno);
static Var *search_indexed_tlist_for_non_var(Node *node,
- indexed_tlist *itlist,
- Index newvarno);
+ indexed_tlist *itlist,
+ Index newvarno);
static List *join_references(List *clauses,
- List *rtable,
- indexed_tlist *outer_itlist,
- indexed_tlist *inner_itlist,
- Index acceptable_rel);
+ List *rtable,
+ indexed_tlist *outer_itlist,
+ indexed_tlist *inner_itlist,
+ Index acceptable_rel);
static Node *join_references_mutator(Node *node,
join_references_context *context);
static Node *replace_vars_with_subplan_refs(Node *node,
- indexed_tlist *subplan_itlist,
- Index subvarno);
+ indexed_tlist *subplan_itlist,
+ Index subvarno);
static Node *replace_vars_with_subplan_refs_mutator(Node *node,
- replace_vars_with_subplan_refs_context *context);
+ replace_vars_with_subplan_refs_context *context);
static bool fix_opfuncids_walker(Node *node, void *context);
static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
/*
* set_plan_references
*
- * This is the final processing pass of the planner/optimizer. The plan
+ * This is the final processing pass of the planner/optimizer. The plan
* tree is complete; we just have to adjust some representational details
* for the convenience of the executor. We update Vars in upper plan nodes
* to refer to the outputs of their subplans, and we compute regproc OIDs
fix_expr_references(plan,
(Node *) ((IndexScan *) plan)->indexqual);
fix_expr_references(plan,
- (Node *) ((IndexScan *) plan)->indexqualorig);
+ (Node *) ((IndexScan *) plan)->indexqualorig);
break;
case T_BitmapIndexScan:
/* no need to fix targetlist and qual */
Assert(plan->targetlist == NIL);
Assert(plan->qual == NIL);
fix_expr_references(plan,
- (Node *) ((BitmapIndexScan *) plan)->indexqual);
+ (Node *) ((BitmapIndexScan *) plan)->indexqual);
fix_expr_references(plan,
- (Node *) ((BitmapIndexScan *) plan)->indexqualorig);
+ (Node *) ((BitmapIndexScan *) plan)->indexqualorig);
break;
case T_BitmapHeapScan:
fix_expr_references(plan, (Node *) plan->targetlist);
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan,
- (Node *) ((BitmapHeapScan *) plan)->bitmapqualorig);
+ (Node *) ((BitmapHeapScan *) plan)->bitmapqualorig);
break;
case T_TidScan:
fix_expr_references(plan, (Node *) plan->targetlist);
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual);
fix_expr_references(plan,
- (Node *) ((MergeJoin *) plan)->mergeclauses);
+ (Node *) ((MergeJoin *) plan)->mergeclauses);
break;
case T_HashJoin:
set_join_references((Join *) plan, rtable);
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual);
fix_expr_references(plan,
- (Node *) ((HashJoin *) plan)->hashclauses);
+ (Node *) ((HashJoin *) plan)->hashclauses);
break;
case T_Hash:
case T_Material:
/*
* These plan types don't actually bother to evaluate their
- * targetlists (because they just return their unmodified
- * input tuples). The optimizer is lazy about creating really
- * valid targetlists for them --- it tends to just put in a
- * pointer to the child plan node's tlist. Hence, we leave
- * the tlist alone. In particular, we do not want to process
- * subplans in the tlist, since we will likely end up reprocessing
- * subplans that also appear in lower levels of the plan tree!
+ * targetlists (because they just return their unmodified input
+ * tuples). The optimizer is lazy about creating really valid
+ * targetlists for them --- it tends to just put in a pointer to
+ * the child plan node's tlist. Hence, we leave the tlist alone.
+ * In particular, we do not want to process subplans in the tlist,
+ * since we will likely end up reprocessing subplans that also
+ * appear in lower levels of the plan tree!
*
- * Since these plan types don't check quals either, we should
- * not find any qual expression attached to them.
+ * Since these plan types don't check quals either, we should not
+ * find any qual expression attached to them.
*/
Assert(plan->qual == NIL);
break;
case T_Limit:
/*
- * Like the plan types above, Limit doesn't evaluate its tlist
- * or quals. It does have live expressions for limit/offset,
+ * Like the plan types above, Limit doesn't evaluate its tlist or
+ * quals. It does have live expressions for limit/offset,
* however.
*/
Assert(plan->qual == NIL);
case T_Result:
/*
- * Result may or may not have a subplan; no need to fix up
- * subplan references if it hasn't got one...
+ * Result may or may not have a subplan; no need to fix up subplan
+ * references if it hasn't got one...
*
* XXX why does Result use a different subvarno from Agg/Group?
*/
* NOTE: it is essential that we recurse into child plans AFTER we set
* subplan references in this plan's tlist and quals. If we did the
* reference-adjustments bottom-up, then we would fail to match this
- * plan's var nodes against the already-modified nodes of the
- * children. Fortunately, that consideration doesn't apply to SubPlan
- * nodes; else we'd need two passes over the expression trees.
+ * plan's var nodes against the already-modified nodes of the children.
+ * Fortunately, that consideration doesn't apply to SubPlan nodes; else
+ * we'd need two passes over the expression trees.
*/
plan->lefttree = set_plan_references(plan->lefttree, rtable);
plan->righttree = set_plan_references(plan->righttree, rtable);
rte->subquery->rtable);
/*
- * We have to process any initplans too; set_plan_references can't do
- * it for us because of the possibility of double-processing.
+ * We have to process any initplans too; set_plan_references can't do it
+ * for us because of the possibility of double-processing.
*/
foreach(l, plan->scan.plan.initPlan)
{
if (trivial_subqueryscan(plan))
{
/*
- * We can omit the SubqueryScan node and just pull up the subplan.
- * We have to merge its rtable into the outer rtable, which means
+ * We can omit the SubqueryScan node and just pull up the subplan. We
+ * have to merge its rtable into the outer rtable, which means
* adjusting varnos throughout the subtree.
*/
- int rtoffset = list_length(rtable);
- List *sub_rtable;
+ int rtoffset = list_length(rtable);
+ List *sub_rtable;
sub_rtable = copyObject(rte->subquery->rtable);
range_table_walker(sub_rtable,
else
{
/*
- * Keep the SubqueryScan node. We have to do the processing that
- * set_plan_references would otherwise have done on it. Notice
- * we do not do set_uppernode_references() here, because a
- * SubqueryScan will always have been created with correct
- * references to its subplan's outputs to begin with.
+ * Keep the SubqueryScan node. We have to do the processing that
+ * set_plan_references would otherwise have done on it. Notice we do
+ * not do set_uppernode_references() here, because a SubqueryScan will
+ * always have been created with correct references to its subplan's
+ * outputs to begin with.
*/
result = (Plan *) plan;
case T_SetOp:
/*
- * Even though the targetlist won't be used by the executor,
- * we fix it up for possible use by EXPLAIN (not to mention
- * ease of debugging --- wrong varnos are very confusing).
+ * Even though the targetlist won't be used by the executor, we
+ * fix it up for possible use by EXPLAIN (not to mention ease of
+ * debugging --- wrong varnos are very confusing).
*/
adjust_expr_varnos((Node *) plan->targetlist, rtoffset);
Assert(plan->qual == NIL);
case T_Limit:
/*
- * Like the plan types above, Limit doesn't evaluate its tlist
- * or quals. It does have live expressions for limit/offset,
+ * Like the plan types above, Limit doesn't evaluate its tlist or
+ * quals. It does have live expressions for limit/offset,
* however.
*/
adjust_expr_varnos((Node *) plan->targetlist, rtoffset);
/*
* Now recurse into child plans.
*
- * We don't need to (and in fact mustn't) recurse into subqueries,
- * so no need to examine initPlan list.
+ * We don't need to (and in fact mustn't) recurse into subqueries, so no need
+ * to examine initPlan list.
*/
adjust_plan_varnos(plan->lefttree, rtoffset);
adjust_plan_varnos(plan->righttree, rtoffset);
*
* This is different from the rewriter's OffsetVarNodes in that it has to
* work on an already-planned expression tree; in particular, we should not
- * disturb INNER and OUTER references. On the other hand, we don't have to
+ * disturb INNER and OUTER references. On the other hand, we don't have to
* recurse into subqueries nor deal with outer-level Vars, so it's pretty
* simple.
*/
if (IsA(inner_plan, IndexScan))
{
/*
- * An index is being used to reduce the number of tuples
- * scanned in the inner relation. If there are join clauses
- * being used with the index, we must update their outer-rel
- * var nodes to refer to the outer side of the join.
+ * An index is being used to reduce the number of tuples scanned in
+ * the inner relation. If there are join clauses being used with the
+ * index, we must update their outer-rel var nodes to refer to the
+ * outer side of the join.
*/
IndexScan *innerscan = (IndexScan *) inner_plan;
List *indexqualorig = innerscan->indexqualorig;
innerrel);
/*
- * We must fix the inner qpqual too, if it has join
- * clauses (this could happen if special operators are
- * involved: some indexquals may get rechecked as qpquals).
+ * We must fix the inner qpqual too, if it has join clauses (this
+ * could happen if special operators are involved: some indexquals
+ * may get rechecked as qpquals).
*/
if (NumRelids((Node *) inner_plan->qual) > 1)
inner_plan->qual = join_references(inner_plan->qual,
else if (IsA(inner_plan, BitmapHeapScan))
{
/*
- * The inner side is a bitmap scan plan. Fix the top node,
- * and recurse to get the lower nodes.
+ * The inner side is a bitmap scan plan. Fix the top node, and
+ * recurse to get the lower nodes.
*
- * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig
- * if they are duplicated in qpqual, so must test these independently.
+ * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig if
+ * they are duplicated in qpqual, so must test these independently.
*/
BitmapHeapScan *innerscan = (BitmapHeapScan *) inner_plan;
Index innerrel = innerscan->scan.scanrelid;
innerrel);
/*
- * We must fix the inner qpqual too, if it has join
- * clauses (this could happen if special operators are
- * involved: some indexquals may get rechecked as qpquals).
+ * We must fix the inner qpqual too, if it has join clauses (this
+ * could happen if special operators are involved: some indexquals may
+ * get rechecked as qpquals).
*/
if (NumRelids((Node *) inner_plan->qual) > 1)
inner_plan->qual = join_references(inner_plan->qual,
else if (IsA(inner_plan, BitmapAnd))
{
/* All we need do here is recurse */
- BitmapAnd *innerscan = (BitmapAnd *) inner_plan;
- ListCell *l;
+ BitmapAnd *innerscan = (BitmapAnd *) inner_plan;
+ ListCell *l;
foreach(l, innerscan->bitmapplans)
{
else if (IsA(inner_plan, BitmapOr))
{
/* All we need do here is recurse */
- BitmapOr *innerscan = (BitmapOr *) inner_plan;
- ListCell *l;
+ BitmapOr *innerscan = (BitmapOr *) inner_plan;
+ ListCell *l;
foreach(l, innerscan->bitmapplans)
{
*
* In most cases, subplan tlists will be "flat" tlists with only Vars,
* so we try to optimize that case by extracting information about Vars
- * in advance. Matching a parent tlist to a child is still an O(N^2)
+ * in advance. Matching a parent tlist to a child is still an O(N^2)
* operation, but at least with a much smaller constant factor than plain
* tlist_member() searches.
*
if (tle->expr && IsA(tle->expr, Var))
{
- Var *var = (Var *) tle->expr;
+ Var *var = (Var *) tle->expr;
vinfo->varno = var->varno;
vinfo->varattno = var->varattno;
exprType((Node *) tle->expr),
exprTypmod((Node *) tle->expr),
0);
- newvar->varnoold = 0; /* wasn't ever a plain Var */
+ newvar->varnoold = 0; /* wasn't ever a plain Var */
newvar->varoattno = 0;
return newvar;
}
static Node *
replace_vars_with_subplan_refs_mutator(Node *node,
- replace_vars_with_subplan_refs_context *context)
+ replace_vars_with_subplan_refs_context *context)
{
Var *newvar;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.99 2005/06/05 22:32:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.100 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
abslevel = PlannerQueryLevel - var->varlevelsup;
/*
- * If there's already a PlannerParamList entry for this same Var, just
- * use it. NOTE: in sufficiently complex querytrees, it is possible
- * for the same varno/abslevel to refer to different RTEs in different
- * parts of the parsetree, so that different fields might end up
- * sharing the same Param number. As long as we check the vartype as
- * well, I believe that this sort of aliasing will cause no trouble.
- * The correct field should get stored into the Param slot at
- * execution in each part of the tree.
+ * If there's already a PlannerParamList entry for this same Var, just use
+ * it. NOTE: in sufficiently complex querytrees, it is possible for the
+ * same varno/abslevel to refer to different RTEs in different parts of
+ * the parsetree, so that different fields might end up sharing the same
+ * Param number. As long as we check the vartype as well, I believe that
+ * this sort of aliasing will cause no trouble. The correct field should
+ * get stored into the Param slot at execution in each part of the tree.
*
- * We also need to demand a match on vartypmod. This does not matter for
- * the Param itself, since those are not typmod-dependent, but it does
- * matter when make_subplan() instantiates a modified copy of the Var
- * for a subplan's args list.
+ * We also need to demand a match on vartypmod. This does not matter for the
+ * Param itself, since those are not typmod-dependent, but it does matter
+ * when make_subplan() instantiates a modified copy of the Var for a
+ * subplan's args list.
*/
i = 0;
foreach(ppl, PlannerParamList)
abslevel = PlannerQueryLevel - agg->agglevelsup;
/*
- * It does not seem worthwhile to try to match duplicate outer aggs.
- * Just make a new slot every time.
+ * It does not seem worthwhile to try to match duplicate outer aggs. Just
+ * make a new slot every time.
*/
agg = (Aggref *) copyObject(agg);
IncrementVarSublevelsUp((Node *) agg, -((int) agg->agglevelsup), 0);
Node *result;
/*
- * Copy the source Query node. This is a quick and dirty kluge to
- * resolve the fact that the parser can generate trees with multiple
- * links to the same sub-Query node, but the planner wants to scribble
- * on the Query. Try to clean this up when we do querytree redesign...
+ * Copy the source Query node. This is a quick and dirty kluge to resolve
+ * the fact that the parser can generate trees with multiple links to the
+ * same sub-Query node, but the planner wants to scribble on the Query.
+ * Try to clean this up when we do querytree redesign...
*/
subquery = (Query *) copyObject(subquery);
/*
- * For an EXISTS subplan, tell lower-level planner to expect that only
- * the first tuple will be retrieved. For ALL and ANY subplans, we
- * will be able to stop evaluating if the test condition fails, so
- * very often not all the tuples will be retrieved; for lack of a
- * better idea, specify 50% retrieval. For EXPR and MULTIEXPR
- * subplans, use default behavior (we're only expecting one row out,
- * anyway).
+ * For an EXISTS subplan, tell lower-level planner to expect that only the
+ * first tuple will be retrieved. For ALL and ANY subplans, we will be
+ * able to stop evaluating if the test condition fails, so very often not
+ * all the tuples will be retrieved; for lack of a better idea, specify
+ * 50% retrieval. For EXPR and MULTIEXPR subplans, use default behavior
+ * (we're only expecting one row out, anyway).
*
- * NOTE: if you change these numbers, also change cost_qual_eval_walker()
- * in path/costsize.c.
+ * NOTE: if you change these numbers, also change cost_qual_eval_walker() in
+ * path/costsize.c.
*
* XXX If an ALL/ANY subplan is uncorrelated, we may decide to hash or
- * materialize its result below. In that case it would've been better
- * to specify full retrieval. At present, however, we can only detect
+ * materialize its result below. In that case it would've been better to
+ * specify full retrieval. At present, however, we can only detect
* correlation or lack of it after we've made the subplan :-(. Perhaps
- * detection of correlation should be done as a separate step.
- * Meanwhile, we don't want to be too optimistic about the percentage
- * of tuples retrieved, for fear of selecting a plan that's bad for
- * the materialization case.
+ * detection of correlation should be done as a separate step. Meanwhile,
+ * we don't want to be too optimistic about the percentage of tuples
+ * retrieved, for fear of selecting a plan that's bad for the
+ * materialization case.
*/
if (slink->subLinkType == EXISTS_SUBLINK)
tuple_fraction = 1.0; /* just like a LIMIT 1 */
*/
node->plan = plan = subquery_planner(subquery, tuple_fraction, NULL);
- node->plan_id = PlannerPlanId++; /* Assign unique ID to this
- * SubPlan */
+ node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */
node->rtable = subquery->rtable;
node->args = NIL;
/*
- * Make parParam list of params that current query level will pass to
- * this child plan.
+ * Make parParam list of params that current query level will pass to this
+ * child plan.
*/
tmpset = bms_copy(plan->extParam);
while ((paramid = bms_first_member(tmpset)) >= 0)
bms_free(tmpset);
/*
- * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY,
- * or MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or
- * ARRAY, we just produce a Param referring to the result of
- * evaluating the initPlan. For MULTIEXPR, we must build an AND or
- * OR-clause of the individual comparison operators, using the
- * appropriate lefthand side expressions and Params for the initPlan's
- * target items.
+ * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY, or
+ * MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or ARRAY,
+ * we just produce a Param referring to the result of evaluating the
+ * initPlan. For MULTIEXPR, we must build an AND or OR-clause of the
+ * individual comparison operators, using the appropriate lefthand side
+ * expressions and Params for the initPlan's target items.
*/
if (node->parParam == NIL && slink->subLinkType == EXISTS_SUBLINK)
{
PlannerInitPlan = lappend(PlannerInitPlan, node);
/*
- * The executable expressions are returned to become part of the
- * outer plan's expression tree; they are not kept in the initplan
- * node.
+ * The executable expressions are returned to become part of the outer
+ * plan's expression tree; they are not kept in the initplan node.
*/
if (list_length(exprs) > 1)
result = (Node *) (node->useOr ? make_orclause(exprs) :
ListCell *l;
/*
- * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types
- * to initPlans, even when they are uncorrelated or undirect
- * correlated, because we need to scan the output of the subplan
- * for each outer tuple. But if it's an IN (= ANY) test, we might
- * be able to use a hashtable to avoid comparing all the tuples.
+ * We can't convert subplans of ALL_SUBLINK or ANY_SUBLINK types to
+ * initPlans, even when they are uncorrelated or undirect correlated,
+ * because we need to scan the output of the subplan for each outer
+ * tuple. But if it's an IN (= ANY) test, we might be able to use a
+ * hashtable to avoid comparing all the tuples.
*/
if (subplan_is_hashable(slink, node))
node->useHashTable = true;
/*
- * Otherwise, we have the option to tack a MATERIAL node onto the
- * top of the subplan, to reduce the cost of reading it
- * repeatedly. This is pointless for a direct-correlated subplan,
- * since we'd have to recompute its results each time anyway. For
- * uncorrelated/undirect correlated subplans, we add MATERIAL unless
- * the subplan's top plan node would materialize its output anyway.
+ * Otherwise, we have the option to tack a MATERIAL node onto the top
+ * of the subplan, to reduce the cost of reading it repeatedly. This
+ * is pointless for a direct-correlated subplan, since we'd have to
+ * recompute its results each time anyway. For uncorrelated/undirect
+ * correlated subplans, we add MATERIAL unless the subplan's top plan
+ * node would materialize its output anyway.
*/
else if (node->parParam == NIL)
{
PlannerParamItem *pitem = list_nth(PlannerParamList, lfirst_int(l));
/*
- * The Var or Aggref has already been adjusted to have the
- * correct varlevelsup or agglevelsup. We probably don't even
- * need to copy it again, but be safe.
+ * The Var or Aggref has already been adjusted to have the correct
+ * varlevelsup or agglevelsup. We probably don't even need to
+ * copy it again, but be safe.
*/
args = lappend(args, copyObject(pitem->item));
}
*
* Note: we use make_op_expr in case runtime type conversion function
* calls must be inserted for this operator! (But we are not
- * expecting to have to resolve unknown Params, so it's okay to
- * pass a null pstate.)
+ * expecting to have to resolve unknown Params, so it's okay to pass a
+ * null pstate.)
*/
result = lappend(result,
make_op_expr(NULL,
/*
* The sublink type must be "= ANY" --- that is, an IN operator. (We
* require the operator name to be unqualified, which may be overly
- * paranoid, or may not be.) XXX since we also check that the
- * operators are hashable, the test on operator name may be redundant?
+ * paranoid, or may not be.) XXX since we also check that the operators
+ * are hashable, the test on operator name may be redundant?
*/
if (slink->subLinkType != ANY_SUBLINK)
return false;
/*
* The subplan must not have any direct correlation vars --- else we'd
- * have to recompute its output each time, so that the hashtable
- * wouldn't gain anything.
+ * have to recompute its output each time, so that the hashtable wouldn't
+ * gain anything.
*/
if (node->parParam != NIL)
return false;
/*
- * The estimated size of the subquery result must fit in work_mem.
- * (XXX what about hashtable overhead?)
+ * The estimated size of the subquery result must fit in work_mem. (XXX
+ * what about hashtable overhead?)
*/
subquery_size = node->plan->plan_rows *
(MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData)));
return false;
/*
- * The combining operators must be hashable, strict, and
- * self-commutative. The need for hashability is obvious, since we
- * want to use hashing. Without strictness, behavior in the presence
- * of nulls is too unpredictable. (We actually must assume even more
- * than plain strictness, see nodeSubplan.c for details.) And
- * commutativity ensures that the left and right datatypes are the
- * same; this allows us to assume that the combining operators are
- * equality for the righthand datatype, so that they can be used to
- * compare righthand tuples as well as comparing lefthand to righthand
- * tuples. (This last restriction could be relaxed by using two
- * different sets of operators with the hash table, but there is no
- * obvious usefulness to that at present.)
+ * The combining operators must be hashable, strict, and self-commutative.
+ * The need for hashability is obvious, since we want to use hashing.
+ * Without strictness, behavior in the presence of nulls is too
+ * unpredictable. (We actually must assume even more than plain
+ * strictness, see nodeSubplan.c for details.) And commutativity ensures
+ * that the left and right datatypes are the same; this allows us to
+ * assume that the combining operators are equality for the righthand
+ * datatype, so that they can be used to compare righthand tuples as well
+ * as comparing lefthand to righthand tuples. (This last restriction
+ * could be relaxed by using two different sets of operators with the hash
+ * table, but there is no obvious usefulness to that at present.)
*/
foreach(l, slink->operOids)
{
return NULL;
/*
- * The sub-select must not refer to any Vars of the parent query.
- * (Vars of higher levels should be okay, though.)
+ * The sub-select must not refer to any Vars of the parent query. (Vars of
+ * higher levels should be okay, though.)
*/
if (contain_vars_of_level((Node *) subselect, 1))
return NULL;
/*
- * The left-hand expressions must contain some Vars of the current
- * query, else it's not gonna be a join.
+ * The left-hand expressions must contain some Vars of the current query,
+ * else it's not gonna be a join.
*/
left_varnos = pull_varnos((Node *) sublink->lefthand);
if (bms_is_empty(left_varnos))
return NULL;
/*
- * The left-hand expressions mustn't be volatile. (Perhaps we should
- * test the combining operators, too? We'd only need to point the
- * function directly at the sublink ...)
+ * The left-hand expressions mustn't be volatile. (Perhaps we should test
+ * the combining operators, too? We'd only need to point the function
+ * directly at the sublink ...)
*/
if (contain_volatile_functions((Node *) sublink->lefthand))
return NULL;
/*
* Okay, pull up the sub-select into top range table and jointree.
*
- * We rely here on the assumption that the outer query has no references
- * to the inner (necessarily true, other than the Vars that we build
- * below). Therefore this is a lot easier than what
- * pull_up_subqueries has to go through.
+ * We rely here on the assumption that the outer query has no references to
+ * the inner (necessarily true, other than the Vars that we build below).
+ * Therefore this is a lot easier than what pull_up_subqueries has to go
+ * through.
*/
rte = addRangeTableEntryForSubquery(NULL,
subselect,
/*
* Build the result qual expressions. As a side effect,
- * ininfo->sub_targetlist is filled with a list of Vars representing
- * the subselect outputs.
+ * ininfo->sub_targetlist is filled with a list of Vars representing the
+ * subselect outputs.
*/
exprs = convert_sublink_opers(sublink->lefthand,
sublink->operOids,
List *lefthand;
/*
- * First, recursively process the lefthand-side expressions, if
- * any.
+ * First, recursively process the lefthand-side expressions, if any.
*/
locTopQual = false;
lefthand = (List *)
}
/*
- * We should never see a SubPlan expression in the input (since this
- * is the very routine that creates 'em to begin with). We shouldn't
- * find ourselves invoked directly on a Query, either.
+ * We should never see a SubPlan expression in the input (since this is
+ * the very routine that creates 'em to begin with). We shouldn't find
+ * ourselves invoked directly on a Query, either.
*/
Assert(!is_subplan(node));
Assert(!IsA(node, Query));
/*
* Because make_subplan() could return an AND or OR clause, we have to
- * take steps to preserve AND/OR flatness of a qual. We assume the
- * input has been AND/OR flattened and so we need no recursion here.
+ * take steps to preserve AND/OR flatness of a qual. We assume the input
+ * has been AND/OR flattened and so we need no recursion here.
*
* If we recurse down through anything other than an AND node, we are
- * definitely not at top qual level anymore. (Due to the coding here,
- * we will not get called on the List subnodes of an AND, so no check
- * is needed for List.)
+ * definitely not at top qual level anymore. (Due to the coding here, we
+ * will not get called on the List subnodes of an AND, so no check is
+ * needed for List.)
*/
if (and_clause(node))
{
/*
* First, scan the param list to discover the sets of params that are
- * available from outer query levels and my own query level. We do
- * this once to save time in the per-plan recursion steps.
+ * available from outer query levels and my own query level. We do this
+ * once to save time in the per-plan recursion steps.
*/
paramid = 0;
foreach(l, PlannerParamList)
bms_free(valid_params);
/*
- * Finally, attach any initPlans to the topmost plan node,
- * and add their extParams to the topmost node's, too.
+ * Finally, attach any initPlans to the topmost plan node, and add their
+ * extParams to the topmost node's, too.
*
- * We also add the total_cost of each initPlan to the startup cost of
- * the top node. This is a conservative overestimate, since in
- * fact each initPlan might be executed later than plan startup,
- * or even not at all.
+ * We also add the total_cost of each initPlan to the startup cost of the top
+ * node. This is a conservative overestimate, since in fact each initPlan
+ * might be executed later than plan startup, or even not at all.
*/
plan->initPlan = PlannerInitPlan;
PlannerInitPlan = NIL; /* make sure they're not attached twice */
context.outer_params = outer_params;
/*
- * When we call finalize_primnode, context.paramids sets are
- * automatically merged together. But when recursing to self, we have
- * to do it the hard way. We want the paramids set to include params
- * in subplans as well as at this level.
+ * When we call finalize_primnode, context.paramids sets are automatically
+ * merged together. But when recursing to self, we have to do it the hard
+ * way. We want the paramids set to include params in subplans as well as
+ * at this level.
*/
/* Find params in targetlist and qual */
&context);
/*
- * we need not look at indexqualorig, since it will have the
- * same param references as indexqual.
+ * we need not look at indexqualorig, since it will have the same
+ * param references as indexqual.
*/
break;
case T_BitmapIndexScan:
finalize_primnode((Node *) ((BitmapIndexScan *) plan)->indexqual,
&context);
+
/*
- * we need not look at indexqualorig, since it will have the
- * same param references as indexqual.
+ * we need not look at indexqualorig, since it will have the same
+ * param references as indexqual.
*/
break;
case T_SubqueryScan:
/*
- * In a SubqueryScan, SS_finalize_plan has already been run on
- * the subplan by the inner invocation of subquery_planner, so
- * there's no need to do it again. Instead, just pull out the
- * subplan's extParams list, which represents the params it
- * needs from my level and higher levels.
+ * In a SubqueryScan, SS_finalize_plan has already been run on the
+ * subplan by the inner invocation of subquery_planner, so there's
+ * no need to do it again. Instead, just pull out the subplan's
+ * extParams list, which represents the params it needs from my
+ * level and higher levels.
*/
context.paramids = bms_add_members(context.paramids,
- ((SubqueryScan *) plan)->subplan->extParam);
+ ((SubqueryScan *) plan)->subplan->extParam);
break;
case T_FunctionScan:
plan->allParam = context.paramids;
/*
- * For speed at execution time, make sure extParam/allParam are
- * actually NULL if they are empty sets.
+ * For speed at execution time, make sure extParam/allParam are actually
+ * NULL if they are empty sets.
*/
if (bms_is_empty(plan->extParam))
{
/* Add outer-level params needed by the subplan to paramids */
context->paramids = bms_join(context->paramids,
- bms_intersect(subplan->plan->extParam,
- context->outer_params));
+ bms_intersect(subplan->plan->extParam,
+ context->outer_params));
/* fall through to recurse into subplan args */
}
return expression_tree_walker(node, finalize_primnode,
int paramid;
/*
- * Set up for a new level of subquery. This is just to keep
+ * Set up for a new level of subquery. This is just to keep
* SS_finalize_plan from becoming confused.
*/
PlannerQueryLevel++;
node = makeNode(SubPlan);
node->subLinkType = EXPR_SUBLINK;
node->plan = plan;
- node->plan_id = PlannerPlanId++; /* Assign unique ID to this
- * SubPlan */
+ node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */
node->rtable = root->parse->rtable;
PlannerInitPlan = lappend(PlannerInitPlan, node);
/*
- * Make parParam list of params that current query level will pass to
- * this child plan. (In current usage there probably aren't any.)
+ * Make parParam list of params that current query level will pass to this
+ * child plan. (In current usage there probably aren't any.)
*/
tmpset = bms_copy(plan->extParam);
while ((paramid = bms_first_member(tmpset)) >= 0)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.30 2005/08/01 20:31:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.31 2005/10/15 02:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Query *subquery = rte->subquery;
/*
- * Is this a subquery RTE, and if so, is the subquery simple
- * enough to pull up? (If not, do nothing at this node.)
+ * Is this a subquery RTE, and if so, is the subquery simple enough to
+ * pull up? (If not, do nothing at this node.)
*
* If we are inside an outer join, only pull up subqueries whose
* targetlists are nullable --- otherwise substituting their tlist
*
* XXX This could be improved by generating pseudo-variables for such
* expressions; we'd have to figure out how to get the pseudo-
- * variables evaluated at the right place in the modified plan
- * tree. Fix it someday.
+ * variables evaluated at the right place in the modified plan tree.
+ * Fix it someday.
*/
if (rte->rtekind == RTE_SUBQUERY &&
is_simple_subquery(subquery) &&
ListCell *rt;
/*
- * Need a modifiable copy of the subquery to hack on. Even if
- * we didn't sometimes choose not to pull up below, we must do
- * this to avoid problems if the same subquery is referenced
- * from multiple jointree items (which can't happen normally,
- * but might after rule rewriting).
+ * Need a modifiable copy of the subquery to hack on. Even if we
+ * didn't sometimes choose not to pull up below, we must do this
+ * to avoid problems if the same subquery is referenced from
+ * multiple jointree items (which can't happen normally, but might
+ * after rule rewriting).
*/
subquery = copyObject(subquery);
/*
* Create a PlannerInfo data structure for this subquery.
*
- * NOTE: the next few steps should match the first processing
- * in subquery_planner(). Can we refactor to avoid code
- * duplication, or would that just make things uglier?
+ * NOTE: the next few steps should match the first processing in
+ * subquery_planner(). Can we refactor to avoid code duplication,
+ * or would that just make things uglier?
*/
subroot = makeNode(PlannerInfo);
subroot->parse = subquery;
/*
- * Pull up any IN clauses within the subquery's WHERE, so that
- * we don't leave unoptimized INs behind.
+ * Pull up any IN clauses within the subquery's WHERE, so that we
+ * don't leave unoptimized INs behind.
*/
subroot->in_info_list = NIL;
if (subquery->hasSubLinks)
subquery->jointree->quals = pull_up_IN_clauses(subroot,
- subquery->jointree->quals);
+ subquery->jointree->quals);
/*
* Recursively pull up the subquery's subqueries, so that this
* routine's processing is complete for its jointree and
* rangetable.
*
- * Note: 'false' is correct here even if we are within an outer
- * join in the upper query; the lower query starts with a
- * clean slate for outer-join semantics.
+ * Note: 'false' is correct here even if we are within an outer join
+ * in the upper query; the lower query starts with a clean slate
+ * for outer-join semantics.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subroot, (Node *) subquery->jointree,
false);
/*
- * Now we must recheck whether the subquery is still simple
- * enough to pull up. If not, abandon processing it.
+ * Now we must recheck whether the subquery is still simple enough
+ * to pull up. If not, abandon processing it.
*
- * We don't really need to recheck all the conditions involved,
- * but it's easier just to keep this "if" looking the same as
- * the one above.
+ * We don't really need to recheck all the conditions involved, but
+ * it's easier just to keep this "if" looking the same as the one
+ * above.
*/
if (is_simple_subquery(subquery) &&
(!below_outer_join || has_nullable_targetlist(subquery)))
/*
* Give up, return unmodified RangeTblRef.
*
- * Note: The work we just did will be redone when the
- * subquery gets planned on its own. Perhaps we could
- * avoid that by storing the modified subquery back into
- * the rangetable, but I'm not gonna risk it now.
+ * Note: The work we just did will be redone when the subquery
+ * gets planned on its own. Perhaps we could avoid that by
+ * storing the modified subquery back into the rangetable, but
+ * I'm not gonna risk it now.
*/
return jtnode;
}
OffsetVarNodes((Node *) subroot->in_info_list, rtoffset, 0);
/*
- * Upper-level vars in subquery are now one level closer to
- * their parent than before.
+ * Upper-level vars in subquery are now one level closer to their
+ * parent than before.
*/
IncrementVarSublevelsUp((Node *) subquery, -1, 1);
IncrementVarSublevelsUp((Node *) subroot->in_info_list, -1, 1);
/*
* Replace all of the top query's references to the subquery's
* outputs with copies of the adjusted subtlist items, being
- * careful not to replace any of the jointree structure.
- * (This'd be a lot cleaner if we could use
- * query_tree_mutator.)
+ * careful not to replace any of the jointree structure. (This'd
+ * be a lot cleaner if we could use query_tree_mutator.)
*/
subtlist = subquery->targetList;
parse->targetList = (List *)
}
/*
- * Now append the adjusted rtable entries to upper query. (We
- * hold off until after fixing the upper rtable entries; no
- * point in running that code on the subquery ones too.)
+ * Now append the adjusted rtable entries to upper query. (We hold
+ * off until after fixing the upper rtable entries; no point in
+ * running that code on the subquery ones too.)
*/
parse->rtable = list_concat(parse->rtable, subquery->rtable);
* already adjusted the marker values, so just list_concat the
* list.)
*
- * Executor can't handle multiple FOR UPDATE/SHARE/NOWAIT flags,
- * so complain if they are valid but different
+ * Executor can't handle multiple FOR UPDATE/SHARE/NOWAIT flags, so
+ * complain if they are valid but different
*/
if (parse->rowMarks && subquery->rowMarks)
{
if (parse->rowNoWait != subquery->rowNoWait)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use both wait and NOWAIT in one query")));
+ errmsg("cannot use both wait and NOWAIT in one query")));
}
parse->rowMarks = list_concat(parse->rowMarks, subquery->rowMarks);
if (subquery->rowMarks)
}
/*
- * We also have to fix the relid sets of any parent
- * InClauseInfo nodes. (This could perhaps be done by
- * ResolveNew, but it would clutter that routine's API
- * unreasonably.)
+ * We also have to fix the relid sets of any parent InClauseInfo
+ * nodes. (This could perhaps be done by ResolveNew, but it would
+ * clutter that routine's API unreasonably.)
*/
if (root->in_info_list)
{
case JOIN_UNION:
/*
- * This is where we fail if upper levels of planner
- * haven't rewritten UNION JOIN as an Append ...
+ * This is where we fail if upper levels of planner haven't
+ * rewritten UNION JOIN as an Append ...
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
return false;
/*
- * Can't pull up a subquery involving grouping, aggregation, sorting,
- * or limiting.
+ * Can't pull up a subquery involving grouping, aggregation, sorting, or
+ * limiting.
*/
if (subquery->hasAggs ||
subquery->groupClause ||
return false;
/*
- * Don't pull up a subquery that has any set-returning functions in
- * its targetlist. Otherwise we might well wind up inserting
- * set-returning functions into places where they mustn't go, such as
- * quals of higher queries.
+ * Don't pull up a subquery that has any set-returning functions in its
+ * targetlist. Otherwise we might well wind up inserting set-returning
+ * functions into places where they mustn't go, such as quals of higher
+ * queries.
*/
if (expression_returns_set((Node *) subquery->targetList))
return false;
/*
* Hack: don't try to pull up a subquery with an empty jointree.
- * query_planner() will correctly generate a Result plan for a
- * jointree that's totally empty, but I don't think the right things
- * happen if an empty FromExpr appears lower down in a jointree. Not
- * worth working hard on this, just to collapse SubqueryScan/Result
- * into Result...
+ * query_planner() will correctly generate a Result plan for a jointree
+ * that's totally empty, but I don't think the right things happen if an
+ * empty FromExpr appears lower down in a jointree. Not worth working hard
+ * on this, just to collapse SubqueryScan/Result into Result...
*/
if (subquery->jointree->fromlist == NIL)
return false;
subtlist, CMD_SELECT, 0);
/*
- * We don't bother to update the colvars list, since it won't be
- * used again ...
+ * We don't bother to update the colvars list, since it won't be used
+ * again ...
*/
}
else
reduce_outer_joins_state *state;
/*
- * To avoid doing strictness checks on more quals than necessary, we
- * want to stop descending the jointree as soon as there are no outer
- * joins below our current point. This consideration forces a
- * two-pass process. The first pass gathers information about which
- * base rels appear below each side of each join clause, and about
- * whether there are outer join(s) below each side of each join
- * clause. The second pass examines qual clauses and changes join
- * types as it descends the tree.
+ * To avoid doing strictness checks on more quals than necessary, we want
+ * to stop descending the jointree as soon as there are no outer joins
+ * below our current point. This consideration forces a two-pass process.
+ * The first pass gathers information about which base rels appear below
+ * each side of each join clause, and about whether there are outer
+ * join(s) below each side of each join clause. The second pass examines
+ * qual clauses and changes join types as it descends the tree.
*/
state = reduce_outer_joins_pass1((Node *) root->parse->jointree);
/*
* If this join is (now) inner, we can add any nonnullability
- * constraints its quals provide to those we got from above.
- * But if it is outer, we can only pass down the local
- * constraints into the nullable side, because an outer join
- * never eliminates any rows from its non-nullable side. If
- * it's a FULL join then it doesn't eliminate anything from
- * either side.
+ * constraints its quals provide to those we got from above. But
+ * if it is outer, we can only pass down the local constraints
+ * into the nullable side, because an outer join never eliminates
+ * any rows from its non-nullable side. If it's a FULL join then
+ * it doesn't eliminate anything from either side.
*/
if (jointype != JOIN_FULL)
{
nonnullable_rels);
}
else
- local_nonnullable = NULL; /* no use in calculating
- * it */
+ local_nonnullable = NULL; /* no use in calculating it */
if (left_state->contains_outer)
{
NullTest *expr = (NullTest *) node;
/*
- * IS NOT NULL can be considered strict, but only at top level;
- * else we might have something like NOT (x IS NOT NULL).
+ * IS NOT NULL can be considered strict, but only at top level; else
+ * we might have something like NOT (x IS NOT NULL).
*/
if (top_level && expr->nulltesttype == IS_NOT_NULL)
result = find_nonnullable_rels((Node *) expr->arg, false);
if (child && IsA(child, FromExpr))
{
/*
- * Yes, so do we want to merge it into parent? Always do
- * so if child has just one element (since that doesn't
- * make the parent's list any longer). Otherwise merge if
- * the resulting join list would be no longer than
+ * Yes, so do we want to merge it into parent? Always do so
+ * if child has just one element (since that doesn't make the
+ * parent's list any longer). Otherwise merge if the
+ * resulting join list would be no longer than
* from_collapse_limit.
*/
FromExpr *subf = (FromExpr *) child;
newlist = list_concat(newlist, subf->fromlist);
/*
- * By now, the quals have been converted to
- * implicit-AND lists, so we just need to join the
- * lists. NOTE: we put the pulled-up quals first.
+ * By now, the quals have been converted to implicit-AND
+ * lists, so we just need to join the lists. NOTE: we put
+ * the pulled-up quals first.
*/
f->quals = (Node *) list_concat((List *) subf->quals,
(List *) f->quals);
j->rarg = simplify_jointree(root, j->rarg);
/*
- * If it is an outer join, we must not flatten it. An inner join
- * is semantically equivalent to a FromExpr; we convert it to one,
+ * If it is an outer join, we must not flatten it. An inner join is
+ * semantically equivalent to a FromExpr; we convert it to one,
* allowing it to be flattened into its parent, if the resulting
* FromExpr would have no more than join_collapse_limit members.
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.50 2005/07/29 21:40:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.51 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return NULL;
/*
- * Push down NOTs. We do this only in the top-level boolean
- * expression, without examining arguments of operators/functions. The
- * main reason for doing this is to expose as much top-level AND/OR
- * structure as we can, so there's no point in descending further.
+ * Push down NOTs. We do this only in the top-level boolean expression,
+ * without examining arguments of operators/functions. The main reason for
+ * doing this is to expose as much top-level AND/OR structure as we can,
+ * so there's no point in descending further.
*/
newqual = find_nots(qual);
/*
* Note: we can destructively concat the subexpression's arglist
* because we know the recursive invocation of pull_ands will have
- * built a new arglist not shared with any other expr. Otherwise
- * we'd need a list_copy here.
+ * built a new arglist not shared with any other expr. Otherwise we'd
+ * need a list_copy here.
*/
if (and_clause(subexpr))
out_list = list_concat(out_list,
- pull_ands(((BoolExpr *) subexpr)->args));
+ pull_ands(((BoolExpr *) subexpr)->args));
else
out_list = lappend(out_list, subexpr);
}
/*
* Note: we can destructively concat the subexpression's arglist
* because we know the recursive invocation of pull_ors will have
- * built a new arglist not shared with any other expr. Otherwise
- * we'd need a list_copy here.
+ * built a new arglist not shared with any other expr. Otherwise we'd
+ * need a list_copy here.
*/
if (or_clause(subexpr))
out_list = list_concat(out_list,
- pull_ors(((BoolExpr *) subexpr)->args));
+ pull_ors(((BoolExpr *) subexpr)->args));
else
out_list = lappend(out_list, subexpr);
}
{
/*
* Another NOT cancels this NOT, so eliminate the NOT and stop
- * negating this branch. But search the subexpression for more
- * NOTs to simplify.
+ * negating this branch. But search the subexpression for more NOTs
+ * to simplify.
*/
return find_nots(get_notclausearg(qual));
}
orlist = lappend(orlist, find_duplicate_ors(lfirst(temp)));
/*
- * Don't need pull_ors() since this routine will never introduce
- * an OR where there wasn't one before.
+ * Don't need pull_ors() since this routine will never introduce an OR
+ * where there wasn't one before.
*/
return process_duplicate_ors(orlist);
}
return linitial(orlist);
/*
- * Choose the shortest AND clause as the reference list --- obviously,
- * any subclause not in this clause isn't in all the clauses. If we
- * find a clause that's not an AND, we can treat it as a one-element
- * AND clause, which necessarily wins as shortest.
+ * Choose the shortest AND clause as the reference list --- obviously, any
+ * subclause not in this clause isn't in all the clauses. If we find a
+ * clause that's not an AND, we can treat it as a one-element AND clause,
+ * which necessarily wins as shortest.
*/
foreach(temp, orlist)
{
reference = list_union(NIL, reference);
/*
- * Check each element of the reference list to see if it's in all the
- * OR clauses. Build a new list of winning clauses.
+ * Check each element of the reference list to see if it's in all the OR
+ * clauses. Build a new list of winning clauses.
*/
winners = NIL;
foreach(temp, reference)
/*
* Generate new OR list consisting of the remaining sub-clauses.
*
- * If any clause degenerates to empty, then we have a situation like (A
- * AND B) OR (A), which can be reduced to just A --- that is, the
- * additional conditions in other arms of the OR are irrelevant.
+ * If any clause degenerates to empty, then we have a situation like (A AND
+ * B) OR (A), which can be reduced to just A --- that is, the additional
+ * conditions in other arms of the OR are irrelevant.
*
- * Note that because we use list_difference, any multiple occurrences of
- * a winning clause in an AND sub-clause will be removed
- * automatically.
+ * Note that because we use list_difference, any multiple occurrences of a
+ * winning clause in an AND sub-clause will be removed automatically.
*/
neworlist = NIL;
foreach(temp, orlist)
}
/*
- * Append reduced OR to the winners list, if it's not degenerate,
- * handling the special case of one element correctly (can that really
- * happen?). Also be careful to maintain AND/OR flatness in case we
- * pulled up a sub-sub-OR-clause.
+ * Append reduced OR to the winners list, if it's not degenerate, handling
+ * the special case of one element correctly (can that really happen?).
+ * Also be careful to maintain AND/OR flatness in case we pulled up a
+ * sub-sub-OR-clause.
*/
if (neworlist != NIL)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.77 2005/06/05 22:32:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.78 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *
preprocess_targetlist(PlannerInfo *root, List *tlist)
{
- Query *parse = root->parse;
- int result_relation = parse->resultRelation;
- List *range_table = parse->rtable;
- CmdType command_type = parse->commandType;
+ Query *parse = root->parse;
+ int result_relation = parse->resultRelation;
+ List *range_table = parse->rtable;
+ CmdType command_type = parse->commandType;
/*
* Sanity check: if there is a result relation, it'd better be a real
}
/*
- * for heap_formtuple to work, the targetlist must match the exact
- * order of the attributes. We also need to fill in any missing
- * attributes. -ay 10/94
+ * for heap_formtuple to work, the targetlist must match the exact order
+ * of the attributes. We also need to fill in any missing attributes.
+ * -ay 10/94
*/
if (command_type == CMD_INSERT || command_type == CMD_UPDATE)
tlist = expand_targetlist(tlist, command_type,
result_relation, range_table);
/*
- * for "update" and "delete" queries, add ctid of the result relation
- * into the target list so that the ctid will propagate through
- * execution and ExecutePlan() will be able to identify the right
- * tuple to replace or delete. This extra field is marked "junk" so
- * that it is not stored back into the tuple.
+ * for "update" and "delete" queries, add ctid of the result relation into
+ * the target list so that the ctid will propagate through execution and
+ * ExecutePlan() will be able to identify the right tuple to replace or
+ * delete. This extra field is marked "junk" so that it is not stored
+ * back into the tuple.
*/
if (command_type == CMD_UPDATE || command_type == CMD_DELETE)
{
true);
/*
- * For an UPDATE, expand_targetlist already created a fresh tlist.
- * For DELETE, better do a listCopy so that we don't destructively
- * modify the original tlist (is this really necessary?).
+ * For an UPDATE, expand_targetlist already created a fresh tlist. For
+ * DELETE, better do a listCopy so that we don't destructively modify
+ * the original tlist (is this really necessary?).
*/
if (command_type == CMD_DELETE)
tlist = list_copy(tlist);
}
/*
- * Add TID targets for rels selected FOR UPDATE/SHARE. The executor
- * uses the TID to know which rows to lock, much as for UPDATE or
- * DELETE.
+ * Add TID targets for rels selected FOR UPDATE/SHARE. The executor uses
+ * the TID to know which rows to lock, much as for UPDATE or DELETE.
*/
if (parse->rowMarks)
{
ListCell *l;
/*
- * We've got trouble if the FOR UPDATE/SHARE appears inside
- * grouping, since grouping renders a reference to individual
- * tuple CTIDs invalid. This is also checked at parse time,
- * but that's insufficient because of rule substitution, query
- * pullup, etc.
+ * We've got trouble if the FOR UPDATE/SHARE appears inside grouping,
+ * since grouping renders a reference to individual tuple CTIDs
+ * invalid. This is also checked at parse time, but that's
+ * insufficient because of rule substitution, query pullup, etc.
*/
CheckSelectLocking(parse, parse->forUpdate);
/*
- * Currently the executor only supports FOR UPDATE/SHARE at top
- * level
+ * Currently the executor only supports FOR UPDATE/SHARE at top level
*/
if (PlannerQueryLevel > 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE is not allowed in subqueries")));
+ errmsg("SELECT FOR UPDATE/SHARE is not allowed in subqueries")));
foreach(l, parse->rowMarks)
{
tlist_item = list_head(tlist);
/*
- * The rewriter should have already ensured that the TLEs are in
- * correct order; but we have to insert TLEs for any missing
- * attributes.
+ * The rewriter should have already ensured that the TLEs are in correct
+ * order; but we have to insert TLEs for any missing attributes.
*
- * Scan the tuple description in the relation's relcache entry to make
- * sure we have all the user attributes in the right order. We assume
- * that the rewriter already acquired at least AccessShareLock on the
- * relation, so we need no lock here.
+ * Scan the tuple description in the relation's relcache entry to make sure
+ * we have all the user attributes in the right order. We assume that the
+ * rewriter already acquired at least AccessShareLock on the relation, so
+ * we need no lock here.
*/
rel = heap_open(getrelid(result_relation, range_table), NoLock);
* Didn't find a matching tlist entry, so make one.
*
* For INSERT, generate a NULL constant. (We assume the rewriter
- * would have inserted any available default value.) Also, if
- * the column isn't dropped, apply any domain constraints that
- * might exist --- this is to catch domain NOT NULL.
+ * would have inserted any available default value.) Also, if the
+ * column isn't dropped, apply any domain constraints that might
+ * exist --- this is to catch domain NOT NULL.
*
- * For UPDATE, generate a Var reference to the existing value of
- * the attribute, so that it gets copied to the new tuple. But
- * generate a NULL for dropped columns (we want to drop any
- * old values).
+ * For UPDATE, generate a Var reference to the existing value of the
+ * attribute, so that it gets copied to the new tuple. But
+ * generate a NULL for dropped columns (we want to drop any old
+ * values).
*
- * When generating a NULL constant for a dropped column, we label
- * it INT4 (any other guaranteed-to-exist datatype would do as
- * well). We can't label it with the dropped column's
- * datatype since that might not exist anymore. It does not
- * really matter what we claim the type is, since NULL is NULL
- * --- its representation is datatype-independent. This could
- * perhaps confuse code comparing the finished plan to the
- * target relation, however.
+ * When generating a NULL constant for a dropped column, we label it
+ * INT4 (any other guaranteed-to-exist datatype would do as well).
+ * We can't label it with the dropped column's datatype since that
+ * might not exist anymore. It does not really matter what we
+ * claim the type is, since NULL is NULL --- its representation is
+ * datatype-independent. This could perhaps confuse code
+ * comparing the finished plan to the target relation, however.
*/
Oid atttype = att_tup->atttypid;
int32 atttypmod = att_tup->atttypmod;
}
/*
- * The remaining tlist entries should be resjunk; append them all to
- * the end of the new tlist, making sure they have resnos higher than
- * the last real attribute. (Note: although the rewriter already did
- * such renumbering, we have to do it again here in case we are doing
- * an UPDATE in a table with dropped columns, or an inheritance child
- * table with extra columns.)
+ * The remaining tlist entries should be resjunk; append them all to the
+ * end of the new tlist, making sure they have resnos higher than the last
+ * real attribute. (Note: although the rewriter already did such
+ * renumbering, we have to do it again here in case we are doing an UPDATE
+ * in a table with dropped columns, or an inheritance child table with
+ * extra columns.)
*/
while (tlist_item)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.126 2005/08/02 20:27:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.127 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} adjust_inherited_attrs_context;
static Plan *recurse_set_operations(Node *setOp, PlannerInfo *root,
- double tuple_fraction,
- List *colTypes, bool junkOK,
- int flag, List *refnames_tlist,
- List **sortClauses);
+ double tuple_fraction,
+ List *colTypes, bool junkOK,
+ int flag, List *refnames_tlist,
+ List **sortClauses);
static Plan *generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
- double tuple_fraction,
- List *refnames_tlist, List **sortClauses);
+ double tuple_fraction,
+ List *refnames_tlist, List **sortClauses);
static Plan *generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
List *refnames_tlist, List **sortClauses);
static List *recurse_union_children(Node *setOp, PlannerInfo *root,
- double tuple_fraction,
- SetOperationStmt *top_union,
- List *refnames_tlist);
+ double tuple_fraction,
+ SetOperationStmt *top_union,
+ List *refnames_tlist);
static List *generate_setop_tlist(List *colTypes, int flag,
Index varno,
bool hack_constants,
Assert(parse->distinctClause == NIL);
/*
- * Find the leftmost component Query. We need to use its column names
- * for all generated tlists (else SELECT INTO won't work right).
+ * Find the leftmost component Query. We need to use its column names for
+ * all generated tlists (else SELECT INTO won't work right).
*/
node = topop->larg;
while (node && IsA(node, SetOperationStmt))
Assert(leftmostQuery != NULL);
/*
- * Recurse on setOperations tree to generate plans for set ops. The
- * final output plan should have just the column types shown as the
- * output from the top-level node, plus possibly resjunk working
- * columns (we can rely on upper-level nodes to deal with that).
+ * Recurse on setOperations tree to generate plans for set ops. The final
+ * output plan should have just the column types shown as the output from
+ * the top-level node, plus possibly resjunk working columns (we can rely
+ * on upper-level nodes to deal with that).
*/
return recurse_set_operations((Node *) topop, root, tuple_fraction,
topop->colTypes, true, -1,
subplan);
/*
- * We don't bother to determine the subquery's output ordering
- * since it won't be reflected in the set-op result anyhow.
+ * We don't bother to determine the subquery's output ordering since
+ * it won't be reflected in the set-op result anyhow.
*/
*sortClauses = NIL;
* output columns.
*
* XXX you don't really want to know about this: setrefs.c will apply
- * replace_vars_with_subplan_refs() to the Result node's tlist.
- * This would fail if the Vars generated by generate_setop_tlist()
- * were not exactly equal() to the corresponding tlist entries of
- * the subplan. However, since the subplan was generated by
- * generate_union_plan() or generate_nonunion_plan(), and hence
- * its tlist was generated by generate_append_tlist(), this will
- * work. We just tell generate_setop_tlist() to use varno 0.
+ * replace_vars_with_subplan_refs() to the Result node's tlist. This
+ * would fail if the Vars generated by generate_setop_tlist() were not
+ * exactly equal() to the corresponding tlist entries of the subplan.
+ * However, since the subplan was generated by generate_union_plan()
+ * or generate_nonunion_plan(), and hence its tlist was generated by
+ * generate_append_tlist(), this will work. We just tell
+ * generate_setop_tlist() to use varno 0.
*/
if (flag >= 0 ||
!tlist_same_datatypes(plan->targetlist, colTypes, junkOK))
/*
* If plain UNION, tell children to fetch all tuples.
*
- * Note: in UNION ALL, we pass the top-level tuple_fraction unmodified
- * to each arm of the UNION ALL. One could make a case for reducing
- * the tuple fraction for later arms (discounting by the expected size
- * of the earlier arms' results) but it seems not worth the trouble.
- * The normal case where tuple_fraction isn't already zero is a LIMIT
- * at top level, and passing it down as-is is usually enough to get the
- * desired result of preferring fast-start plans.
+ * Note: in UNION ALL, we pass the top-level tuple_fraction unmodified to
+ * each arm of the UNION ALL. One could make a case for reducing the
+ * tuple fraction for later arms (discounting by the expected size of the
+ * earlier arms' results) but it seems not worth the trouble. The normal
+ * case where tuple_fraction isn't already zero is a LIMIT at top level,
+ * and passing it down as-is is usually enough to get the desired result
+ * of preferring fast-start plans.
*/
if (!op->all)
tuple_fraction = 0.0;
/*
- * If any of my children are identical UNION nodes (same op, all-flag,
- * and colTypes) then they can be merged into this node so that we
- * generate only one Append and Sort for the lot. Recurse to find
- * such nodes and compute their children's plans.
+ * If any of my children are identical UNION nodes (same op, all-flag, and
+ * colTypes) then they can be merged into this node so that we generate
+ * only one Append and Sort for the lot. Recurse to find such nodes and
+ * compute their children's plans.
*/
planlist = list_concat(recurse_union_children(op->larg, root,
tuple_fraction,
* Generate tlist for Append plan node.
*
* The tlist for an Append plan isn't important as far as the Append is
- * concerned, but we must make it look real anyway for the benefit of
- * the next plan level up.
+ * concerned, but we must make it look real anyway for the benefit of the
+ * next plan level up.
*/
tlist = generate_append_tlist(op->colTypes, false,
planlist, refnames_tlist);
plan = (Plan *) make_append(planlist, false, tlist);
/*
- * For UNION ALL, we just need the Append plan. For UNION, need to
- * add Sort and Unique nodes to produce unique output.
+ * For UNION ALL, we just need the Append plan. For UNION, need to add
+ * Sort and Unique nodes to produce unique output.
*/
if (!op->all)
{
/* Recurse on children, ensuring their outputs are marked */
lplan = recurse_set_operations(op->larg, root,
- 0.0 /* all tuples needed */,
+ 0.0 /* all tuples needed */ ,
op->colTypes, false, 0,
refnames_tlist,
&child_sortclauses);
rplan = recurse_set_operations(op->rarg, root,
- 0.0 /* all tuples needed */,
+ 0.0 /* all tuples needed */ ,
op->colTypes, false, 1,
refnames_tlist,
&child_sortclauses);
* Generate tlist for Append plan node.
*
* The tlist for an Append plan isn't important as far as the Append is
- * concerned, but we must make it look real anyway for the benefit of
- * the next plan level up. In fact, it has to be real enough that the
- * flag column is shown as a variable not a constant, else setrefs.c
- * will get confused.
+ * concerned, but we must make it look real anyway for the benefit of the
+ * next plan level up. In fact, it has to be real enough that the flag
+ * column is shown as a variable not a constant, else setrefs.c will get
+ * confused.
*/
tlist = generate_append_tlist(op->colTypes, true,
planlist, refnames_tlist);
/*
* Not same, so plan this child separately.
*
- * Note we disallow any resjunk columns in child results. This is
- * necessary since the Append node that implements the union won't do
- * any projection, and upper levels will get confused if some of our
- * output tuples have junk and some don't. This case only arises when
- * we have an EXCEPT or INTERSECT as child, else there won't be
- * resjunk anyway.
+ * Note we disallow any resjunk columns in child results. This is necessary
+ * since the Append node that implements the union won't do any
+ * projection, and upper levels will get confused if some of our output
+ * tuples have junk and some don't. This case only arises when we have an
+ * EXCEPT or INTERSECT as child, else there won't be resjunk anyway.
*/
return list_make1(recurse_set_operations(setOp, root,
tuple_fraction,
Assert(!reftle->resjunk);
/*
- * Generate columns referencing input columns and having
- * appropriate data types and column names. Insert datatype
- * coercions where necessary.
+ * Generate columns referencing input columns and having appropriate
+ * data types and column names. Insert datatype coercions where
+ * necessary.
*
- * HACK: constants in the input's targetlist are copied up as-is
- * rather than being referenced as subquery outputs. This is
- * mainly to ensure that when we try to coerce them to the output
- * column's datatype, the right things happen for UNKNOWN
- * constants. But do this only at the first level of
- * subquery-scan plans; we don't want phony constants appearing in
- * the output tlists of upper-level nodes!
+ * HACK: constants in the input's targetlist are copied up as-is rather
+ * than being referenced as subquery outputs. This is mainly to
+ * ensure that when we try to coerce them to the output column's
+ * datatype, the right things happen for UNKNOWN constants. But do
+ * this only at the first level of subquery-scan plans; we don't want
+ * phony constants appearing in the output tlists of upper-level
+ * nodes!
*/
if (hack_constants && inputtle->expr && IsA(inputtle->expr, Const))
expr = (Node *) inputtle->expr;
List *rels_list;
ListCell *l;
- /*
+ /*
* We build a list starting with the given rel and adding all direct and
* indirect children. We can use a single list as both the record of
* already-found rels and the agenda of rels yet to be scanned for more
currentchildren = find_inheritance_children(currentrel);
/*
- * Add to the queue only those children not already seen. This
- * avoids making duplicate entries in case of multiple inheritance
- * paths from the same parent. (It'll also keep us from getting
- * into an infinite loop, though theoretically there can't be any
- * cycles in the inheritance graph anyway.)
+ * Add to the queue only those children not already seen. This avoids
+ * making duplicate entries in case of multiple inheritance paths from
+ * the same parent. (It'll also keep us from getting into an infinite
+ * loop, though theoretically there can't be any cycles in the
+ * inheritance graph anyway.)
*/
rels_list = list_concat_unique_oid(rels_list, currentchildren);
}
/*
* Check that there's at least one descendant, else treat as no-child
- * case. This could happen despite above has_subclass() check, if
- * table once had a child but no longer does.
+ * case. This could happen despite above has_subclass() check, if table
+ * once had a child but no longer does.
*/
if (list_length(inhOIDs) < 2)
{
Index childRTindex;
/*
- * It is possible that the parent table has children that are
- * temp tables of other backends. We cannot safely access such
- * tables (because of buffering issues), and the best thing to do
- * seems to be to silently ignore them.
+ * It is possible that the parent table has children that are temp
+ * tables of other backends. We cannot safely access such tables
+ * (because of buffering issues), and the best thing to do seems to be
+ * to silently ignore them.
*/
if (childOID != parentOID &&
isOtherTempNamespace(get_rel_namespace(childOID)))
continue;
/*
- * Build an RTE for the child, and attach to query's rangetable
- * list. We copy most fields of the parent's RTE, but replace
- * relation OID, and set inh = false.
+ * Build an RTE for the child, and attach to query's rangetable list.
+ * We copy most fields of the parent's RTE, but replace relation OID,
+ * and set inh = false.
*/
childrte = copyObject(rte);
childrte->relid = childOID;
/*
* If all the children were temp tables, pretend it's a non-inheritance
- * situation. The duplicate RTE we added for the parent table is harmless.
+ * situation. The duplicate RTE we added for the parent table is
+ * harmless.
*/
if (list_length(inhRTIs) < 2)
{
}
/*
- * The executor will check the parent table's access permissions when
- * it examines the parent's inheritlist entry. There's no need to
- * check twice, so turn off access check bits in the original RTE.
- * (If we are invoked more than once, extra copies of the child RTEs
- * will also not cause duplicate permission checks.)
+ * The executor will check the parent table's access permissions when it
+ * examines the parent's inheritlist entry. There's no need to check
+ * twice, so turn off access check bits in the original RTE. (If we are
+ * invoked more than once, extra copies of the child RTEs will also not
+ * cause duplicate permission checks.)
*/
rte->requiredPerms = 0;
}
/*
- * We assume that by now the planner has acquired at least
- * AccessShareLock on both rels, and so we need no additional lock
- * now.
+ * We assume that by now the planner has acquired at least AccessShareLock
+ * on both rels, and so we need no additional lock now.
*/
oldrelation = heap_open(old_relid, NoLock);
newrelation = heap_open(new_relid, NoLock);
JoinExpr *j;
j = (JoinExpr *) expression_tree_mutator(node,
- adjust_inherited_attrs_mutator,
+ adjust_inherited_attrs_mutator,
(void *) context);
/* now fix JoinExpr's rtindex */
if (j->rtindex == context->old_rt_index)
InClauseInfo *ininfo;
ininfo = (InClauseInfo *) expression_tree_mutator(node,
- adjust_inherited_attrs_mutator,
- (void *) context);
+ adjust_inherited_attrs_mutator,
+ (void *) context);
/* now fix InClauseInfo's relid sets */
ininfo->lefthand = adjust_relid_set(ininfo->lefthand,
context->old_rt_index,
/*
* BUT: although we don't need to recurse into subplans, we do need to
* make sure that they are copied, not just referenced as
- * expression_tree_mutator will do by default. Otherwise we'll have
- * the same subplan node referenced from each arm of the inheritance
- * APPEND plan, which will cause trouble in the executor. This is a
- * kluge that should go away when we redesign querytrees.
+ * expression_tree_mutator will do by default. Otherwise we'll have the
+ * same subplan node referenced from each arm of the inheritance APPEND
+ * plan, which will cause trouble in the executor. This is a kluge that
+ * should go away when we redesign querytrees.
*/
if (is_subplan(node))
{
/*
* If we changed anything, re-sort the tlist by resno, and make sure
* resjunk entries have resnos above the last real resno. The sort
- * algorithm is a bit stupid, but for such a seldom-taken path, small
- * is probably better than fast.
+ * algorithm is a bit stupid, but for such a seldom-taken path, small is
+ * probably better than fast.
*/
if (!changed_it)
return tlist;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.200 2005/07/03 21:14:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.201 2005/10/15 02:49:21 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
static Node *substitute_actual_parameters(Node *expr, int nargs, List *args,
int *usecounts);
static Node *substitute_actual_parameters_mutator(Node *node,
- substitute_actual_parameters_context *context);
+ substitute_actual_parameters_context *context);
static void sql_inline_error_callback(void *arg);
static Expr *evaluate_expr(Expr *expr, Oid result_type);
make_ands_implicit(Expr *clause)
{
/*
- * NB: because the parser sets the qual field to NULL in a query that
- * has no WHERE clause, we must consider a NULL input clause as TRUE,
- * even though one might more reasonably think it FALSE. Grumble. If
- * this causes trouble, consider changing the parser's behavior.
+ * NB: because the parser sets the qual field to NULL in a query that has
+ * no WHERE clause, we must consider a NULL input clause as TRUE, even
+ * though one might more reasonably think it FALSE. Grumble. If this
+ * causes trouble, consider changing the parser's behavior.
*/
if (clause == NULL)
return NIL; /* NULL -> NIL list == TRUE */
if (IsA(node, Aggref))
{
Assert(((Aggref *) node)->agglevelsup == 0);
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
}
Assert(!IsA(node, SubLink));
return expression_tree_walker(node, contain_agg_clause_walker, context);
/*
* If the transition type is pass-by-value then it doesn't add
- * anything to the required size of the hashtable. If it is
- * pass-by-reference then we have to add the estimated size of
- * the value itself, plus palloc overhead.
+ * anything to the required size of the hashtable. If it is
+ * pass-by-reference then we have to add the estimated size of the
+ * value itself, plus palloc overhead.
*/
if (!get_typbyval(aggtranstype))
{
if (contain_agg_clause((Node *) aggref->target))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregate function calls may not be nested")));
+ errmsg("aggregate function calls may not be nested")));
/*
* Having checked that, we need not recurse into the argument.
return false;
if (IsA(node, SubPlan) ||
IsA(node, SubLink))
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
return expression_tree_walker(node, contain_subplans_walker, context);
}
{
/*
* We could implement this check in one recursive scan. But since the
- * check for volatile functions is both moderately expensive and
- * unlikely to fail, it seems better to look for Vars first and only
- * check for volatile functions if we find no Vars.
+ * check for volatile functions is both moderately expensive and unlikely
+ * to fail, it seems better to look for Vars first and only check for
+ * volatile functions if we find no Vars.
*/
if (!contain_var_clause(clause) &&
!contain_volatile_functions(clause))
/*
* If the DISTINCT list contains all the nonjunk targetlist items, and
- * nothing else (ie, no junk tlist items), then it's a simple
- * DISTINCT, else it's DISTINCT ON. We do not require the lists to be
- * in the same order (since the parser may have adjusted the DISTINCT
- * clause ordering to agree with ORDER BY). Furthermore, a
- * non-DISTINCT junk tlist item that is in the sortClause is also
- * evidence of DISTINCT ON, since we don't allow ORDER BY on junk
- * tlist items when plain DISTINCT is used.
+ * nothing else (ie, no junk tlist items), then it's a simple DISTINCT,
+ * else it's DISTINCT ON. We do not require the lists to be in the same
+ * order (since the parser may have adjusted the DISTINCT clause ordering
+ * to agree with ORDER BY). Furthermore, a non-DISTINCT junk tlist item
+ * that is in the sortClause is also evidence of DISTINCT ON, since we
+ * don't allow ORDER BY on junk tlist items when plain DISTINCT is used.
*
* This code assumes that the DISTINCT list is valid, ie, all its entries
* match some entry of the tlist.
*
* Currently the extra steps that are taken in this mode are:
* 1. Substitute values for Params, where a bound Param value has been made
- * available by the caller of planner().
+ * available by the caller of planner().
* 2. Fold stable, as well as immutable, functions to constants.
*--------------------
*/
if (paramInfo)
{
/*
- * Found it, so return a Const representing the param
- * value. Note that we don't copy pass-by-ref datatypes,
- * so the Const will only be valid as long as the bound
- * parameter list exists. This is okay for intended uses
- * of estimate_expression_value().
+ * Found it, so return a Const representing the param value.
+ * Note that we don't copy pass-by-ref datatypes, so the Const
+ * will only be valid as long as the bound parameter list
+ * exists. This is okay for intended uses of
+ * estimate_expression_value().
*/
int16 typLen;
bool typByVal;
/*
* Reduce constants in the FuncExpr's arguments. We know args is
- * either NIL or a List node, so we can call
- * expression_tree_mutator directly rather than recursing to self.
+ * either NIL or a List node, so we can call expression_tree_mutator
+ * directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
- eval_const_expressions_mutator,
+ eval_const_expressions_mutator,
(void *) context);
/*
- * Code for op/func reduction is pretty bulky, so split it out as
- * a separate function.
+ * Code for op/func reduction is pretty bulky, so split it out as a
+ * separate function.
*/
simple = simplify_function(expr->funcid, expr->funcresulttype, args,
true, context);
/*
* The expression cannot be simplified any further, so build and
- * return a replacement FuncExpr node using the
- * possibly-simplified arguments.
+ * return a replacement FuncExpr node using the possibly-simplified
+ * arguments.
*/
newexpr = makeNode(FuncExpr);
newexpr->funcid = expr->funcid;
OpExpr *newexpr;
/*
- * Reduce constants in the OpExpr's arguments. We know args is
- * either NIL or a List node, so we can call
- * expression_tree_mutator directly rather than recursing to self.
+ * Reduce constants in the OpExpr's arguments. We know args is either
+ * NIL or a List node, so we can call expression_tree_mutator directly
+ * rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
- eval_const_expressions_mutator,
+ eval_const_expressions_mutator,
(void *) context);
/*
- * Need to get OID of underlying function. Okay to scribble on
- * input to this extent.
+ * Need to get OID of underlying function. Okay to scribble on input
+ * to this extent.
*/
set_opfuncid(expr);
/*
- * Code for op/func reduction is pretty bulky, so split it out as
- * a separate function.
+ * Code for op/func reduction is pretty bulky, so split it out as a
+ * separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype, args,
true, context);
return (Node *) simple;
/*
- * If the operator is boolean equality, we know how to simplify
- * cases involving one constant and one non-constant argument.
+ * If the operator is boolean equality, we know how to simplify cases
+ * involving one constant and one non-constant argument.
*/
if (expr->opno == BooleanEqualOperator)
{
DistinctExpr *newexpr;
/*
- * Reduce constants in the DistinctExpr's arguments. We know args
- * is either NIL or a List node, so we can call
- * expression_tree_mutator directly rather than recursing to self.
+ * Reduce constants in the DistinctExpr's arguments. We know args is
+ * either NIL or a List node, so we can call expression_tree_mutator
+ * directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
- eval_const_expressions_mutator,
+ eval_const_expressions_mutator,
(void *) context);
/*
* We must do our own check for NULLs because DistinctExpr has
- * different results for NULL input than the underlying operator
- * does.
+ * different results for NULL input than the underlying operator does.
*/
foreach(arg, args)
{
/* (NOT okay to try to inline it, though!) */
/*
- * Need to get OID of underlying function. Okay to scribble
- * on input to this extent.
+ * Need to get OID of underlying function. Okay to scribble on
+ * input to this extent.
*/
- set_opfuncid((OpExpr *) expr); /* rely on struct
- * equivalence */
+ set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
/*
- * Code for op/func reduction is pretty bulky, so split it out
- * as a separate function.
+ * Code for op/func reduction is pretty bulky, so split it out as
+ * a separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype,
args, false, context);
bool forceTrue = false;
newargs = simplify_or_arguments(expr->args, context,
- &haveNull, &forceTrue);
+ &haveNull, &forceTrue);
if (forceTrue)
return makeBoolConst(true, false);
if (haveNull)
bool forceFalse = false;
newargs = simplify_and_arguments(expr->args, context,
- &haveNull, &forceFalse);
+ &haveNull, &forceFalse);
if (forceFalse)
return makeBoolConst(false, false);
if (haveNull)
/*
* Return a SubPlan unchanged --- too late to do anything with it.
*
- * XXX should we ereport() here instead? Probably this routine
- * should never be invoked after SubPlan creation.
+ * XXX should we ereport() here instead? Probably this routine should
+ * never be invoked after SubPlan creation.
*/
return node;
}
if (IsA(node, RelabelType))
{
/*
- * If we can simplify the input to a constant, then we don't need
- * the RelabelType node anymore: just change the type field of the
- * Const node. Otherwise, must copy the RelabelType node.
+ * If we can simplify the input to a constant, then we don't need the
+ * RelabelType node anymore: just change the type field of the Const
+ * node. Otherwise, must copy the RelabelType node.
*/
RelabelType *relabel = (RelabelType *) node;
Node *arg;
context);
/*
- * If we find stacked RelabelTypes (eg, from foo :: int :: oid) we
- * can discard all but the top one.
+ * If we find stacked RelabelTypes (eg, from foo :: int :: oid) we can
+ * discard all but the top one.
*/
while (arg && IsA(arg, RelabelType))
arg = (Node *) ((RelabelType *) arg)->arg;
con->consttype = relabel->resulttype;
/*
- * relabel's resulttypmod is discarded, which is OK for now;
- * if the type actually needs a runtime length coercion then
- * there should be a function call to do it just above this
- * node.
+ * relabel's resulttypmod is discarded, which is OK for now; if
+ * the type actually needs a runtime length coercion then there
+ * should be a function call to do it just above this node.
*/
return (Node *) con;
}
/*
* Found a TRUE condition, so none of the remaining alternatives
- * can be reached. We treat the result as the default result.
+ * can be reached. We treat the result as the default result.
*/
defresult = caseresult;
break;
if (IsA(node, CaseTestExpr))
{
/*
- * If we know a constant test value for the current CASE
- * construct, substitute it for the placeholder. Else just
- * return the placeholder as-is.
+ * If we know a constant test value for the current CASE construct,
+ * substitute it for the placeholder. Else just return the
+ * placeholder as-is.
*/
if (context->case_val)
return copyObject(context->case_val);
if (IsA(node, FieldSelect))
{
/*
- * We can optimize field selection from a whole-row Var into a
- * simple Var. (This case won't be generated directly by the
- * parser, because ParseComplexProjection short-circuits it. But
- * it can arise while simplifying functions.) Also, we can
- * optimize field selection from a RowExpr construct.
+ * We can optimize field selection from a whole-row Var into a simple
+ * Var. (This case won't be generated directly by the parser, because
+ * ParseComplexProjection short-circuits it. But it can arise while
+ * simplifying functions.) Also, we can optimize field selection from
+ * a RowExpr construct.
*
- * We must however check that the declared type of the field is still
- * the same as when the FieldSelect was created --- this can
- * change if someone did ALTER COLUMN TYPE on the rowtype.
+ * We must however check that the declared type of the field is still the
+ * same as when the FieldSelect was created --- this can change if
+ * someone did ALTER COLUMN TYPE on the rowtype.
*/
FieldSelect *fselect = (FieldSelect *) node;
FieldSelect *newfselect;
fselect->fieldnum <= list_length(rowexpr->args))
{
Node *fld = (Node *) list_nth(rowexpr->args,
- fselect->fieldnum - 1);
+ fselect->fieldnum - 1);
if (rowtype_field_matches(rowexpr->row_typeid,
fselect->fieldnum,
/*
* For any node type not handled above, we recurse using
- * expression_tree_mutator, which will copy the node unchanged but try
- * to simplify its arguments (if any) using this routine. For example:
- * we cannot eliminate an ArrayRef node, but we might be able to
- * simplify constant expressions in its subscripts.
+ * expression_tree_mutator, which will copy the node unchanged but try to
+ * simplify its arguments (if any) using this routine. For example: we
+ * cannot eliminate an ArrayRef node, but we might be able to simplify
+ * constant expressions in its subscripts.
*/
return expression_tree_mutator(node, eval_const_expressions_mutator,
(void *) context);
/*
* Since the parser considers OR to be a binary operator, long OR lists
* become deeply nested expressions. We must flatten these into long
- * argument lists of a single OR operator. To avoid blowing out the stack
+ * argument lists of a single OR operator. To avoid blowing out the stack
* with recursion of eval_const_expressions, we resort to some tenseness
* here: we keep a list of not-yet-processed inputs, and handle flattening
* of nested ORs by prepending to the to-do list instead of recursing.
/* flatten nested ORs as per above comment */
if (or_clause(arg))
{
- List *subargs = list_copy(((BoolExpr *) arg)->args);
+ List *subargs = list_copy(((BoolExpr *) arg)->args);
/* overly tense code to avoid leaking unused list header */
if (!unprocessed_args)
unprocessed_args = subargs;
else
{
- List *oldhdr = unprocessed_args;
+ List *oldhdr = unprocessed_args;
unprocessed_args = list_concat(subargs, unprocessed_args);
pfree(oldhdr);
arg = eval_const_expressions_mutator(arg, context);
/*
- * It is unlikely but not impossible for simplification of a
- * non-OR clause to produce an OR. Recheck, but don't be
- * too tense about it since it's not a mainstream case.
- * In particular we don't worry about const-simplifying
- * the input twice.
+ * It is unlikely but not impossible for simplification of a non-OR
+ * clause to produce an OR. Recheck, but don't be too tense about it
+ * since it's not a mainstream case. In particular we don't worry
+ * about const-simplifying the input twice.
*/
if (or_clause(arg))
{
- List *subargs = list_copy(((BoolExpr *) arg)->args);
+ List *subargs = list_copy(((BoolExpr *) arg)->args);
unprocessed_args = list_concat(subargs, unprocessed_args);
continue;
}
/*
- * OK, we have a const-simplified non-OR argument. Process it
- * per comments above.
+ * OK, we have a const-simplified non-OR argument. Process it per
+ * comments above.
*/
if (IsA(arg, Const))
{
/* flatten nested ANDs as per above comment */
if (and_clause(arg))
{
- List *subargs = list_copy(((BoolExpr *) arg)->args);
+ List *subargs = list_copy(((BoolExpr *) arg)->args);
/* overly tense code to avoid leaking unused list header */
if (!unprocessed_args)
unprocessed_args = subargs;
else
{
- List *oldhdr = unprocessed_args;
+ List *oldhdr = unprocessed_args;
unprocessed_args = list_concat(subargs, unprocessed_args);
pfree(oldhdr);
arg = eval_const_expressions_mutator(arg, context);
/*
- * It is unlikely but not impossible for simplification of a
- * non-AND clause to produce an AND. Recheck, but don't be
- * too tense about it since it's not a mainstream case.
- * In particular we don't worry about const-simplifying
- * the input twice.
+ * It is unlikely but not impossible for simplification of a non-AND
+ * clause to produce an AND. Recheck, but don't be too tense about it
+ * since it's not a mainstream case. In particular we don't worry
+ * about const-simplifying the input twice.
*/
if (and_clause(arg))
{
- List *subargs = list_copy(((BoolExpr *) arg)->args);
+ List *subargs = list_copy(((BoolExpr *) arg)->args);
unprocessed_args = list_concat(subargs, unprocessed_args);
continue;
}
/*
- * OK, we have a const-simplified non-AND argument. Process it
- * per comments above.
+ * OK, we have a const-simplified non-AND argument. Process it per
+ * comments above.
*/
if (IsA(arg, Const))
{
{
Assert(!((Const *) leftop)->constisnull);
if (DatumGetBool(((Const *) leftop)->constvalue))
- return rightop; /* true = foo */
+ return rightop; /* true = foo */
else
return make_notclause(rightop); /* false = foo */
}
{
Assert(!((Const *) rightop)->constisnull);
if (DatumGetBool(((Const *) rightop)->constvalue))
- return leftop; /* foo = true */
+ return leftop; /* foo = true */
else
return make_notclause(leftop); /* foo = false */
}
Expr *newexpr;
/*
- * We have two strategies for simplification: either execute the
- * function to deliver a constant result, or expand in-line the body
- * of the function definition (which only works for simple
- * SQL-language functions, but that is a common case). In either case
- * we need access to the function's pg_proc tuple, so fetch it just
- * once to use in both attempts.
+ * We have two strategies for simplification: either execute the function
+ * to deliver a constant result, or expand in-line the body of the
+ * function definition (which only works for simple SQL-language
+ * functions, but that is a common case). In either case we need access
+ * to the function's pg_proc tuple, so fetch it just once to use in both
+ * attempts.
*/
func_tuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(funcid),
return NULL;
/*
- * Can't simplify if it returns RECORD. The immediate problem is that
- * it will be needing an expected tupdesc which we can't supply here.
+ * Can't simplify if it returns RECORD. The immediate problem is that it
+ * will be needing an expected tupdesc which we can't supply here.
*
* In the case where it has OUT parameters, it could get by without an
* expected tupdesc, but we still have issues: get_expr_result_type()
- * doesn't know how to extract type info from a RECORD constant, and
- * in the case of a NULL function result there doesn't seem to be any
- * clean way to fix that. In view of the likelihood of there being
- * still other gotchas, seems best to leave the function call unreduced.
+ * doesn't know how to extract type info from a RECORD constant, and in
+ * the case of a NULL function result there doesn't seem to be any clean
+ * way to fix that. In view of the likelihood of there being still other
+ * gotchas, seems best to leave the function call unreduced.
*/
if (funcform->prorettype == RECORDOID)
return NULL;
}
/*
- * If the function is strict and has a constant-NULL input, it will
- * never be called at all, so we can replace the call by a NULL
- * constant, even if there are other inputs that aren't constant, and
- * even if the function is not otherwise immutable.
+ * If the function is strict and has a constant-NULL input, it will never
+ * be called at all, so we can replace the call by a NULL constant, even
+ * if there are other inputs that aren't constant, and even if the
+ * function is not otherwise immutable.
*/
if (funcform->proisstrict && has_null_input)
return (Expr *) makeNullConst(result_type);
return NULL;
/*
- * Ordinarily we are only allowed to simplify immutable functions.
- * But for purposes of estimation, we consider it okay to simplify
- * functions that are merely stable; the risk that the result might
- * change from planning time to execution time is worth taking in
- * preference to not being able to estimate the value at all.
+ * Ordinarily we are only allowed to simplify immutable functions. But for
+ * purposes of estimation, we consider it okay to simplify functions that
+ * are merely stable; the risk that the result might change from planning
+ * time to execution time is worth taking in preference to not being able
+ * to estimate the value at all.
*/
if (funcform->provolatile == PROVOLATILE_IMMUTABLE)
- /* okay */ ;
+ /* okay */ ;
else if (context->estimate && funcform->provolatile == PROVOLATILE_STABLE)
- /* okay */ ;
+ /* okay */ ;
else
return NULL;
int i;
/*
- * Forget it if the function is not SQL-language or has other
- * showstopper properties. (The nargs check is just paranoia.)
+ * Forget it if the function is not SQL-language or has other showstopper
+ * properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
return NULL;
/*
- * Setup error traceback support for ereport(). This is so that we
- * can finger the function that bad information came from.
+ * Setup error traceback support for ereport(). This is so that we can
+ * finger the function that bad information came from.
*/
sqlerrcontext.callback = sql_inline_error_callback;
sqlerrcontext.arg = func_tuple;
error_context_stack = &sqlerrcontext;
/*
- * Make a temporary memory context, so that we don't leak all the
- * stuff that parsing might create.
+ * Make a temporary memory context, so that we don't leak all the stuff
+ * that parsing might create.
*/
mycxt = AllocSetContextCreate(CurrentMemoryContext,
"inline_function",
src = DatumGetCString(DirectFunctionCall1(textout, tmp));
/*
- * We just do parsing and parse analysis, not rewriting, because
- * rewriting will not affect table-free-SELECT-only queries, which is
- * all that we care about. Also, we can punt as soon as we detect
- * more than one command in the function body.
+ * We just do parsing and parse analysis, not rewriting, because rewriting
+ * will not affect table-free-SELECT-only queries, which is all that we
+ * care about. Also, we can punt as soon as we detect more than one
+ * command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
if (list_length(raw_parsetree_list) != 1)
newexpr = (Node *) ((TargetEntry *) linitial(querytree->targetList))->expr;
/*
- * If the function has any arguments declared as polymorphic types,
- * then it wasn't type-checked at definition time; must do so now.
- * (This will raise an error if wrong, but that's okay since the
- * function would fail at runtime anyway. Note we do not try this
- * until we have verified that no rewriting was needed; that's
- * probably not important, but let's be careful.)
+ * If the function has any arguments declared as polymorphic types, then
+ * it wasn't type-checked at definition time; must do so now. (This will
+ * raise an error if wrong, but that's okay since the function would fail
+ * at runtime anyway. Note we do not try this until we have verified that
+ * no rewriting was needed; that's probably not important, but let's be
+ * careful.)
*/
if (polymorphic)
(void) check_sql_fn_retval(funcid, result_type, querytree_list, NULL);
/*
- * Additional validity checks on the expression. It mustn't return a
- * set, and it mustn't be more volatile than the surrounding function
- * (this is to avoid breaking hacks that involve pretending a function
- * is immutable when it really ain't). If the surrounding function is
- * declared strict, then the expression must contain only strict
- * constructs and must use all of the function parameters (this is
- * overkill, but an exact analysis is hard).
+ * Additional validity checks on the expression. It mustn't return a set,
+ * and it mustn't be more volatile than the surrounding function (this is
+ * to avoid breaking hacks that involve pretending a function is immutable
+ * when it really ain't). If the surrounding function is declared strict,
+ * then the expression must contain only strict constructs and must use
+ * all of the function parameters (this is overkill, but an exact analysis
+ * is hard).
*/
if (expression_returns_set(newexpr))
goto fail;
goto fail;
/*
- * We may be able to do it; there are still checks on parameter usage
- * to make, but those are most easily done in combination with the
- * actual substitution of the inputs. So start building expression
- * with inputs substituted.
+ * We may be able to do it; there are still checks on parameter usage to
+ * make, but those are most easily done in combination with the actual
+ * substitution of the inputs. So start building expression with inputs
+ * substituted.
*/
usecounts = (int *) palloc0(funcform->pronargs * sizeof(int));
newexpr = substitute_actual_parameters(newexpr, funcform->pronargs,
QualCost eval_cost;
/*
- * We define "expensive" as "contains any subplan or more than
- * 10 operators". Note that the subplan search has to be done
+ * We define "expensive" as "contains any subplan or more than 10
+ * operators". Note that the subplan search has to be done
* explicitly, since cost_qual_eval() will barf on unplanned
* subselects.
*/
}
/*
- * Whew --- we can make the substitution. Copy the modified
- * expression out of the temporary memory context, and clean up.
+ * Whew --- we can make the substitution. Copy the modified expression
+ * out of the temporary memory context, and clean up.
*/
MemoryContextSwitchTo(oldcxt);
MemoryContextDelete(mycxt);
/*
- * Recursively try to simplify the modified expression. Here we must
- * add the current function to the context list of active functions.
+ * Recursively try to simplify the modified expression. Here we must add
+ * the current function to the context list of active functions.
*/
context->active_fns = lcons_oid(funcid, context->active_fns);
newexpr = eval_const_expressions_mutator(newexpr, context);
static Node *
substitute_actual_parameters_mutator(Node *node,
- substitute_actual_parameters_context *context)
+ substitute_actual_parameters_context *context)
{
if (node == NULL)
return NULL;
/*
* And evaluate it.
*
- * It is OK to use a default econtext because none of the ExecEvalExpr()
- * code used in this situation will use econtext. That might seem
- * fortuitous, but it's not so unreasonable --- a constant expression
- * does not depend on context, by definition, n'est ce pas?
+ * It is OK to use a default econtext because none of the ExecEvalExpr() code
+ * used in this situation will use econtext. That might seem fortuitous,
+ * but it's not so unreasonable --- a constant expression does not depend
+ * on context, by definition, n'est ce pas?
*/
const_val = ExecEvalExprSwitchContext(exprstate,
GetPerTupleExprContext(estate),
ListCell *temp;
/*
- * The walker has already visited the current node, and so we need
- * only recurse into any sub-nodes it has.
+ * The walker has already visited the current node, and so we need only
+ * recurse into any sub-nodes it has.
*
- * We assume that the walker is not interested in List nodes per se, so
- * when we expect a List we just recurse directly to self without
- * bothering to call the walker.
+ * We assume that the walker is not interested in List nodes per se, so when
+ * we expect a List we just recurse directly to self without bothering to
+ * call the walker.
*/
if (node == NULL)
return false;
return true;
/*
- * Also invoke the walker on the sublink's Query node, so
- * it can recurse into the sub-query if it wants to.
+ * Also invoke the walker on the sublink's Query node, so it
+ * can recurse into the sub-query if it wants to.
*/
return walker(sublink->subselect, context);
}
void *context)
{
/*
- * The mutator has already decided not to modify the current node, but
- * we must call the mutator for any sub-nodes.
+ * The mutator has already decided not to modify the current node, but we
+ * must call the mutator for any sub-nodes.
*/
#define FLATCOPY(newnode, node, nodetype) \
MUTATE(newnode->lefthand, sublink->lefthand, List *);
/*
- * Also invoke the mutator on the sublink's Query node, so
- * it can recurse into the sub-query if it wants to.
+ * Also invoke the mutator on the sublink's Query node, so it
+ * can recurse into the sub-query if it wants to.
*/
MUTATE(newnode->subselect, sublink->subselect, Node *);
return (Node *) newnode;
case T_List:
{
/*
- * We assume the mutator isn't interested in the list
- * nodes per se, so just invoke it on each list element.
- * NOTE: this would fail badly on a list with integer
- * elements!
+ * We assume the mutator isn't interested in the list nodes
+ * per se, so just invoke it on each list element. NOTE: this
+ * would fail badly on a list with integer elements!
*/
List *resultlist;
ListCell *temp;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.124 2005/07/22 19:12:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.125 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return +1;
/*
- * If paths have the same startup cost (not at all unlikely),
- * order them by total cost.
+ * If paths have the same startup cost (not at all unlikely), order
+ * them by total cost.
*/
if (path1->total_cost < path2->total_cost)
return -1;
return -1;
/*
- * If paths have the same startup cost (not at all unlikely),
- * order them by total cost.
+ * If paths have the same startup cost (not at all unlikely), order
+ * them by total cost.
*/
if (path1->total_cost > path2->total_cost * 1.01)
return +1;
void
add_path(RelOptInfo *parent_rel, Path *new_path)
{
- bool accept_new = true; /* unless we find a superior old
- * path */
+ bool accept_new = true; /* unless we find a superior old path */
ListCell *insert_after = NULL; /* where to insert new item */
ListCell *p1_prev = NULL;
ListCell *p1;
/*
- * This is a convenient place to check for query cancel --- no part
- * of the planner goes very long without calling add_path().
+ * This is a convenient place to check for query cancel --- no part of the
+ * planner goes very long without calling add_path().
*/
CHECK_FOR_INTERRUPTS();
/*
- * Loop to check proposed new path against old paths. Note it is
- * possible for more than one old path to be tossed out because
- * new_path dominates it.
+ * Loop to check proposed new path against old paths. Note it is possible
+ * for more than one old path to be tossed out because new_path dominates
+ * it.
*/
p1 = list_head(parent_rel->pathlist); /* cannot use foreach here */
while (p1 != NULL)
int costcmp;
/*
- * As of Postgres 8.0, we use fuzzy cost comparison to avoid
- * wasting cycles keeping paths that are really not significantly
- * different in cost.
+ * As of Postgres 8.0, we use fuzzy cost comparison to avoid wasting
+ * cycles keeping paths that are really not significantly different in
+ * cost.
*/
costcmp = compare_fuzzy_path_costs(new_path, old_path, TOTAL_COST);
/*
- * If the two paths compare differently for startup and total
- * cost, then we want to keep both, and we can skip the (much
- * slower) comparison of pathkeys. If they compare the same,
- * proceed with the pathkeys comparison. Note: this test relies
- * on the fact that compare_fuzzy_path_costs will only return 0 if
- * both costs are effectively equal (and, therefore, there's no
- * need to call it twice in that case).
+ * If the two paths compare differently for startup and total cost,
+ * then we want to keep both, and we can skip the (much slower)
+ * comparison of pathkeys. If they compare the same, proceed with the
+ * pathkeys comparison. Note: this test relies on the fact that
+ * compare_fuzzy_path_costs will only return 0 if both costs are
+ * effectively equal (and, therefore, there's no need to call it twice
+ * in that case).
*/
if (costcmp == 0 ||
costcmp == compare_fuzzy_path_costs(new_path, old_path,
else
{
/*
- * Same pathkeys, and fuzzily the same cost, so
- * keep just one --- but we'll do an exact cost
- * comparison to decide which.
+ * Same pathkeys, and fuzzily the same cost, so keep
+ * just one --- but we'll do an exact cost comparison
+ * to decide which.
*/
if (compare_path_costs(new_path, old_path,
TOTAL_COST) < 0)
remove_old = true; /* new dominates old */
else
- accept_new = false; /* old equals or dominates
- * new */
+ accept_new = false; /* old equals or dominates new */
}
break;
case PATHKEYS_BETTER1:
{
parent_rel->pathlist = list_delete_cell(parent_rel->pathlist,
p1, p1_prev);
+
/*
* Delete the data pointed-to by the deleted cell, if possible
*/
/*
* For a join inner scan, there's no point in marking the path with any
* pathkeys, since it will only ever be used as the inner path of a
- * nestloop, and so its ordering does not matter. For the same reason
- * we don't really care what order it's scanned in. (We could expect
- * the caller to supply the correct values, but it's easier to force
- * it here.)
+ * nestloop, and so its ordering does not matter. For the same reason we
+ * don't really care what order it's scanned in. (We could expect the
+ * caller to supply the correct values, but it's easier to force it here.)
*/
if (isjoininner)
{
/*
* We must compute the estimated number of output rows for the
* indexscan. This is less than rel->rows because of the additional
- * selectivity of the join clauses. Since clause_groups may
- * contain both restriction and join clauses, we have to do a set
- * union to get the full set of clauses that must be considered to
- * compute the correct selectivity. (Without the union operation,
- * we might have some restriction clauses appearing twice, which'd
- * mislead clauselist_selectivity into double-counting their
- * selectivity. However, since RestrictInfo nodes aren't copied when
- * linking them into different lists, it should be sufficient to use
- * pointer comparison to remove duplicates.)
+ * selectivity of the join clauses. Since clause_groups may contain
+ * both restriction and join clauses, we have to do a set union to get
+ * the full set of clauses that must be considered to compute the
+ * correct selectivity. (Without the union operation, we might have
+ * some restriction clauses appearing twice, which'd mislead
+ * clauselist_selectivity into double-counting their selectivity.
+ * However, since RestrictInfo nodes aren't copied when linking them
+ * into different lists, it should be sufficient to use pointer
+ * comparison to remove duplicates.)
*
* Always assume the join type is JOIN_INNER; even if some of the join
* clauses come from other contexts, that's not our problem.
pathnode->rows = rel->tuples *
clauselist_selectivity(root,
allclauses,
- rel->relid, /* do not use 0! */
+ rel->relid, /* do not use 0! */
JOIN_INNER);
/* Like costsize.c, force estimate to be at least one row */
pathnode->rows = clamp_row_est(pathnode->rows);
else
{
/*
- * The number of rows is the same as the parent rel's estimate,
- * since this isn't a join inner indexscan.
+ * The number of rows is the same as the parent rel's estimate, since
+ * this isn't a join inner indexscan.
*/
pathnode->rows = rel->rows;
}
pathnode->path.pathtype = T_BitmapHeapScan;
pathnode->path.parent = rel;
- pathnode->path.pathkeys = NIL; /* always unordered */
+ pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->bitmapqual = bitmapqual;
pathnode->isjoininner = isjoininner;
* We must compute the estimated number of output rows for the
* indexscan. This is less than rel->rows because of the additional
* selectivity of the join clauses. We make use of the selectivity
- * estimated for the bitmap to do this; this isn't really quite
- * right since there may be restriction conditions not included
- * in the bitmap ...
+ * estimated for the bitmap to do this; this isn't really quite right
+ * since there may be restriction conditions not included in the
+ * bitmap ...
*/
Cost indexTotalCost;
Selectivity indexSelectivity;
else
{
/*
- * The number of rows is the same as the parent rel's estimate,
- * since this isn't a join inner indexscan.
+ * The number of rows is the same as the parent rel's estimate, since
+ * this isn't a join inner indexscan.
*/
pathnode->rows = rel->rows;
}
pathnode->path.pathtype = T_BitmapAnd;
pathnode->path.parent = rel;
- pathnode->path.pathkeys = NIL; /* always unordered */
+ pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->bitmapquals = bitmapquals;
pathnode->path.pathtype = T_BitmapOr;
pathnode->path.parent = rel;
- pathnode->path.pathkeys = NIL; /* always unordered */
+ pathnode->path.pathkeys = NIL; /* always unordered */
pathnode->bitmapquals = bitmapquals;
return (UniquePath *) rel->cheapest_unique_path;
/*
- * We must ensure path struct is allocated in same context as parent
- * rel; otherwise GEQO memory management causes trouble. (Compare
+ * We must ensure path struct is allocated in same context as parent rel;
+ * otherwise GEQO memory management causes trouble. (Compare
* best_inner_indexscan().)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
pathnode->path.parent = rel;
/*
- * Treat the output as always unsorted, since we don't necessarily
- * have pathkeys to represent it.
+ * Treat the output as always unsorted, since we don't necessarily have
+ * pathkeys to represent it.
*/
pathnode->path.pathkeys = NIL;
pathnode->subpath = subpath;
/*
- * Try to identify the targetlist that will actually be unique-ified.
- * In current usage, this routine is only used for sub-selects of IN
- * clauses, so we should be able to find the tlist in in_info_list.
+ * Try to identify the targetlist that will actually be unique-ified. In
+ * current usage, this routine is only used for sub-selects of IN clauses,
+ * so we should be able to find the tlist in in_info_list.
*/
sub_targetlist = NIL;
foreach(l, root->in_info_list)
}
/*
- * If the input is a subquery whose output must be unique already,
- * then we don't need to do anything. The test for uniqueness has
- * to consider exactly which columns we are extracting; for example
- * "SELECT DISTINCT x,y" doesn't guarantee that x alone is distinct.
- * So we cannot check for this optimization unless we found our own
- * targetlist above, and it consists only of simple Vars referencing
- * subquery outputs. (Possibly we could do something with expressions
- * in the subquery outputs, too, but for now keep it simple.)
+ * If the input is a subquery whose output must be unique already, then we
+ * don't need to do anything. The test for uniqueness has to consider
+ * exactly which columns we are extracting; for example "SELECT DISTINCT
+ * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
+ * this optimization unless we found our own targetlist above, and it
+ * consists only of simple Vars referencing subquery outputs. (Possibly
+ * we could do something with expressions in the subquery outputs, too,
+ * but for now keep it simple.)
*/
if (sub_targetlist && rel->rtekind == RTE_SUBQUERY)
{
RangeTblEntry *rte = rt_fetch(rel->relid, root->parse->rtable);
- List *sub_tlist_colnos;
+ List *sub_tlist_colnos;
sub_tlist_colnos = translate_sub_tlist(sub_targetlist, rel->relid);
rel->width);
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples. (XXX
- * probably this is an overestimate.) This should agree with
- * make_unique.
+ * Charge one cpu_operator_cost per comparison per input tuple. We assume
+ * all columns get compared at most of the tuples. (XXX probably this is
+ * an overestimate.) This should agree with make_unique.
*/
sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
/*
- * Is it safe to use a hashed implementation? If so, estimate and
- * compare costs. We only try this if we know the targetlist for sure
- * (else we can't be sure about the datatypes involved).
+ * Is it safe to use a hashed implementation? If so, estimate and compare
+ * costs. We only try this if we know the targetlist for sure (else we
+ * can't be sure about the datatypes involved).
*/
pathnode->umethod = UNIQUE_PATH_SORT;
if (enable_hashagg && sub_targetlist && hash_safe_tlist(sub_targetlist))
{
/*
- * Estimate the overhead per hashtable entry at 64 bytes (same as
- * in planner.c).
+ * Estimate the overhead per hashtable entry at 64 bytes (same as in
+ * planner.c).
*/
int hashentrysize = rel->width + 64;
foreach(l, tlist)
{
- Var *var = (Var *) lfirst(l);
+ Var *var = (Var *) lfirst(l);
if (!var || !IsA(var, Var) ||
var->varno != relid)
else
{
/*
- * If we have no GROUP BY, but do have aggregates or HAVING, then
- * the result is at most one row so it's surely unique.
+ * If we have no GROUP BY, but do have aggregates or HAVING, then the
+ * result is at most one row so it's surely unique.
*/
if (query->hasAggs || query->havingQual)
return true;
MergePath *pathnode = makeNode(MergePath);
/*
- * If the given paths are already well enough ordered, we can skip
- * doing an explicit sort.
+ * If the given paths are already well enough ordered, we can skip doing
+ * an explicit sort.
*/
if (outersortkeys &&
pathkeys_contained_in(outersortkeys, outer_path->pathkeys))
innersortkeys = NIL;
/*
- * If we are not sorting the inner path, we may need a materialize
- * node to ensure it can be marked/restored. (Sort does support
- * mark/restore, so no materialize is needed in that case.)
+ * If we are not sorting the inner path, we may need a materialize node to
+ * ensure it can be marked/restored. (Sort does support mark/restore, so
+ * no materialize is needed in that case.)
*
- * Since the inner side must be ordered, and only Sorts and IndexScans
- * can create order to begin with, you might think there's no problem
- * --- but you'd be wrong. Nestloop and merge joins can *preserve*
- * the order of their inputs, so they can be selected as the input of
- * a mergejoin, and they don't support mark/restore at present.
+ * Since the inner side must be ordered, and only Sorts and IndexScans can
+ * create order to begin with, you might think there's no problem --- but
+ * you'd be wrong. Nestloop and merge joins can *preserve* the order of
+ * their inputs, so they can be selected as the input of a mergejoin, and
+ * they don't support mark/restore at present.
*/
if (innersortkeys == NIL &&
!ExecSupportsMarkRestore(inner_path->pathtype))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.113 2005/07/23 21:05:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.114 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void estimate_rel_size(Relation rel, int32 *attr_widths,
- BlockNumber *pages, double *tuples);
+ BlockNumber *pages, double *tuples);
/*
/*
* Normally, we can assume the rewriter already acquired at least
- * AccessShareLock on each relation used in the query. However this
- * will not be the case for relations added to the query because they
- * are inheritance children of some relation mentioned explicitly.
- * For them, this is the first access during the parse/rewrite/plan
- * pipeline, and so we need to obtain and keep a suitable lock.
+ * AccessShareLock on each relation used in the query. However this will
+ * not be the case for relations added to the query because they are
+ * inheritance children of some relation mentioned explicitly. For them,
+ * this is the first access during the parse/rewrite/plan pipeline, and so
+ * we need to obtain and keep a suitable lock.
*
- * XXX really, a suitable lock is RowShareLock if the relation is
- * an UPDATE/DELETE target, and AccessShareLock otherwise. However
- * we cannot easily tell here which to get, so for the moment just
- * get AccessShareLock always. The executor will get the right lock
- * when it runs, which means there is a very small chance of deadlock
- * trying to upgrade our lock.
+ * XXX really, a suitable lock is RowShareLock if the relation is an
+ * UPDATE/DELETE target, and AccessShareLock otherwise. However we cannot
+ * easily tell here which to get, so for the moment just get
+ * AccessShareLock always. The executor will get the right lock when it
+ * runs, which means there is a very small chance of deadlock trying to
+ * upgrade our lock.
*/
if (rel->reloptkind == RELOPT_BASEREL)
relation = heap_open(relationObjectId, NoLock);
&rel->pages, &rel->tuples);
/*
- * Make list of indexes. Ignore indexes on system catalogs if told
- * to.
+ * Make list of indexes. Ignore indexes on system catalogs if told to.
*/
if (IsIgnoringSystemIndexes() && IsSystemClass(relation->rd_rel))
hasindex = false;
/*
* Extract info from the relation descriptor for the index.
*
- * Note that we take no lock on the index; we assume our lock on
- * the parent table will protect the index's schema information.
- * When and if the executor actually uses the index, it will take
- * a lock as needed to protect the access to the index contents.
+ * Note that we take no lock on the index; we assume our lock on the
+ * parent table will protect the index's schema information. When
+ * and if the executor actually uses the index, it will take a
+ * lock as needed to protect the access to the index contents.
*/
indexRelation = index_open(indexoid);
index = indexRelation->rd_index;
info->ncolumns = ncolumns = index->indnatts;
/*
- * Need to make classlist and ordering arrays large enough to
- * put a terminating 0 at the end of each one.
+ * Need to make classlist and ordering arrays large enough to put
+ * a terminating 0 at the end of each one.
*/
info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
info->classlist = (Oid *) palloc0(sizeof(Oid) * (ncolumns + 1));
info->amoptionalkey = indexRelation->rd_am->amoptionalkey;
/*
- * Fetch the ordering operators associated with the index, if
- * any.
+ * Fetch the ordering operators associated with the index, if any.
*/
amorderstrategy = indexRelation->rd_am->amorderstrategy;
if (amorderstrategy != 0)
/*
* Fetch the index expressions and predicate, if any. We must
* modify the copies we obtain from the relcache to have the
- * correct varno for the parent relation, so that they match
- * up correctly against qual clauses.
+ * correct varno for the parent relation, so that they match up
+ * correctly against qual clauses.
*/
info->indexprs = RelationGetIndexExpressions(indexRelation);
info->indpred = RelationGetIndexPredicate(indexRelation);
info->unique = index->indisunique;
/*
- * Estimate the index size. If it's not a partial index, we
- * lock the number-of-tuples estimate to equal the parent table;
- * if it is partial then we have to use the same methods as we
- * would for a table, except we can be sure that the index is
- * not larger than the table.
+ * Estimate the index size. If it's not a partial index, we lock
+ * the number-of-tuples estimate to equal the parent table; if it
+ * is partial then we have to use the same methods as we would for
+ * a table, except we can be sure that the index is not larger
+ * than the table.
*/
if (info->indpred == NIL)
{
estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples)
{
- BlockNumber curpages;
- BlockNumber relpages;
+ BlockNumber curpages;
+ BlockNumber relpages;
double reltuples;
double density;
/*
* HACK: if the relation has never yet been vacuumed, use a
- * minimum estimate of 10 pages. This emulates a desirable
- * aspect of pre-8.0 behavior, which is that we wouldn't assume
- * a newly created relation is really small, which saves us from
- * making really bad plans during initial data loading. (The
- * plans are not wrong when they are made, but if they are cached
- * and used again after the table has grown a lot, they are bad.)
- * It would be better to force replanning if the table size has
- * changed a lot since the plan was made ... but we don't
- * currently have any infrastructure for redoing cached plans at
- * all, so we have to kluge things here instead.
+ * minimum estimate of 10 pages. This emulates a desirable aspect
+ * of pre-8.0 behavior, which is that we wouldn't assume a newly
+ * created relation is really small, which saves us from making
+ * really bad plans during initial data loading. (The plans are
+ * not wrong when they are made, but if they are cached and used
+ * again after the table has grown a lot, they are bad.) It would
+ * be better to force replanning if the table size has changed a
+ * lot since the plan was made ... but we don't currently have any
+ * infrastructure for redoing cached plans at all, so we have to
+ * kluge things here instead.
*
- * We approximate "never vacuumed" by "has relpages = 0", which
- * means this will also fire on genuinely empty relations. Not
- * great, but fortunately that's a seldom-seen case in the real
- * world, and it shouldn't degrade the quality of the plan too
- * much anyway to err in this direction.
+ * We approximate "never vacuumed" by "has relpages = 0", which means
+ * this will also fire on genuinely empty relations. Not great,
+ * but fortunately that's a seldom-seen case in the real world,
+ * and it shouldn't degrade the quality of the plan too much
+ * anyway to err in this direction.
*/
if (curpages < 10 && rel->rd_rel->relpages == 0)
curpages = 10;
/* coerce values in pg_class to more desirable types */
relpages = (BlockNumber) rel->rd_rel->relpages;
reltuples = (double) rel->rd_rel->reltuples;
+
/*
* If it's an index, discount the metapage. This is a kluge
* because it assumes more than it ought to about index contents;
* When we have no data because the relation was truncated,
* estimate tuple width from attribute datatypes. We assume
* here that the pages are completely full, which is OK for
- * tables (since they've presumably not been VACUUMed yet)
- * but is probably an overestimate for indexes. Fortunately
+ * tables (since they've presumably not been VACUUMed yet) but
+ * is probably an overestimate for indexes. Fortunately
* get_relation_info() can clamp the overestimate to the
* parent table's size.
*
* Note: this code intentionally disregards alignment
- * considerations, because (a) that would be gilding the
- * lily considering how crude the estimate is, and (b)
- * it creates platform dependencies in the default plans
- * which are kind of a headache for regression testing.
+ * considerations, because (a) that would be gilding the lily
+ * considering how crude the estimate is, and (b) it creates
+ * platform dependencies in the default plans which are kind
+ * of a headache for regression testing.
*/
- int32 tuple_width = 0;
- int i;
+ int32 tuple_width = 0;
+ int i;
for (i = 1; i <= RelationGetNumberOfAttributes(rel); i++)
{
constr = relation->rd_att->constr;
if (constr != NULL)
{
- int num_check = constr->num_check;
- int i;
+ int num_check = constr->num_check;
+ int i;
for (i = 0; i < num_check; i++)
{
- Node *cexpr;
+ Node *cexpr;
cexpr = stringToNode(constr->check[i].ccbin);
ChangeVarNodes(cexpr, 1, varno, 0);
/*
- * Finally, convert to implicit-AND format (that is, a List)
- * and append the resulting item(s) to our output list.
+ * Finally, convert to implicit-AND format (that is, a List) and
+ * append the resulting item(s) to our output list.
*/
result = list_concat(result,
make_ands_implicit((Expr *) cexpr));
break;
case RTE_FUNCTION:
- expandRTE(rte, varno, 0, true /* include dropped */,
+ expandRTE(rte, varno, 0, true /* include dropped */ ,
NULL, &colvars);
foreach(l, colvars)
{
var = (Var *) lfirst(l);
+
/*
* A non-Var in expandRTE's output means a dropped column;
* must punt.
IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
/*
- * Note: ignore partial indexes, since they don't allow us to
- * conclude that all attr values are distinct. We don't take any
- * interest in expressional indexes either. Also, a multicolumn
- * unique index doesn't allow us to conclude that just the
- * specified attr is unique.
+ * Note: ignore partial indexes, since they don't allow us to conclude
+ * that all attr values are distinct. We don't take any interest in
+ * expressional indexes either. Also, a multicolumn unique index
+ * doesn't allow us to conclude that just the specified attr is
+ * unique.
*/
if (index->unique &&
index->ncolumns == 1 &&
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.3 2005/10/06 16:01:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.4 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool predicate_implied_by_simple_clause(Expr *predicate, Node *clause);
static bool predicate_refuted_by_simple_clause(Expr *predicate, Node *clause);
static bool btree_predicate_proof(Expr *predicate, Node *clause,
- bool refute_it);
+ bool refute_it);
/*
/*
* In all cases where the predicate is an AND-clause,
* predicate_implied_by_recurse() will prefer to iterate over the
- * predicate's components. So we can just do that to start with here,
- * and eliminate the need for predicate_implied_by_recurse() to handle
- * a bare List on the predicate side.
+ * predicate's components. So we can just do that to start with here, and
+ * eliminate the need for predicate_implied_by_recurse() to handle a bare
+ * List on the predicate side.
*
* Logic is: restriction must imply each of the AND'ed predicate items.
*/
return false; /* no restriction: refutation must fail */
/*
- * Unlike the implication case, predicate_refuted_by_recurse needs to
- * be able to see the top-level AND structure on both sides --- otherwise
- * it will fail to handle the case where one restriction clause is an OR
- * that can refute the predicate AND as a whole, but not each predicate
- * clause separately.
+ * Unlike the implication case, predicate_refuted_by_recurse needs to be
+ * able to see the top-level AND structure on both sides --- otherwise it
+ * will fail to handle the case where one restriction clause is an OR that
+ * can refute the predicate AND as a whole, but not each predicate clause
+ * separately.
*/
return predicate_refuted_by_recurse((Node *) restrictinfo_list,
(Node *) predicate_list);
* OR-expr A => AND-expr B iff: A => each of B's components
* OR-expr A => OR-expr B iff: each of A's components => any of B's
*
- * An "atom" is anything other than an AND or OR node. Notice that we don't
+ * An "atom" is anything other than an AND or OR node. Notice that we don't
* have any special logic to handle NOT nodes; these should have been pushed
* down or eliminated where feasible by prepqual.c.
*
* under the assumption that both inputs have been AND/OR flattened.
*
* A bare List node on the restriction side is interpreted as an AND clause,
- * in order to handle the top-level restriction List properly. However we
+ * in order to handle the top-level restriction List properly. However we
* need not consider a List on the predicate side since predicate_implied_by()
* already expanded it.
*
if (or_clause(predicate))
{
/*
- * OR-clause => OR-clause if each of A's items implies any of
- * B's items. Messy but can't do it any more simply.
+ * OR-clause => OR-clause if each of A's items implies any of B's
+ * items. Messy but can't do it any more simply.
*/
foreach(item, ((BoolExpr *) clause)->args)
{
break;
}
if (item2 == NULL)
- return false; /* doesn't imply any of B's */
+ return false; /* doesn't imply any of B's */
}
return true;
}
*
* When the predicate is of the form "foo IS NULL", we can conclude that
* the predicate is refuted if the clause is a strict operator or function
- * that has "foo" as an input. See notes for implication case.
+ * that has "foo" as an input. See notes for implication case.
*
* Finally, we may be able to deduce something using knowledge about btree
* operator classes; this is encapsulated in btree_predicate_proof().
/*
* The target operator:
*
- * LT LE EQ GE GT NE
+ * LT LE EQ GE GT NE
*/
- {BTGE, BTGE, 0 , 0 , 0 , BTGE}, /* LT */
- {BTGT, BTGE, 0 , 0 , 0 , BTGT}, /* LE */
- {BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
- {0 , 0 , 0 , BTLE, BTLT, BTLT}, /* GE */
- {0 , 0 , 0 , BTLE, BTLE, BTLE}, /* GT */
- {0 , 0 , 0 , 0 , 0 , BTEQ} /* NE */
+ {BTGE, BTGE, 0, 0, 0, BTGE}, /* LT */
+ {BTGT, BTGE, 0, 0, 0, BTGT}, /* LE */
+ {BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
+ {0, 0, 0, BTLE, BTLT, BTLT}, /* GE */
+ {0, 0, 0, BTLE, BTLE, BTLE}, /* GT */
+ {0, 0, 0, 0, 0, BTEQ} /* NE */
};
static const StrategyNumber BT_refute_table[6][6] = {
/*
* The target operator:
*
- * LT LE EQ GE GT NE
+ * LT LE EQ GE GT NE
*/
- {0 , 0 , BTGE, BTGE, BTGE, 0 }, /* LT */
- {0 , 0 , BTGT, BTGT, BTGE, 0 }, /* LE */
- {BTLE, BTLT, BTNE, BTGT, BTGE, BTEQ}, /* EQ */
- {BTLE, BTLT, BTLT, 0 , 0 , 0 }, /* GE */
- {BTLE, BTLE, BTLE, 0 , 0 , 0 }, /* GT */
- {0 , 0 , BTEQ, 0 , 0 , 0 } /* NE */
+ {0, 0, BTGE, BTGE, BTGE, 0}, /* LT */
+ {0, 0, BTGT, BTGT, BTGE, 0}, /* LE */
+ {BTLE, BTLT, BTNE, BTGT, BTGE, BTEQ}, /* EQ */
+ {BTLE, BTLT, BTLT, 0, 0, 0}, /* GE */
+ {BTLE, BTLE, BTLE, 0, 0, 0}, /* GT */
+ {0, 0, BTEQ, 0, 0, 0} /* NE */
};
MemoryContext oldcontext;
/*
- * Both expressions must be binary opclauses with a
- * Const on one side, and identical subexpressions on the other sides.
- * Note we don't have to think about binary relabeling of the Const
- * node, since that would have been folded right into the Const.
+ * Both expressions must be binary opclauses with a Const on one side, and
+ * identical subexpressions on the other sides. Note we don't have to
+ * think about binary relabeling of the Const node, since that would have
+ * been folded right into the Const.
*
- * If either Const is null, we also fail right away; this assumes that
- * the test operator will always be strict.
+ * If either Const is null, we also fail right away; this assumes that the
+ * test operator will always be strict.
*/
if (!is_opclause(predicate))
return false;
return false;
/*
- * Check for matching subexpressions on the non-Const sides. We used
- * to only allow a simple Var, but it's about as easy to allow any
- * expression. Remember we already know that the pred expression does
- * not contain any non-immutable functions, so identical expressions
- * should yield identical results.
+ * Check for matching subexpressions on the non-Const sides. We used to
+ * only allow a simple Var, but it's about as easy to allow any
+ * expression. Remember we already know that the pred expression does not
+ * contain any non-immutable functions, so identical expressions should
+ * yield identical results.
*/
if (!equal(pred_var, clause_var))
return false;
*
* We must find a btree opclass that contains both operators, else the
* implication can't be determined. Also, the pred_op has to be of
- * default subtype (implying left and right input datatypes are the
- * same); otherwise it's unsafe to put the pred_const on the left side
- * of the test. Also, the opclass must contain a suitable test
- * operator matching the clause_const's type (which we take to mean
- * that it has the same subtype as the original clause_operator).
+ * default subtype (implying left and right input datatypes are the same);
+ * otherwise it's unsafe to put the pred_const on the left side of the
+ * test. Also, the opclass must contain a suitable test operator matching
+ * the clause_const's type (which we take to mean that it has the same
+ * subtype as the original clause_operator).
*
* If there are multiple matching opclasses, assume we can use any one to
- * determine the logical relationship of the two operators and the
- * correct corresponding test operator. This should work for any
- * logically consistent opclasses.
+ * determine the logical relationship of the two operators and the correct
+ * corresponding test operator. This should work for any logically
+ * consistent opclasses.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(pred_op),
0, 0, 0);
/*
- * If we couldn't find any opclass containing the pred_op, perhaps it
- * is a <> operator. See if it has a negator that is in an opclass.
+ * If we couldn't find any opclass containing the pred_op, perhaps it is a
+ * <> operator. See if it has a negator that is in an opclass.
*/
pred_op_negated = false;
if (catlist->n_members == 0)
pred_op_negated = true;
ReleaseSysCacheList(catlist);
catlist = SearchSysCacheList(AMOPOPID, 1,
- ObjectIdGetDatum(pred_op_negator),
+ ObjectIdGetDatum(pred_op_negator),
0, 0, 0);
}
}
}
/*
- * From the same opclass, find a strategy number for the
- * clause_op, if possible
+ * From the same opclass, find a strategy number for the clause_op, if
+ * possible
*/
clause_tuple = SearchSysCache(AMOPOPID,
ObjectIdGetDatum(clause_op),
else if (OidIsValid(clause_op_negator))
{
clause_tuple = SearchSysCache(AMOPOPID,
- ObjectIdGetDatum(clause_op_negator),
+ ObjectIdGetDatum(clause_op_negator),
ObjectIdGetDatum(opclass_id),
0, 0);
if (HeapTupleIsValid(clause_tuple))
}
/*
- * See if opclass has an operator for the test strategy and the
- * clause datatype.
+ * See if opclass has an operator for the test strategy and the clause
+ * datatype.
*/
if (test_strategy == BTNE)
{
*
* Note that we require only the test_op to be immutable, not the
* original clause_op. (pred_op is assumed to have been checked
- * immutable by the caller.) Essentially we are assuming that
- * the opclass is consistent even if it contains operators that
- * are merely stable.
+ * immutable by the caller.) Essentially we are assuming that the
+ * opclass is consistent even if it contains operators that are
+ * merely stable.
*/
if (op_volatile(test_op) == PROVOLATILE_IMMUTABLE)
{
/* And execute it. */
test_result = ExecEvalExprSwitchContext(test_exprstate,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull, NULL);
/* Get back to outer memory context */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.71 2005/07/28 22:27:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.72 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} JoinHashEntry;
static RelOptInfo *make_reloptinfo(PlannerInfo *root, int relid,
- RelOptKind reloptkind);
+ RelOptKind reloptkind);
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
- RelOptInfo *input_rel);
+ RelOptInfo *input_rel);
static List *build_joinrel_restrictlist(PlannerInfo *root,
RelOptInfo *joinrel,
RelOptInfo *outer_rel,
/* Add the finished struct to the base_rel_array */
if (relid >= root->base_rel_array_size)
{
- int oldsize = root->base_rel_array_size;
- int newsize;
+ int oldsize = root->base_rel_array_size;
+ int newsize;
newsize = Max(oldsize * 2, relid + 1);
root->base_rel_array = (RelOptInfo **)
hashtab = hash_create("JoinRelHashTable",
256L,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
/* Insert all the already-existing joinrels */
foreach(l, root->join_rel_list)
find_join_rel(PlannerInfo *root, Relids relids)
{
/*
- * Switch to using hash lookup when list grows "too long". The threshold
+ * Switch to using hash lookup when list grows "too long". The threshold
* is arbitrary and is known only here.
*/
if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
/*
* Use either hashtable lookup or linear search, as appropriate.
*
- * Note: the seemingly redundant hashkey variable is used to avoid
- * taking the address of relids; unless the compiler is exceedingly
- * smart, doing so would force relids out of a register and thus
- * probably slow down the list-search case.
+ * Note: the seemingly redundant hashkey variable is used to avoid taking the
+ * address of relids; unless the compiler is exceedingly smart, doing so
+ * would force relids out of a register and thus probably slow down the
+ * list-search case.
*/
if (root->join_rel_hash)
{
if (joinrel)
{
/*
- * Yes, so we only need to figure the restrictlist for this
- * particular pair of component relations.
+ * Yes, so we only need to figure the restrictlist for this particular
+ * pair of component relations.
*/
if (restrictlist_ptr)
*restrictlist_ptr = build_joinrel_restrictlist(root,
joinrel->index_inner_paths = NIL;
/*
- * Create a new tlist containing just the vars that need to be output
- * from this join (ie, are needed for higher joinclauses or final
- * output).
+ * Create a new tlist containing just the vars that need to be output from
+ * this join (ie, are needed for higher joinclauses or final output).
*
- * NOTE: the tlist order for a join rel will depend on which pair of
- * outer and inner rels we first try to build it from. But the
- * contents should be the same regardless.
+ * NOTE: the tlist order for a join rel will depend on which pair of outer
+ * and inner rels we first try to build it from. But the contents should
+ * be the same regardless.
*/
build_joinrel_tlist(root, joinrel, outer_rel);
build_joinrel_tlist(root, joinrel, inner_rel);
/*
* Construct restrict and join clause lists for the new joinrel. (The
- * caller might or might not need the restrictlist, but I need it
- * anyway for set_joinrel_size_estimates().)
+ * caller might or might not need the restrictlist, but I need it anyway
+ * for set_joinrel_size_estimates().)
*/
restrictlist = build_joinrel_restrictlist(root,
joinrel,
jointype, restrictlist);
/*
- * Add the joinrel to the query's joinrel list, and store it into
- * the auxiliary hashtable if there is one. NB: GEQO requires us
- * to append the new joinrel to the end of the list!
+ * Add the joinrel to the query's joinrel list, and store it into the
+ * auxiliary hashtable if there is one. NB: GEQO requires us to append
+ * the new joinrel to the end of the list!
*/
root->join_rel_list = lappend(root->join_rel_list, joinrel);
* Collect all the clauses that syntactically belong at this level.
*/
rlist = list_concat(subbuild_joinrel_restrictlist(joinrel,
- outer_rel->joininfo),
+ outer_rel->joininfo),
subbuild_joinrel_restrictlist(joinrel,
- inner_rel->joininfo));
+ inner_rel->joininfo));
/*
* Eliminate duplicate and redundant clauses.
*
- * We must eliminate duplicates, since we will see many of the same
- * clauses arriving from both input relations. Also, if a clause is a
- * mergejoinable clause, it's possible that it is redundant with
- * previous clauses (see optimizer/README for discussion). We detect
- * that case and omit the redundant clause from the result list.
+ * We must eliminate duplicates, since we will see many of the same clauses
+ * arriving from both input relations. Also, if a clause is a
+ * mergejoinable clause, it's possible that it is redundant with previous
+ * clauses (see optimizer/README for discussion). We detect that case and
+ * omit the redundant clause from the result list.
*/
result = remove_redundant_join_clauses(root, rlist,
IS_OUTER_JOIN(jointype));
if (bms_is_subset(rinfo->required_relids, joinrel->relids))
{
/*
- * This clause becomes a restriction clause for the joinrel,
- * since it refers to no outside rels. We don't bother to
- * check for duplicates here --- build_joinrel_restrictlist
- * will do that.
+ * This clause becomes a restriction clause for the joinrel, since
+ * it refers to no outside rels. We don't bother to check for
+ * duplicates here --- build_joinrel_restrictlist will do that.
*/
restrictlist = lappend(restrictlist, rinfo);
}
else
{
/*
- * This clause is still a join clause at this level, so we
- * ignore it in this routine.
+ * This clause is still a join clause at this level, so we ignore
+ * it in this routine.
*/
}
}
if (bms_is_subset(rinfo->required_relids, joinrel->relids))
{
/*
- * This clause becomes a restriction clause for the joinrel,
- * since it refers to no outside rels. So we can ignore it
- * in this routine.
+ * This clause becomes a restriction clause for the joinrel, since
+ * it refers to no outside rels. So we can ignore it in this
+ * routine.
*/
}
else
{
/*
- * This clause is still a join clause at this level, so add
- * it to the joininfo list for the joinrel, being careful to
- * eliminate duplicates. (Since RestrictInfo nodes are normally
+ * This clause is still a join clause at this level, so add it to
+ * the joininfo list for the joinrel, being careful to eliminate
+ * duplicates. (Since RestrictInfo nodes are normally
* multiply-linked rather than copied, pointer equality should be
* a sufficient test. If two equal() nodes should happen to sneak
* in, no great harm is done --- they'll be detected by
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.40 2005/10/13 00:06:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.41 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
make_restrictinfo(Expr *clause, bool is_pushed_down, Relids required_relids)
{
/*
- * If it's an OR clause, build a modified copy with RestrictInfos
- * inserted above each subclause of the top-level AND/OR structure.
+ * If it's an OR clause, build a modified copy with RestrictInfos inserted
+ * above each subclause of the top-level AND/OR structure.
*/
if (or_clause((Node *) clause))
return (RestrictInfo *) make_sub_restrictinfos(clause, is_pushed_down);
/*
* There may well be redundant quals among the subplans, since a
* top-level WHERE qual might have gotten used to form several
- * different index quals. We don't try exceedingly hard to
- * eliminate redundancies, but we do eliminate obvious duplicates
- * by using list_concat_unique.
+ * different index quals. We don't try exceedingly hard to eliminate
+ * redundancies, but we do eliminate obvious duplicates by using
+ * list_concat_unique.
*/
result = NIL;
foreach(l, apath->bitmapquals)
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
{
/*
* If we find a qual-less subscan, it represents a constant
- * TRUE, and hence the OR result is also constant TRUE, so
- * we can stop here.
+ * TRUE, and hence the OR result is also constant TRUE, so we
+ * can stop here.
*/
return NIL;
}
}
/*
- * Avoid generating one-element ORs, which could happen
- * due to redundancy elimination.
+ * Avoid generating one-element ORs, which could happen due to
+ * redundancy elimination.
*/
if (list_length(withris) <= 1)
result = withris;
}
else if (IsA(bitmapqual, IndexPath))
{
- IndexPath *ipath = (IndexPath *) bitmapqual;
+ IndexPath *ipath = (IndexPath *) bitmapqual;
result = list_copy(ipath->indexclauses);
if (include_predicates && ipath->indexinfo->indpred != NIL)
{
foreach(l, ipath->indexinfo->indpred)
{
- Expr *pred = (Expr *) lfirst(l);
+ Expr *pred = (Expr *) lfirst(l);
/*
- * We know that the index predicate must have been implied
- * by the query condition as a whole, but it may or may not
- * be implied by the conditions that got pushed into the
- * bitmapqual. Avoid generating redundant conditions.
+ * We know that the index predicate must have been implied by
+ * the query condition as a whole, but it may or may not be
+ * implied by the conditions that got pushed into the
+ * bitmapqual. Avoid generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), result))
result = lappend(result,
restrictinfo->can_join = false; /* may get set below */
/*
- * If it's a binary opclause, set up left/right relids info. In any
- * case set up the total clause relids info.
+ * If it's a binary opclause, set up left/right relids info. In any case
+ * set up the total clause relids info.
*/
if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
{
restrictinfo->right_relids = pull_varnos(get_rightop(clause));
restrictinfo->clause_relids = bms_union(restrictinfo->left_relids,
- restrictinfo->right_relids);
+ restrictinfo->right_relids);
/*
* Does it look like a normal join clause, i.e., a binary operator
- * relating expressions that come from distinct relations? If so
- * we might be able to use it in a join algorithm. Note that this
- * is a purely syntactic test that is made regardless of context.
+ * relating expressions that come from distinct relations? If so we
+ * might be able to use it in a join algorithm. Note that this is a
+ * purely syntactic test that is made regardless of context.
*/
if (!bms_is_empty(restrictinfo->left_relids) &&
!bms_is_empty(restrictinfo->right_relids) &&
restrictinfo->required_relids = restrictinfo->clause_relids;
/*
- * Fill in all the cacheable fields with "not yet set" markers. None
- * of these will be computed until/unless needed. Note in particular
- * that we don't mark a binary opclause as mergejoinable or
- * hashjoinable here; that happens only if it appears in the right
- * context (top level of a joinclause list).
+ * Fill in all the cacheable fields with "not yet set" markers. None of
+ * these will be computed until/unless needed. Note in particular that we
+ * don't mark a binary opclause as mergejoinable or hashjoinable here;
+ * that happens only if it appears in the right context (top level of a
+ * joinclause list).
*/
restrictinfo->eval_cost.startup = -1;
restrictinfo->this_selec = -1;
QualCost cost;
/*
- * If there are any redundant clauses, we want to eliminate the ones
- * that are more expensive in favor of the ones that are less so. Run
+ * If there are any redundant clauses, we want to eliminate the ones that
+ * are more expensive in favor of the ones that are less so. Run
* cost_qual_eval() to ensure the eval_cost fields are set up.
*/
cost_qual_eval(&cost, restrictinfo_list);
/*
- * We don't have enough knowledge yet to be able to estimate the
- * number of times a clause might be evaluated, so it's hard to weight
- * the startup and per-tuple costs appropriately. For now just weight
- * 'em the same.
+ * We don't have enough knowledge yet to be able to estimate the number of
+ * times a clause might be evaluated, so it's hard to weight the startup
+ * and per-tuple costs appropriately. For now just weight 'em the same.
*/
#define CLAUSECOST(r) ((r)->eval_cost.startup + (r)->eval_cost.per_tuple)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.69 2005/04/06 16:34:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.70 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
TargetEntry *tle;
- tle = makeTargetEntry(copyObject(var), /* copy needed?? */
+ tle = makeTargetEntry(copyObject(var), /* copy needed?? */
next_resno++,
NULL,
false);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.65 2005/06/05 22:32:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.66 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
context.sublevels_up = 0;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
query_or_expression_tree_walker(node,
pull_varnos_walker,
context.sublevels_up = levelsup;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
contain_var_reference_walker,
if (IsA(node, Var))
{
if (((Var *) node)->varlevelsup == 0)
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
return false;
}
return expression_tree_walker(node, contain_var_clause_walker, context);
int sublevels_up = levelsup;
return query_or_expression_tree_walker(node,
- contain_vars_above_level_walker,
+ contain_vars_above_level_walker,
(void *) &sublevels_up,
0);
}
context->min_varlevel = varlevelsup;
/*
- * As soon as we find a local variable, we can abort the
- * tree traversal, since min_varlevel is then certainly 0.
+ * As soon as we find a local variable, we can abort the tree
+ * traversal, since min_varlevel is then certainly 0.
*/
if (varlevelsup == 0)
return true;
}
/*
- * An Aggref must be treated like a Var of its level. Normally we'd
- * get the same result from looking at the Vars in the aggregate's
- * argument, but this fails in the case of a Var-less aggregate call
- * (COUNT(*)).
+ * An Aggref must be treated like a Var of its level. Normally we'd get
+ * the same result from looking at the Vars in the aggregate's argument,
+ * but this fails in the case of a Var-less aggregate call (COUNT(*)).
*/
if (IsA(node, Aggref))
{
context->min_varlevel = agglevelsup;
/*
- * As soon as we find a local aggregate, we can abort the
- * tree traversal, since min_varlevel is then certainly 0.
+ * As soon as we find a local aggregate, we can abort the tree
+ * traversal, since min_varlevel is then certainly 0.
*/
if (agglevelsup == 0)
return true;
newvar = (Node *) list_nth(rte->joinaliasvars, var->varattno - 1);
/*
- * If we are expanding an alias carried down from an upper query,
- * must adjust its varlevelsup fields.
+ * If we are expanding an alias carried down from an upper query, must
+ * adjust its varlevelsup fields.
*/
if (context->sublevels_up != 0)
{
InClauseInfo *ininfo;
ininfo = (InClauseInfo *) expression_tree_mutator(node,
- flatten_join_alias_vars_mutator,
- (void *) context);
+ flatten_join_alias_vars_mutator,
+ (void *) context);
/* now fix InClauseInfo's relid sets */
if (context->sublevels_up == 0)
{
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.325 2005/10/02 23:50:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.326 2005/10/15 02:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *indexes; /* CREATE INDEX items */
List *triggers; /* CREATE TRIGGER items */
List *grants; /* GRANT items */
- List *fwconstraints; /* Forward referencing FOREIGN KEY
- * constraints */
+ List *fwconstraints; /* Forward referencing FOREIGN KEY constraints */
List *alters; /* Generated ALTER items (from the above) */
List *ixconstraints; /* index-creating constraints */
List *blist; /* "before list" of things to do before
* creating the schema */
- List *alist; /* "after list" of things to do after
- * creating the schema */
+ List *alist; /* "after list" of things to do after creating
+ * the schema */
} CreateSchemaStmtContext;
/* State shared by transformCreateStmt and its subroutines */
List *ixconstraints; /* index-creating constraints */
List *blist; /* "before list" of things to do before
* creating the table */
- List *alist; /* "after list" of things to do after
- * creating the table */
+ List *alist; /* "after list" of things to do after creating
+ * the table */
IndexStmt *pkey; /* PRIMARY KEY index, if any */
} CreateStmtContext;
static void release_pstate_resources(ParseState *pstate);
static FromExpr *makeFromExpr(List *fromlist, Node *quals);
static bool check_parameter_resolution_walker(Node *node,
- check_parameter_resolution_context *context);
+ check_parameter_resolution_context *context);
/*
result = list_concat(result, parse_sub_analyze(lfirst(l), pstate));
/*
- * Make sure that only the original query is marked original. We have
- * to do this explicitly since recursive calls of do_parse_analyze
- * will have marked some of the added-on queries as "original". Also
- * mark only the original query as allowed to set the command-result
- * tag.
+ * Make sure that only the original query is marked original. We have to
+ * do this explicitly since recursive calls of do_parse_analyze will have
+ * marked some of the added-on queries as "original". Also mark only the
+ * original query as allowed to set the command-result tag.
*/
foreach(l, result)
{
(SelectStmt *) parseTree);
else
result = transformSetOperationStmt(pstate,
- (SelectStmt *) parseTree);
+ (SelectStmt *) parseTree);
break;
case T_DeclareCursorStmt:
result = transformDeclareCursorStmt(pstate,
- (DeclareCursorStmt *) parseTree);
+ (DeclareCursorStmt *) parseTree);
break;
default:
/*
- * other statements don't require any transformation-- just
- * return the original parsetree, yea!
+ * other statements don't require any transformation-- just return
+ * the original parsetree, yea!
*/
result = makeNode(Query);
result->commandType = CMD_UTILITY;
result->canSetTag = true;
/*
- * Check that we did not produce too many resnos; at the very
- * least we cannot allow more than 2^16, since that would exceed
- * the range of a AttrNumber. It seems safest to use
- * MaxTupleAttributeNumber.
+ * Check that we did not produce too many resnos; at the very least we
+ * cannot allow more than 2^16, since that would exceed the range of a
+ * AttrNumber. It seems safest to use MaxTupleAttributeNumber.
*/
if (pstate->p_next_resno - 1 > MaxTupleAttributeNumber)
ereport(ERROR,
extras_before, extras_after);
/*
- * If a list of column names was given, run through and insert these
- * into the actual query tree. - thomas 2000-03-08
+ * If a list of column names was given, run through and insert these into
+ * the actual query tree. - thomas 2000-03-08
*
- * Outer loop is over targetlist to make it easier to skip junk
- * targetlist entries.
+ * Outer loop is over targetlist to make it easier to skip junk targetlist
+ * entries.
*/
if (stmt->aliases != NIL)
{
/* set up range table with just the result rel */
qry->resultRelation = setTargetTable(pstate, stmt->relation,
- interpretInhOption(stmt->relation->inhOpt),
+ interpretInhOption(stmt->relation->inhOpt),
true,
ACL_DELETE);
qry->distinctClause = NIL;
/*
- * The USING clause is non-standard SQL syntax, and is equivalent
- * in functionality to the FROM list that can be specified for
- * UPDATE. The USING keyword is used rather than FROM because FROM
- * is already a keyword in the DELETE syntax.
+ * The USING clause is non-standard SQL syntax, and is equivalent in
+ * functionality to the FROM list that can be specified for UPDATE. The
+ * USING keyword is used rather than FROM because FROM is already a
+ * keyword in the DELETE syntax.
*/
transformFromClause(pstate, stmt->usingClause);
/*
* If a non-nil rangetable/namespace was passed in, and we are doing
* INSERT/SELECT, arrange to pass the rangetable/namespace down to the
- * SELECT. This can only happen if we are inside a CREATE RULE, and
- * in that case we want the rule's OLD and NEW rtable entries to
- * appear as part of the SELECT's rtable, not as outer references for
- * it. (Kluge!) The SELECT's joinlist is not affected however. We
- * must do this before adding the target table to the INSERT's rtable.
+ * SELECT. This can only happen if we are inside a CREATE RULE, and in
+ * that case we want the rule's OLD and NEW rtable entries to appear as
+ * part of the SELECT's rtable, not as outer references for it. (Kluge!)
+ * The SELECT's joinlist is not affected however. We must do this before
+ * adding the target table to the INSERT's rtable.
*/
if (stmt->selectStmt)
{
}
/*
- * Must get write lock on INSERT target table before scanning SELECT,
- * else we will grab the wrong kind of initial lock if the target
- * table is also mentioned in the SELECT part. Note that the target
- * table is not added to the joinlist or namespace.
+ * Must get write lock on INSERT target table before scanning SELECT, else
+ * we will grab the wrong kind of initial lock if the target table is also
+ * mentioned in the SELECT part. Note that the target table is not added
+ * to the joinlist or namespace.
*/
qry->resultRelation = setTargetTable(pstate, stmt->relation,
false, false, ACL_INSERT);
if (stmt->selectStmt)
{
/*
- * We make the sub-pstate a child of the outer pstate so that it
- * can see any Param definitions supplied from above. Since the
- * outer pstate's rtable and namespace are presently empty, there
- * are no side-effects of exposing names the sub-SELECT shouldn't
- * be able to see.
+ * We make the sub-pstate a child of the outer pstate so that it can
+ * see any Param definitions supplied from above. Since the outer
+ * pstate's rtable and namespace are presently empty, there are no
+ * side-effects of exposing names the sub-SELECT shouldn't be able to
+ * see.
*/
ParseState *sub_pstate = make_parsestate(pstate);
RangeTblEntry *rte;
/*
* Process the source SELECT.
*
- * It is important that this be handled just like a standalone
- * SELECT; otherwise the behavior of SELECT within INSERT might be
- * different from a stand-alone SELECT. (Indeed, Postgres up
- * through 6.5 had bugs of just that nature...)
+ * It is important that this be handled just like a standalone SELECT;
+ * otherwise the behavior of SELECT within INSERT might be different
+ * from a stand-alone SELECT. (Indeed, Postgres up through 6.5 had
+ * bugs of just that nature...)
*/
sub_pstate->p_rtable = sub_rtable;
sub_pstate->p_relnamespace = sub_relnamespace;
sub_pstate->p_varnamespace = sub_varnamespace;
/*
- * Note: we are not expecting that extras_before and extras_after
- * are going to be used by the transformation of the SELECT
- * statement.
+ * Note: we are not expecting that extras_before and extras_after are
+ * going to be used by the transformation of the SELECT statement.
*/
selectQuery = transformStmt(sub_pstate, stmt->selectStmt,
extras_before, extras_after);
errmsg("INSERT ... SELECT may not specify INTO")));
/*
- * Make the source be a subquery in the INSERT's rangetable, and
- * add it to the INSERT's joinlist.
+ * Make the source be a subquery in the INSERT's rangetable, and add
+ * it to the INSERT's joinlist.
*/
rte = addRangeTableEntryForSubquery(pstate,
selectQuery,
if (tle->resjunk)
continue;
if (tle->expr &&
- (IsA(tle->expr, Const) || IsA(tle->expr, Param)) &&
+ (IsA(tle->expr, Const) ||IsA(tle->expr, Param)) &&
exprType((Node *) tle->expr) == UNKNOWNOID)
expr = tle->expr;
else
else
{
/*
- * For INSERT ... VALUES, transform the given list of values to
- * form a targetlist for the INSERT.
+ * For INSERT ... VALUES, transform the given list of values to form a
+ * targetlist for the INSERT.
*/
qry->targetList = transformTargetList(pstate, stmt->targetList);
}
if (icols == NULL || attnos == NULL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("INSERT has more expressions than target columns")));
+ errmsg("INSERT has more expressions than target columns")));
col = (ResTarget *) lfirst(icols);
Assert(IsA(col, ResTarget));
if (stmt->cols != NIL && (icols != NULL || attnos != NULL))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("INSERT has more target columns than expressions")));
+ errmsg("INSERT has more target columns than expressions")));
/* done building the range table and jointree */
qry->rtable = pstate->p_rtable;
cxt.hasoids = interpretOidsOption(stmt->hasoids);
/*
- * Run through each primary element in the table creation clause.
- * Separate column defs from constraints, and do preliminary analysis.
+ * Run through each primary element in the table creation clause. Separate
+ * column defs from constraints, and do preliminary analysis.
*/
foreach(elements, stmt->tableElts)
{
*
* Although we use ChooseRelationName, it's not guaranteed that the
* selected sequence name won't conflict; given sufficiently long
- * field names, two different serial columns in the same table
- * could be assigned the same sequence name, and we'd not notice
- * since we aren't creating the sequence quite yet. In practice
- * this seems quite unlikely to be a problem, especially since few
- * people would need two serial columns in one table.
+ * field names, two different serial columns in the same table could
+ * be assigned the same sequence name, and we'd not notice since we
+ * aren't creating the sequence quite yet. In practice this seems
+ * quite unlikely to be a problem, especially since few people would
+ * need two serial columns in one table.
*/
snamespaceid = RangeVarGetCreationNamespace(cxt->relation);
snamespace = get_namespace_name(snamespaceid);
cxt->relation->relname, column->colname)));
/*
- * Build a CREATE SEQUENCE command to create the sequence object,
- * and add it to the list of things to be done before this
- * CREATE/ALTER TABLE.
+ * Build a CREATE SEQUENCE command to create the sequence object, and
+ * add it to the list of things to be done before this CREATE/ALTER
+ * TABLE.
*/
seqstmt = makeNode(CreateSeqStmt);
seqstmt->sequence = makeRangeVar(snamespace, sname);
/*
* Create appropriate constraints for SERIAL. We do this in full,
- * rather than shortcutting, so that we will detect any
- * conflicting constraints the user wrote (like a different
- * DEFAULT).
+ * rather than shortcutting, so that we will detect any conflicting
+ * constraints the user wrote (like a different DEFAULT).
*
* Create an expression tree representing the function call
- * nextval('sequencename'). We cannot reduce the raw tree
- * to cooked form until after the sequence is created, but
- * there's no need to do so.
+ * nextval('sequencename'). We cannot reduce the raw tree to cooked
+ * form until after the sequence is created, but there's no need to do
+ * so.
*/
qstring = quote_qualified_identifier(snamespace, sname);
snamenode = makeNode(A_Const);
constraint = lfirst(clist);
/*
- * If this column constraint is a FOREIGN KEY constraint, then we
- * fill in the current attribute's name and throw it into the list
- * of FK constraints to be processed later.
+ * If this column constraint is a FOREIGN KEY constraint, then we fill
+ * in the current attribute's name and throw it into the list of FK
+ * constraints to be processed later.
*/
if (IsA(constraint, FkConstraint))
{
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"",
- column->colname, cxt->relation->relname)));
+ column->colname, cxt->relation->relname)));
column->is_not_null = FALSE;
saw_nullable = true;
break;
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"",
- column->colname, cxt->relation->relname)));
+ column->colname, cxt->relation->relname)));
column->is_not_null = TRUE;
saw_nullable = true;
break;
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("multiple default values specified for column \"%s\" of table \"%s\"",
- column->colname, cxt->relation->relname)));
+ column->colname, cxt->relation->relname)));
column->raw_default = constraint->raw_expr;
Assert(constraint->cooked_expr == NULL);
break;
/*
* Create a new inherited column.
*
- * For constraints, ONLY the NOT NULL constraint is inherited by the
- * new column definition per SQL99.
+ * For constraints, ONLY the NOT NULL constraint is inherited by the new
+ * column definition per SQL99.
*/
def = makeNode(ColumnDef);
def->colname = pstrdup(attributeName);
Assert(this_default != NULL);
/*
- * If default expr could contain any vars, we'd need to fix
- * 'em, but it can't; so default is ready to apply to child.
+ * If default expr could contain any vars, we'd need to fix 'em,
+ * but it can't; so default is ready to apply to child.
*/
def->cooked_default = pstrdup(this_default);
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing
- * the parent before the child is committed.
+ * commit. That will prevent someone else from deleting or ALTERing the
+ * parent before the child is committed.
*/
heap_close(relation, NoLock);
}
ListCell *l;
/*
- * Run through the constraints that need to generate an index. For
- * PRIMARY KEY, mark each column as NOT NULL and create an index. For
- * UNIQUE, create an index as for PRIMARY KEY, but do not insist on
- * NOT NULL.
+ * Run through the constraints that need to generate an index. For PRIMARY
+ * KEY, mark each column as NOT NULL and create an index. For UNIQUE,
+ * create an index as for PRIMARY KEY, but do not insist on NOT NULL.
*/
foreach(listptr, cxt->ixconstraints)
{
cxt->pkey = index;
/*
- * In ALTER TABLE case, a primary index might already exist,
- * but DefineIndex will check for it.
+ * In ALTER TABLE case, a primary index might already exist, but
+ * DefineIndex will check for it.
*/
}
index->isconstraint = true;
index->whereClause = NULL;
/*
- * Make sure referenced keys exist. If we are making a PRIMARY
- * KEY index, also make sure they are NOT NULL, if possible.
- * (Although we could leave it to DefineIndex to mark the columns
- * NOT NULL, it's more efficient to get it right the first time.)
+ * Make sure referenced keys exist. If we are making a PRIMARY KEY
+ * index, also make sure they are NOT NULL, if possible. (Although we
+ * could leave it to DefineIndex to mark the columns NOT NULL, it's
+ * more efficient to get it right the first time.)
*/
foreach(keys, constraint->keys)
{
else if (SystemAttributeByName(key, cxt->hasoids) != NULL)
{
/*
- * column will be a system column in the new table, so
- * accept it. System columns can't ever be null, so no
- * need to worry about PRIMARY/NOT NULL constraint.
+ * column will be a system column in the new table, so accept
+ * it. System columns can't ever be null, so no need to worry
+ * about PRIMARY/NOT NULL constraint.
*/
found = true;
}
if (rel->rd_rel->relkind != RELKIND_RELATION)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("inherited relation \"%s\" is not a table",
- inh->relname)));
+ errmsg("inherited relation \"%s\" is not a table",
+ inh->relname)));
for (count = 0; count < rel->rd_att->natts; count++)
{
Form_pg_attribute inhattr = rel->rd_att->attrs[count];
/*
* We currently have no easy way to force an
- * inherited column to be NOT NULL at
- * creation, if its parent wasn't so already.
- * We leave it to DefineIndex to fix things up
- * in this case.
+ * inherited column to be NOT NULL at creation, if
+ * its parent wasn't so already. We leave it to
+ * DefineIndex to fix things up in this case.
*/
break;
}
}
/*
- * In the ALTER TABLE case, don't complain about index keys
- * not created in the command; they may well exist already.
- * DefineIndex will complain about them if not, and will also
- * take care of marking them NOT NULL.
+ * In the ALTER TABLE case, don't complain about index keys not
+ * created in the command; they may well exist already.
+ * DefineIndex will complain about them if not, and will also take
+ * care of marking them NOT NULL.
*/
if (!found && !cxt->isalter)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- key)));
+ errmsg("column \"%s\" named in key does not exist",
+ key)));
/* Check for PRIMARY KEY(foo, foo) */
foreach(columns, index->indexParams)
}
/*
- * Scan the index list and remove any redundant index specifications.
- * This can happen if, for instance, the user writes UNIQUE PRIMARY
- * KEY. A strict reading of SQL92 would suggest raising an error
- * instead, but that strikes me as too anal-retentive. - tgl
- * 2001-02-14
+ * Scan the index list and remove any redundant index specifications. This
+ * can happen if, for instance, the user writes UNIQUE PRIMARY KEY. A
+ * strict reading of SQL92 would suggest raising an error instead, but
+ * that strikes me as too anal-retentive. - tgl 2001-02-14
*
- * XXX in ALTER TABLE case, it'd be nice to look for duplicate
- * pre-existing indexes, too.
+ * XXX in ALTER TABLE case, it'd be nice to look for duplicate pre-existing
+ * indexes, too.
*/
cxt->alist = NIL;
if (cxt->pkey != NULL)
}
/*
- * For CREATE TABLE or ALTER TABLE ADD COLUMN, gin up an ALTER TABLE
- * ADD CONSTRAINT command to execute after the basic command is
- * complete. (If called from ADD CONSTRAINT, that routine will add the
- * FK constraints to its own subcommand list.)
+ * For CREATE TABLE or ALTER TABLE ADD COLUMN, gin up an ALTER TABLE ADD
+ * CONSTRAINT command to execute after the basic command is complete. (If
+ * called from ADD CONSTRAINT, that routine will add the FK constraints to
+ * its own subcommand list.)
*
* Note: the ADD CONSTRAINT command must also execute after any index
* creation commands. Thus, this should run after
if (stmt->whereClause)
{
/*
- * Put the parent table into the rtable so that the WHERE clause
- * can refer to its fields without qualification. Note that this
- * only works if the parent table already exists --- so we can't
- * easily support predicates on indexes created implicitly by
- * CREATE TABLE. Fortunately, that's not necessary.
+ * Put the parent table into the rtable so that the WHERE clause can
+ * refer to its fields without qualification. Note that this only
+ * works if the parent table already exists --- so we can't easily
+ * support predicates on indexes created implicitly by CREATE TABLE.
+ * Fortunately, that's not necessary.
*/
rte = addRangeTableEntry(pstate, stmt->relation, NULL, false, true);
ielem->expr = transformExpr(pstate, ielem->expr);
/*
- * We check only that the result type is legitimate; this is
- * for consistency with what transformWhereClause() checks for
- * the predicate. DefineIndex() will make more checks.
+ * We check only that the result type is legitimate; this is for
+ * consistency with what transformWhereClause() checks for the
+ * predicate. DefineIndex() will make more checks.
*/
if (expression_returns_set(ielem->expr))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("index expression may not return a set")));
+ errmsg("index expression may not return a set")));
}
}
rel = heap_openrv(stmt->relation, AccessExclusiveLock);
/*
- * NOTE: 'OLD' must always have a varno equal to 1 and 'NEW' equal to
- * 2. Set up their RTEs in the main pstate for use in parsing the
- * rule qualification.
+ * NOTE: 'OLD' must always have a varno equal to 1 and 'NEW' equal to 2.
+ * Set up their RTEs in the main pstate for use in parsing the rule
+ * qualification.
*/
Assert(pstate->p_rtable == NIL);
oldrte = addRangeTableEntryForRelation(pstate, rel,
newrte->requiredPerms = 0;
/*
- * They must be in the namespace too for lookup purposes, but only add
- * the one(s) that are relevant for the current kind of rule. In an
- * UPDATE rule, quals must refer to OLD.field or NEW.field to be
- * unambiguous, but there's no need to be so picky for INSERT &
- * DELETE. We do not add them to the joinlist.
+ * They must be in the namespace too for lookup purposes, but only add the
+ * one(s) that are relevant for the current kind of rule. In an UPDATE
+ * rule, quals must refer to OLD.field or NEW.field to be unambiguous, but
+ * there's no need to be so picky for INSERT & DELETE. We do not add them
+ * to the joinlist.
*/
switch (stmt->event)
{
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("rule WHERE condition may not contain aggregate functions")));
+ errmsg("rule WHERE condition may not contain aggregate functions")));
/* save info about sublinks in where clause */
qry->hasSubLinks = pstate->p_hasSubLinks;
/*
- * 'instead nothing' rules with a qualification need a query
- * rangetable so the rewrite handler can add the negated rule
- * qualification to the original query. We create a query with the new
- * command type CMD_NOTHING here that is treated specially by the
- * rewrite system.
+ * 'instead nothing' rules with a qualification need a query rangetable so
+ * the rewrite handler can add the negated rule qualification to the
+ * original query. We create a query with the new command type CMD_NOTHING
+ * here that is treated specially by the rewrite system.
*/
if (stmt->actions == NIL)
{
has_new;
/*
- * Set up OLD/NEW in the rtable for this statement. The
- * entries are added only to relnamespace, not varnamespace,
- * because we don't want them to be referred to by unqualified
- * field names nor "*" in the rule actions. We decide later
- * whether to put them in the joinlist.
+ * Set up OLD/NEW in the rtable for this statement. The entries
+ * are added only to relnamespace, not varnamespace, because we
+ * don't want them to be referred to by unqualified field names
+ * nor "*" in the rule actions. We decide later whether to put
+ * them in the joinlist.
*/
oldrte = addRangeTableEntryForRelation(sub_pstate, rel,
makeAlias("*OLD*", NIL),
extras_before, extras_after);
/*
- * We cannot support utility-statement actions (eg NOTIFY)
- * with nonempty rule WHERE conditions, because there's no way
- * to make the utility action execute conditionally.
+ * We cannot support utility-statement actions (eg NOTIFY) with
+ * nonempty rule WHERE conditions, because there's no way to make
+ * the utility action execute conditionally.
*/
if (top_subqry->commandType == CMD_UTILITY &&
stmt->whereClause != NULL)
errmsg("rules with WHERE conditions may only have SELECT, INSERT, UPDATE, or DELETE actions")));
/*
- * If the action is INSERT...SELECT, OLD/NEW have been pushed
- * down into the SELECT, and that's what we need to look at.
- * (Ugly kluge ... try to fix this when we redesign
- * querytrees.)
+ * If the action is INSERT...SELECT, OLD/NEW have been pushed down
+ * into the SELECT, and that's what we need to look at. (Ugly
+ * kluge ... try to fix this when we redesign querytrees.)
*/
sub_qry = getInsertSelectQuery(top_subqry, NULL);
/*
- * If the sub_qry is a setop, we cannot attach any
- * qualifications to it, because the planner won't notice
- * them. This could perhaps be relaxed someday, but for now,
- * we may as well reject such a rule immediately.
+ * If the sub_qry is a setop, we cannot attach any qualifications
+ * to it, because the planner won't notice them. This could
+ * perhaps be relaxed someday, but for now, we may as well reject
+ * such a rule immediately.
*/
if (sub_qry->setOperations != NULL && stmt->whereClause != NULL)
ereport(ERROR,
case CMD_SELECT:
if (has_old)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON SELECT rule may not use OLD")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON SELECT rule may not use OLD")));
if (has_new)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON SELECT rule may not use NEW")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON SELECT rule may not use NEW")));
break;
case CMD_UPDATE:
/* both are OK */
case CMD_INSERT:
if (has_old)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON INSERT rule may not use OLD")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON INSERT rule may not use OLD")));
break;
case CMD_DELETE:
if (has_new)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON DELETE rule may not use NEW")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON DELETE rule may not use NEW")));
break;
default:
elog(ERROR, "unrecognized event type: %d",
}
/*
- * For efficiency's sake, add OLD to the rule action's
- * jointree only if it was actually referenced in the
- * statement or qual.
+ * For efficiency's sake, add OLD to the rule action's jointree
+ * only if it was actually referenced in the statement or qual.
*
- * For INSERT, NEW is not really a relation (only a reference to
- * the to-be-inserted tuple) and should never be added to the
+ * For INSERT, NEW is not really a relation (only a reference to the
+ * to-be-inserted tuple) and should never be added to the
* jointree.
*
* For UPDATE, we treat NEW as being another kind of reference to
- * OLD, because it represents references to *transformed*
- * tuples of the existing relation. It would be wrong to
- * enter NEW separately in the jointree, since that would
- * cause a double join of the updated relation. It's also
- * wrong to fail to make a jointree entry if only NEW and not
- * OLD is mentioned.
+ * OLD, because it represents references to *transformed* tuples
+ * of the existing relation. It would be wrong to enter NEW
+ * separately in the jointree, since that would cause a double
+ * join of the updated relation. It's also wrong to fail to make
+ * a jointree entry if only NEW and not OLD is mentioned.
*/
if (has_old || (has_new && stmt->event == CMD_UPDATE))
{
/*
- * If sub_qry is a setop, manipulating its jointree will
- * do no good at all, because the jointree is dummy. (This
- * should be a can't-happen case because of prior tests.)
+ * If sub_qry is a setop, manipulating its jointree will do no
+ * good at all, because the jointree is dummy. (This should be
+ * a can't-happen case because of prior tests.)
*/
if (sub_qry->setOperations != NULL)
ereport(ERROR,
qry->commandType = CMD_SELECT;
/*
- * Find leftmost leaf SelectStmt; extract the one-time-only items from
- * it and from the top-level node.
+ * Find leftmost leaf SelectStmt; extract the one-time-only items from it
+ * and from the top-level node.
*/
leftmostSelect = stmt->larg;
while (leftmostSelect && leftmostSelect->op != SETOP_NONE)
leftmostSelect->intoColNames = NIL;
/*
- * These are not one-time, exactly, but we want to process them here
- * and not let transformSetOperationTree() see them --- else it'll
- * just recurse right back here!
+ * These are not one-time, exactly, but we want to process them here and
+ * not let transformSetOperationTree() see them --- else it'll just
+ * recurse right back here!
*/
sortClause = stmt->sortClause;
limitOffset = stmt->limitOffset;
/*
* Generate dummy targetlist for outer query using column names of
* leftmost select and common datatypes of topmost set operation. Also
- * make lists of the dummy vars and their names for use in parsing
- * ORDER BY.
+ * make lists of the dummy vars and their names for use in parsing ORDER
+ * BY.
*
- * Note: we use leftmostRTI as the varno of the dummy variables. It
- * shouldn't matter too much which RT index they have, as long as they
- * have one that corresponds to a real RT entry; else funny things may
- * happen when the tree is mashed by rule rewriting.
+ * Note: we use leftmostRTI as the varno of the dummy variables. It shouldn't
+ * matter too much which RT index they have, as long as they have one that
+ * corresponds to a real RT entry; else funny things may happen when the
+ * tree is mashed by rule rewriting.
*/
qry->targetList = NIL;
targetvars = NIL;
/*
* Handle SELECT INTO/CREATE TABLE AS.
*
- * Any column names from CREATE TABLE AS need to be attached to both the
- * top level and the leftmost subquery. We do not do this earlier
- * because we do *not* want the targetnames list to be affected.
+ * Any column names from CREATE TABLE AS need to be attached to both the top
+ * level and the leftmost subquery. We do not do this earlier because we
+ * do *not* want the targetnames list to be affected.
*/
qry->into = into;
if (intoColNames)
}
/*
- * As a first step towards supporting sort clauses that are
- * expressions using the output columns, generate a varnamespace entry
- * that makes the output columns visible. A Join RTE node is handy
- * for this, since we can easily control the Vars generated upon
- * matches.
+ * As a first step towards supporting sort clauses that are expressions
+ * using the output columns, generate a varnamespace entry that makes the
+ * output columns visible. A Join RTE node is handy for this, since we
+ * can easily control the Vars generated upon matches.
*
- * Note: we don't yet do anything useful with such cases, but at least
- * "ORDER BY upper(foo)" will draw the right error message rather than
- * "foo not found".
+ * Note: we don't yet do anything useful with such cases, but at least "ORDER
+ * BY upper(foo)" will draw the right error message rather than "foo not
+ * found".
*/
jrte = addRangeTableEntryForJoin(NULL,
targetnames,
pstate->p_rtable = list_make1(jrte);
sv_relnamespace = pstate->p_relnamespace;
- pstate->p_relnamespace = NIL; /* no qualified names allowed */
+ pstate->p_relnamespace = NIL; /* no qualified names allowed */
sv_varnamespace = pstate->p_varnamespace;
pstate->p_varnamespace = list_make1(jrte);
/*
* For now, we don't support resjunk sort clauses on the output of a
* setOperation tree --- you can only use the SQL92-spec options of
- * selecting an output column by name or number. Enforce by checking
- * that transformSortClause doesn't add any items to tlist.
+ * selecting an output column by name or number. Enforce by checking that
+ * transformSortClause doesn't add any items to tlist.
*/
tllen = list_length(qry->targetList);
qry->sortClause = transformSortClause(pstate,
sortClause,
&qry->targetList,
- false /* no unknowns expected */ );
+ false /* no unknowns expected */ );
pstate->p_rtable = sv_rtable;
pstate->p_relnamespace = sv_relnamespace;
/*
* If an internal node of a set-op tree has ORDER BY, UPDATE, or LIMIT
- * clauses attached, we need to treat it like a leaf node to generate
- * an independent sub-Query tree. Otherwise, it can be represented by
- * a SetOperationStmt node underneath the parent Query.
+ * clauses attached, we need to treat it like a leaf node to generate an
+ * independent sub-Query tree. Otherwise, it can be represented by a
+ * SetOperationStmt node underneath the parent Query.
*/
if (stmt->op == SETOP_NONE)
{
/*
* Transform SelectStmt into a Query.
*
- * Note: previously transformed sub-queries don't affect the parsing
- * of this sub-query, because they are not in the toplevel
- * pstate's namespace list.
+ * Note: previously transformed sub-queries don't affect the parsing of
+ * this sub-query, because they are not in the toplevel pstate's
+ * namespace list.
*/
selectList = parse_sub_analyze((Node *) stmt, pstate);
Assert(IsA(selectQuery, Query));
/*
- * Check for bogus references to Vars on the current query level
- * (but upper-level references are okay). Normally this can't
- * happen because the namespace will be empty, but it could happen
- * if we are inside a rule.
+ * Check for bogus references to Vars on the current query level (but
+ * upper-level references are okay). Normally this can't happen
+ * because the namespace will be empty, but it could happen if we are
+ * inside a rule.
*/
if (pstate->p_relnamespace || pstate->p_varnamespace)
{
false);
/*
- * Return a RangeTblRef to replace the SelectStmt in the set-op
- * tree.
+ * Return a RangeTblRef to replace the SelectStmt in the set-op tree.
*/
rtr = makeNode(RangeTblRef);
/* assume new rte is at end */
if (list_length(lcoltypes) != list_length(rcoltypes))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("each %s query must have the same number of columns",
- context)));
+ errmsg("each %s query must have the same number of columns",
+ context)));
op->colTypes = NIL;
forboth(l, lcoltypes, r, rcoltypes)
if (list_length(src) > list_length(dst))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("CREATE TABLE AS specifies too many column names")));
+ errmsg("CREATE TABLE AS specifies too many column names")));
forboth(dst_item, dst, src_item, src)
{
pstate->p_is_update = true;
qry->resultRelation = setTargetTable(pstate, stmt->relation,
- interpretInhOption(stmt->relation->inhOpt),
+ interpretInhOption(stmt->relation->inhOpt),
true,
ACL_UPDATE);
/*
- * the FROM clause is non-standard SQL syntax. We used to be able to
- * do this with REPLACE in POSTQUEL so we keep the feature.
+ * the FROM clause is non-standard SQL syntax. We used to be able to do
+ * this with REPLACE in POSTQUEL so we keep the feature.
*/
transformFromClause(pstate, stmt->fromClause);
if (tle->resjunk)
{
/*
- * Resjunk nodes need no additional processing, but be sure
- * they have resnos that do not match any target columns; else
- * rewriter or planner might get confused. They don't need a
- * resname either.
+ * Resjunk nodes need no additional processing, but be sure they
+ * have resnos that do not match any target columns; else rewriter
+ * or planner might get confused. They don't need a resname
+ * either.
*/
tle->resno = (AttrNumber) pstate->p_next_resno++;
tle->resname = NULL;
cxt.pkey = NULL;
/*
- * The only subtypes that currently require parse transformation
- * handling are ADD COLUMN and ADD CONSTRAINT. These largely re-use
- * code from CREATE TABLE.
+ * The only subtypes that currently require parse transformation handling
+ * are ADD COLUMN and ADD CONSTRAINT. These largely re-use code from
+ * CREATE TABLE.
*/
foreach(lcmd, stmt->cmds)
{
}
/*
- * All constraints are processed in other ways. Remove
- * the original list
+ * All constraints are processed in other ways. Remove the
+ * original list
*/
def->constraints = NIL;
case AT_AddConstraint:
/*
- * The original AddConstraint cmd node doesn't go to
- * newcmds
+ * The original AddConstraint cmd node doesn't go to newcmds
*/
if (IsA(cmd->def, Constraint))
case AT_ProcessedConstraint:
/*
- * Already-transformed ADD CONSTRAINT, so just make it
- * look like the standard case.
+ * Already-transformed ADD CONSTRAINT, so just make it look
+ * like the standard case.
*/
cmd->subtype = AT_AddConstraint;
newcmds = lappend(newcmds, cmd);
transformFKConstraints(pstate, &cxt, skipValidation, true);
/*
- * Push any index-creation commands into the ALTER, so that they can
- * be scheduled nicely by tablecmds.c.
+ * Push any index-creation commands into the ALTER, so that they can be
+ * scheduled nicely by tablecmds.c.
*/
foreach(l, cxt.alist)
{
if (nparams != nexpected)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("wrong number of parameters for prepared statement \"%s\"",
- stmt->name),
+ errmsg("wrong number of parameters for prepared statement \"%s\"",
+ stmt->name),
errdetail("Expected %d parameters but got %d.",
nexpected, nparams)));
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in EXECUTE parameter")));
+ errmsg("cannot use subquery in EXECUTE parameter")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
i,
format_type_be(given_type_id),
format_type_be(expected_type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
lfirst(l) = expr;
i++;
if (qry->setOperations)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
- errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", operation)));
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", operation)));
if (qry->distinctClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
- errmsg("%s is not allowed with DISTINCT clause", operation)));
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ errmsg("%s is not allowed with DISTINCT clause", operation)));
if (qry->groupClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
- errmsg("%s is not allowed with GROUP BY clause", operation)));
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ errmsg("%s is not allowed with GROUP BY clause", operation)));
if (qry->havingQual != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
errmsg("%s is not allowed with HAVING clause", operation)));
if (qry->hasAggs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL command, like SELECT FOR UPDATE */
- errmsg("%s is not allowed with aggregate functions", operation)));
+ /* translator: %s is a SQL command, like SELECT FOR UPDATE */
+ errmsg("%s is not allowed with aggregate functions", operation)));
}
/*
if (lc->forUpdate != qry->forUpdate)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use both FOR UPDATE and FOR SHARE in one query")));
+ errmsg("cannot use both FOR UPDATE and FOR SHARE in one query")));
if (lc->nowait != qry->rowNoWait)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/* make a clause we can pass down to subqueries to select all rels */
allrels = makeNode(LockingClause);
- allrels->lockedRels = NIL; /* indicates all rels */
+ allrels->lockedRels = NIL; /* indicates all rels */
allrels->forUpdate = lc->forUpdate;
allrels->nowait = lc->nowait;
case RTE_SUBQUERY:
/*
- * FOR UPDATE/SHARE of subquery is propagated to all
- * of subquery's rels
+ * FOR UPDATE/SHARE of subquery is propagated to all of
+ * subquery's rels
*/
transformLockingClause(rte->subquery, allrels);
break;
break;
case RTE_JOIN:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE cannot be applied to a join")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE/SHARE cannot be applied to a join")));
break;
case RTE_SPECIAL:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE cannot be applied to NEW or OLD")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE/SHARE cannot be applied to NEW or OLD")));
break;
case RTE_FUNCTION:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE cannot be applied to a function")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE/SHARE cannot be applied to a function")));
break;
default:
elog(ERROR, "unrecognized RTE type: %d",
!IsA(lastprimarynode, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("misplaced NOT DEFERRABLE clause")));
+ errmsg("misplaced NOT DEFERRABLE clause")));
if (saw_deferrability)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
!IsA(lastprimarynode, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("misplaced INITIALLY DEFERRED clause")));
+ errmsg("misplaced INITIALLY DEFERRED clause")));
if (saw_initially)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
((FkConstraint *) lastprimarynode)->initdeferred = true;
/*
- * If only INITIALLY DEFERRED appears, assume
- * DEFERRABLE
+ * If only INITIALLY DEFERRED appears, assume DEFERRABLE
*/
if (!saw_deferrability)
((FkConstraint *) lastprimarynode)->deferrable = true;
!IsA(lastprimarynode, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("misplaced INITIALLY IMMEDIATE clause")));
+ errmsg("misplaced INITIALLY IMMEDIATE clause")));
if (saw_initially)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
cxt.alist = NIL;
/*
- * Run through each schema element in the schema element list.
- * Separate statements by type, and do preliminary analysis.
+ * Run through each schema element in the schema element list. Separate
+ * statements by type, and do preliminary analysis.
*/
foreach(elements, stmt->schemaElts)
{
*/
static bool
check_parameter_resolution_walker(Node *node,
- check_parameter_resolution_context *context)
+ check_parameter_resolution_context *context)
{
if (node == NULL)
return false;
if (param->paramtype != context->paramTypes[paramno - 1])
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
- errmsg("could not determine data type of parameter $%d",
- paramno)));
+ errmsg("could not determine data type of parameter $%d",
+ paramno)));
}
return false;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/keywords.c,v 1.165 2005/08/23 22:40:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/keywords.c,v 1.166 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return NULL;
/*
- * Apply an ASCII-only downcasing. We must not use tolower() since it
- * may produce the wrong translation in some locales (eg, Turkish).
+ * Apply an ASCII-only downcasing. We must not use tolower() since it may
+ * produce the wrong translation in some locales (eg, Turkish).
*/
for (i = 0; i < len; i++)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_agg.c,v 1.69 2005/06/05 22:32:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_agg.c,v 1.70 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* The aggregate's level is the same as the level of the lowest-level
- * variable or aggregate in its argument; or if it contains no
- * variables at all, we presume it to be local.
+ * variable or aggregate in its argument; or if it contains no variables
+ * at all, we presume it to be local.
*/
min_varlevel = find_minimum_var_level((Node *) agg->target);
/*
- * An aggregate can't directly contain another aggregate call of the
- * same level (though outer aggs are okay). We can skip this check if
- * we didn't find any local vars or aggs.
+ * An aggregate can't directly contain another aggregate call of the same
+ * level (though outer aggs are okay). We can skip this check if we
+ * didn't find any local vars or aggs.
*/
if (min_varlevel == 0)
{
if (checkExprHasAggs((Node *) agg->target))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregate function calls may not be nested")));
+ errmsg("aggregate function calls may not be nested")));
}
if (min_varlevel < 0)
/*
* No aggregates allowed in GROUP BY clauses, either.
*
- * While we are at it, build a list of the acceptable GROUP BY
- * expressions for use by check_ungrouped_columns().
+ * While we are at it, build a list of the acceptable GROUP BY expressions
+ * for use by check_ungrouped_columns().
*/
foreach(l, qry->groupClause)
{
if (checkExprHasAggs(expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregates not allowed in GROUP BY clause")));
+ errmsg("aggregates not allowed in GROUP BY clause")));
groupClauses = lcons(expr, groupClauses);
}
/*
- * If there are join alias vars involved, we have to flatten them to
- * the underlying vars, so that aliased and unaliased vars will be
- * correctly taken as equal. We can skip the expense of doing this if
- * no rangetable entries are RTE_JOIN kind.
+ * If there are join alias vars involved, we have to flatten them to the
+ * underlying vars, so that aliased and unaliased vars will be correctly
+ * taken as equal. We can skip the expense of doing this if no rangetable
+ * entries are RTE_JOIN kind.
*/
hasJoinRTEs = false;
foreach(l, pstate->p_rtable)
/*
* We use the planner's flatten_join_alias_vars routine to do the
- * flattening; it wants a PlannerInfo root node, which fortunately
- * can be mostly dummy.
+ * flattening; it wants a PlannerInfo root node, which fortunately can be
+ * mostly dummy.
*/
if (hasJoinRTEs)
{
root->hasJoinRTEs = true;
groupClauses = (List *) flatten_join_alias_vars(root,
- (Node *) groupClauses);
+ (Node *) groupClauses);
}
else
root = NULL; /* keep compiler quiet */
/*
- * Detect whether any of the grouping expressions aren't simple Vars;
- * if they're all Vars then we don't have to work so hard in the
- * recursive scans. (Note we have to flatten aliases before this.)
+ * Detect whether any of the grouping expressions aren't simple Vars; if
+ * they're all Vars then we don't have to work so hard in the recursive
+ * scans. (Note we have to flatten aliases before this.)
*/
have_non_var_grouping = false;
foreach(l, groupClauses)
return false; /* constants are always acceptable */
/*
- * If we find an aggregate call of the original level, do not recurse
- * into its arguments; ungrouped vars in the arguments are not an
- * error. We can also skip looking at the arguments of aggregates of
- * higher levels, since they could not possibly contain Vars that are
- * of concern to us (see transformAggregateCall). We do need to look
- * into the arguments of aggregates of lower levels, however.
+ * If we find an aggregate call of the original level, do not recurse into
+ * its arguments; ungrouped vars in the arguments are not an error. We can
+ * also skip looking at the arguments of aggregates of higher levels,
+ * since they could not possibly contain Vars that are of concern to us
+ * (see transformAggregateCall). We do need to look into the arguments of
+ * aggregates of lower levels, however.
*/
if (IsA(node, Aggref) &&
(int) ((Aggref *) node)->agglevelsup >= context->sublevels_up)
return false;
/*
- * If we have any GROUP BY items that are not simple Vars, check to
- * see if subexpression as a whole matches any GROUP BY item. We need
- * to do this at every recursion level so that we recognize GROUPed-BY
- * expressions before reaching variables within them. But this only
- * works at the outer query level, as noted above.
+ * If we have any GROUP BY items that are not simple Vars, check to see if
+ * subexpression as a whole matches any GROUP BY item. We need to do this
+ * at every recursion level so that we recognize GROUPed-BY expressions
+ * before reaching variables within them. But this only works at the outer
+ * query level, as noted above.
*/
if (context->have_non_var_grouping && context->sublevels_up == 0)
{
/*
* If we have an ungrouped Var of the original query level, we have a
- * failure. Vars below the original query level are not a problem,
- * and neither are Vars from above it. (If such Vars are ungrouped as
- * far as their own query level is concerned, that's someone else's
- * problem...)
+ * failure. Vars below the original query level are not a problem, and
+ * neither are Vars from above it. (If such Vars are ungrouped as far as
+ * their own query level is concerned, that's someone else's problem...)
*/
if (IsA(node, Var))
{
/* Found an ungrouped local variable; generate error message */
Assert(var->varno > 0 &&
- (int) var->varno <= list_length(context->pstate->p_rtable));
+ (int) var->varno <= list_length(context->pstate->p_rtable));
rte = rt_fetch(var->varno, context->pstate->p_rtable);
attname = get_rte_attribute_name(rte, var->varattno);
if (context->sublevels_up == 0)
transfn_nargs = get_func_nargs(transfn_oid);
/*
- * Build arg list to use in the transfn FuncExpr node. We really only
- * care that transfn can discover the actual argument types at runtime
- * using get_fn_expr_argtype(), so it's okay to use Param nodes that
- * don't correspond to any real Param.
+ * Build arg list to use in the transfn FuncExpr node. We really only care
+ * that transfn can discover the actual argument types at runtime using
+ * get_fn_expr_argtype(), so it's okay to use Param nodes that don't
+ * correspond to any real Param.
*/
arg0 = makeNode(Param);
arg0->paramkind = PARAM_EXEC;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.142 2005/06/05 00:38:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.143 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ListCell *fl;
/*
- * The grammar will have produced a list of RangeVars,
- * RangeSubselects, RangeFunctions, and/or JoinExprs. Transform each
- * one (possibly adding entries to the rtable), check for duplicate
- * refnames, and then add it to the joinlist and namespaces.
+ * The grammar will have produced a list of RangeVars, RangeSubselects,
+ * RangeFunctions, and/or JoinExprs. Transform each one (possibly adding
+ * entries to the rtable), check for duplicate refnames, and then add it
+ * to the joinlist and namespaces.
*/
foreach(fl, frmList)
{
heap_close(pstate->p_target_relation, NoLock);
/*
- * Open target rel and grab suitable lock (which we will hold till end
- * of transaction).
+ * Open target rel and grab suitable lock (which we will hold till end of
+ * transaction).
*
* analyze.c will eventually do the corresponding heap_close(), but *not*
* release the lock.
Assert(rte == rt_fetch(rtindex, pstate->p_rtable));
/*
- * Override addRangeTableEntry's default ACL_SELECT permissions check,
- * and instead mark target table as requiring exactly the specified
+ * Override addRangeTableEntry's default ACL_SELECT permissions check, and
+ * instead mark target table as requiring exactly the specified
* permissions.
*
- * If we find an explicit reference to the rel later during parse
- * analysis, scanRTEForColumn will add the ACL_SELECT bit back again.
- * That can't happen for INSERT but it is possible for UPDATE and
- * DELETE.
+ * If we find an explicit reference to the rel later during parse analysis,
+ * scanRTEForColumn will add the ACL_SELECT bit back again. That can't
+ * happen for INSERT but it is possible for UPDATE and DELETE.
*/
rte->requiredPerms = requiredPerms;
*rvars;
/*
- * We cheat a little bit here by building an untransformed operator
- * tree whose leaves are the already-transformed Vars. This is OK
- * because transformExpr() won't complain about already-transformed
- * subnodes.
+ * We cheat a little bit here by building an untransformed operator tree
+ * whose leaves are the already-transformed Vars. This is OK because
+ * transformExpr() won't complain about already-transformed subnodes.
*/
forboth(lvars, leftVars, rvars, rightVars)
{
}
/*
- * Since the references are already Vars, and are certainly from the
- * input relations, we don't have to go through the same pushups that
- * transformJoinOnClause() does. Just invoke transformExpr() to fix
- * up the operators, and we're done.
+ * Since the references are already Vars, and are certainly from the input
+ * relations, we don't have to go through the same pushups that
+ * transformJoinOnClause() does. Just invoke transformExpr() to fix up
+ * the operators, and we're done.
*/
result = transformExpr(pstate, result);
int varno;
/*
- * This is a tad tricky, for two reasons. First, the namespace that
- * the join expression should see is just the two subtrees of the JOIN
- * plus any outer references from upper pstate levels. So,
- * temporarily set this pstate's namespace accordingly. (We need not
- * check for refname conflicts, because transformFromClauseItem()
- * already did.) NOTE: this code is OK only because the ON clause
- * can't legally alter the namespace by causing implicit relation refs
- * to be added.
+ * This is a tad tricky, for two reasons. First, the namespace that the
+ * join expression should see is just the two subtrees of the JOIN plus
+ * any outer references from upper pstate levels. So, temporarily set
+ * this pstate's namespace accordingly. (We need not check for refname
+ * conflicts, because transformFromClauseItem() already did.) NOTE: this
+ * code is OK only because the ON clause can't legally alter the namespace
+ * by causing implicit relation refs to be added.
*/
save_relnamespace = pstate->p_relnamespace;
save_varnamespace = pstate->p_varnamespace;
/*
* Second, we need to check that the ON condition doesn't refer to any
- * rels outside the input subtrees of the JOIN. It could do that
- * despite our hack on the namespace if it uses fully-qualified names.
- * So, grovel through the transformed clause and make sure there are
- * no bogus references. (Outer references are OK, and are ignored
- * here.)
+ * rels outside the input subtrees of the JOIN. It could do that despite
+ * our hack on the namespace if it uses fully-qualified names. So, grovel
+ * through the transformed clause and make sure there are no bogus
+ * references. (Outer references are OK, and are ignored here.)
*/
clause_varnos = pull_varnos(result);
clause_varnos = bms_del_members(clause_varnos, containedRels);
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("JOIN/ON clause refers to \"%s\", which is not part of JOIN",
- rt_fetch(varno, pstate->p_rtable)->eref->aliasname)));
+ errmsg("JOIN/ON clause refers to \"%s\", which is not part of JOIN",
+ rt_fetch(varno, pstate->p_rtable)->eref->aliasname)));
}
bms_free(clause_varnos);
RangeTblEntry *rte;
/*
- * mark this entry to indicate it comes from the FROM clause. In SQL,
- * the target list can only refer to range variables specified in the
- * from clause but we follow the more powerful POSTQUEL semantics and
+ * mark this entry to indicate it comes from the FROM clause. In SQL, the
+ * target list can only refer to range variables specified in the from
+ * clause but we follow the more powerful POSTQUEL semantics and
* automatically generate the range variable if not specified. However
* there are times we need to know whether the entries are legitimate.
*/
RangeTblEntry *rte;
/*
- * We require user to supply an alias for a subselect, per SQL92. To
- * relax this, we'd have to be prepared to gin up a unique alias for
- * an unlabeled subselect.
+ * We require user to supply an alias for a subselect, per SQL92. To relax
+ * this, we'd have to be prepared to gin up a unique alias for an
+ * unlabeled subselect.
*/
if (r->alias == NULL)
ereport(ERROR,
parsetrees = parse_sub_analyze(r->subquery, pstate);
/*
- * Check that we got something reasonable. Most of these conditions
- * are probably impossible given restrictions of the grammar, but
- * check 'em anyway.
+ * Check that we got something reasonable. Most of these conditions are
+ * probably impossible given restrictions of the grammar, but check 'em
+ * anyway.
*/
if (list_length(parsetrees) != 1)
elog(ERROR, "unexpected parse analysis result for subquery in FROM");
errmsg("subquery in FROM may not have SELECT INTO")));
/*
- * The subquery cannot make use of any variables from FROM items
- * created earlier in the current query. Per SQL92, the scope of a
- * FROM item does not include other FROM items. Formerly we hacked
- * the namespace so that the other variables weren't even visible, but
- * it seems more useful to leave them visible and give a specific
- * error message.
+ * The subquery cannot make use of any variables from FROM items created
+ * earlier in the current query. Per SQL92, the scope of a FROM item does
+ * not include other FROM items. Formerly we hacked the namespace so that
+ * the other variables weren't even visible, but it seems more useful to
+ * leave them visible and give a specific error message.
*
* XXX this will need further work to support SQL99's LATERAL() feature,
* wherein such references would indeed be legal.
*
- * We can skip groveling through the subquery if there's not anything
- * visible in the current query. Also note that outer references are
- * OK.
+ * We can skip groveling through the subquery if there's not anything visible
+ * in the current query. Also note that outer references are OK.
*/
if (pstate->p_relnamespace || pstate->p_varnamespace)
{
/*
* Get function name for possible use as alias. We use the same
- * transformation rules as for a SELECT output expression. For a
- * FuncCall node, the result will be the function name, but it is
- * possible for the grammar to hand back other node types.
+ * transformation rules as for a SELECT output expression. For a FuncCall
+ * node, the result will be the function name, but it is possible for the
+ * grammar to hand back other node types.
*/
funcname = FigureColname(r->funccallnode);
/*
* The function parameters cannot make use of any variables from other
* FROM items. (Compare to transformRangeSubselect(); the coding is
- * different though because we didn't parse as a sub-select with its
- * own level of namespace.)
+ * different though because we didn't parse as a sub-select with its own
+ * level of namespace.)
*
* XXX this will need further work to support SQL99's LATERAL() feature,
* wherein such references would indeed be legal.
}
/*
- * Disallow aggregate functions in the expression. (No reason to
- * postpone this check until parseCheckAggregates.)
+ * Disallow aggregate functions in the expression. (No reason to postpone
+ * this check until parseCheckAggregates.)
*/
if (pstate->p_hasAggs)
{
}
/*
- * If a coldeflist is supplied, ensure it defines a legal set of names
- * (no duplicates) and datatypes (no pseudo-types, for instance).
+ * If a coldeflist is supplied, ensure it defines a legal set of names (no
+ * duplicates) and datatypes (no pseudo-types, for instance).
*/
if (r->coldeflist)
{
* (We could extract this from the function return node, but it saves cycles
* to pass it back separately.)
*
- * *top_rti: receives the rangetable index of top_rte. (Ditto.)
+ * *top_rti: receives the rangetable index of top_rte. (Ditto.)
*
* *relnamespace: receives a List of the RTEs exposed as relation names
* by this item.
/* Plain relation reference */
RangeTblRef *rtr;
RangeTblEntry *rte;
- int rtindex;
+ int rtindex;
rte = transformTableEntry(pstate, (RangeVar *) n);
/* assume new rte is at end */
/* sub-SELECT is like a plain relation */
RangeTblRef *rtr;
RangeTblEntry *rte;
- int rtindex;
+ int rtindex;
rte = transformRangeSubselect(pstate, (RangeSubselect *) n);
/* assume new rte is at end */
/* function is like a plain relation */
RangeTblRef *rtr;
RangeTblEntry *rte;
- int rtindex;
+ int rtindex;
rte = transformRangeFunction(pstate, (RangeFunction *) n);
/* assume new rte is at end */
&r_containedRels);
/*
- * Check for conflicting refnames in left and right subtrees. Must
- * do this because higher levels will assume I hand back a self-
+ * Check for conflicting refnames in left and right subtrees. Must do
+ * this because higher levels will assume I hand back a self-
* consistent namespace subtree.
*/
checkNameSpaceConflicts(pstate, l_relnamespace, r_relnamespace);
/*
* Natural join does not explicitly specify columns; must generate
- * columns to join. Need to run through the list of columns from
- * each table or join result and match up the column names. Use
- * the first table, and check every column in the second table for
- * a match. (We'll check that the matches were unique later on.)
- * The result of this step is a list of column names just like an
- * explicitly-written USING list.
+ * columns to join. Need to run through the list of columns from each
+ * table or join result and match up the column names. Use the first
+ * table, and check every column in the second table for a match.
+ * (We'll check that the matches were unique later on.) The result of
+ * this step is a list of column names just like an explicitly-written
+ * USING list.
*/
if (j->isNatural)
{
if (j->using)
{
/*
- * JOIN/USING (or NATURAL JOIN, as transformed above).
- * Transform the list into an explicit ON-condition, and
- * generate a list of merged result columns.
+ * JOIN/USING (or NATURAL JOIN, as transformed above). Transform
+ * the list into an explicit ON-condition, and generate a list of
+ * merged result columns.
*/
List *ucols = j->using;
List *l_usingvars = NIL;
*top_rti = j->rtindex;
/*
- * Prepare returned namespace list. If the JOIN has an alias
- * then it hides the contained RTEs as far as the relnamespace
- * goes; otherwise, put the contained RTEs and *not* the JOIN
- * into relnamespace.
+ * Prepare returned namespace list. If the JOIN has an alias then it
+ * hides the contained RTEs as far as the relnamespace goes;
+ * otherwise, put the contained RTEs and *not* the JOIN into
+ * relnamespace.
*/
if (j->alias)
{
}
/*
- * Insert coercion functions if needed. Note that a difference in
- * typmod can only happen if input has typmod but outcoltypmod is -1.
- * In that case we insert a RelabelType to clearly mark that result's
- * typmod is not same as input. We never need coerce_type_typmod.
+ * Insert coercion functions if needed. Note that a difference in typmod
+ * can only happen if input has typmod but outcoltypmod is -1. In that
+ * case we insert a RelabelType to clearly mark that result's typmod is
+ * not same as input. We never need coerce_type_typmod.
*/
if (l_colvar->vartype != outcoltype)
l_node = coerce_type(pstate, (Node *) l_colvar, l_colvar->vartype,
case JOIN_FULL:
{
/*
- * Here we must build a COALESCE expression to ensure that
- * the join output is non-null if either input is.
+ * Here we must build a COALESCE expression to ensure that the
+ * join output is non-null if either input is.
*/
CoalesceExpr *c = makeNode(CoalesceExpr);
qual = coerce_to_integer(pstate, qual, constructName);
/*
- * LIMIT can't refer to any vars or aggregates of the current query;
- * we don't allow subselects either (though that case would at least
- * be sensible)
+ * LIMIT can't refer to any vars or aggregates of the current query; we
+ * don't allow subselects either (though that case would at least be
+ * sensible)
*/
if (contain_vars_of_level(qual, 0))
{
{
/*
* In GROUP BY, we must prefer a match against a FROM-clause
- * column to one against the targetlist. Look to see if there
- * is a matching column. If so, fall through to let
- * transformExpr() do the rest. NOTE: if name could refer
- * ambiguously to more than one column name exposed by FROM,
- * colNameToVar will ereport(ERROR). That's just what we want
- * here.
+ * column to one against the targetlist. Look to see if there is
+ * a matching column. If so, fall through to let transformExpr()
+ * do the rest. NOTE: if name could refer ambiguously to more
+ * than one column name exposed by FROM, colNameToVar will
+ * ereport(ERROR). That's just what we want here.
*
- * Small tweak for 7.4.3: ignore matches in upper query levels.
- * This effectively changes the search order for bare names to
- * (1) local FROM variables, (2) local targetlist aliases, (3)
- * outer FROM variables, whereas before it was (1) (3) (2).
- * SQL92 and SQL99 do not allow GROUPing BY an outer
- * reference, so this breaks no cases that are legal per spec,
- * and it seems a more self-consistent behavior.
+ * Small tweak for 7.4.3: ignore matches in upper query levels. This
+ * effectively changes the search order for bare names to (1)
+ * local FROM variables, (2) local targetlist aliases, (3) outer
+ * FROM variables, whereas before it was (1) (3) (2). SQL92 and
+ * SQL99 do not allow GROUPing BY an outer reference, so this
+ * breaks no cases that are legal per spec, and it seems a more
+ * self-consistent behavior.
*/
if (colNameToVar(pstate, name, true) != NULL)
name = NULL;
}
/*
- * If no matches, construct a new target entry which is appended to
- * the end of the target list. This target is given resjunk = TRUE so
- * that it will not be projected into the final tuple.
+ * If no matches, construct a new target entry which is appended to the
+ * end of the target list. This target is given resjunk = TRUE so that it
+ * will not be projected into the final tuple.
*/
target_result = transformTargetEntry(pstate, node, expr, NULL, true);
/*
* If the GROUP BY clause matches the ORDER BY clause, we want to
- * adopt the ordering operators from the latter rather than using
- * the default ops. This allows "GROUP BY foo ORDER BY foo DESC"
- * to be done with only one sort step. Note we are assuming that
- * any user-supplied ordering operator will bring equal values
- * together, which is all that GROUP BY needs.
+ * adopt the ordering operators from the latter rather than using the
+ * default ops. This allows "GROUP BY foo ORDER BY foo DESC" to be
+ * done with only one sort step. Note we are assuming that any
+ * user-supplied ordering operator will bring equal values together,
+ * which is all that GROUP BY needs.
*/
if (sortItem &&
((SortClause *) lfirst(sortItem))->tleSortGroupRef ==
/* We had SELECT DISTINCT */
/*
- * All non-resjunk elements from target list that are not already
- * in the sort list should be added to it. (We don't really care
- * what order the DISTINCT fields are checked in, so we can leave
- * the user's ORDER BY spec alone, and just add additional sort
- * keys to it to ensure that all targetlist items get sorted.)
+ * All non-resjunk elements from target list that are not already in
+ * the sort list should be added to it. (We don't really care what
+ * order the DISTINCT fields are checked in, so we can leave the
+ * user's ORDER BY spec alone, and just add additional sort keys to it
+ * to ensure that all targetlist items get sorted.)
*/
*sortClause = addAllTargetsToSortList(pstate,
*sortClause,
/*
* Now, DISTINCT list consists of all non-resjunk sortlist items.
* Actually, all the sortlist items had better be non-resjunk!
- * Otherwise, user wrote SELECT DISTINCT with an ORDER BY item
- * that does not appear anywhere in the SELECT targetlist, and we
- * can't implement that with only one sorting pass...
+ * Otherwise, user wrote SELECT DISTINCT with an ORDER BY item that
+ * does not appear anywhere in the SELECT targetlist, and we can't
+ * implement that with only one sorting pass...
*/
foreach(slitem, *sortClause)
{
* If the user writes both DISTINCT ON and ORDER BY, then the two
* expression lists must match (until one or the other runs out).
* Otherwise the ORDER BY requires a different sort order than the
- * DISTINCT does, and we can't implement that with only one sort
- * pass (and if we do two passes, the results will be rather
+ * DISTINCT does, and we can't implement that with only one sort pass
+ * (and if we do two passes, the results will be rather
* unpredictable). However, it's OK to have more DISTINCT ON
- * expressions than ORDER BY expressions; we can just add the
- * extra DISTINCT values to the sort list, much as we did above
- * for ordinary DISTINCT fields.
+ * expressions than ORDER BY expressions; we can just add the extra
+ * DISTINCT values to the sort list, much as we did above for ordinary
+ * DISTINCT fields.
*
- * Actually, it'd be OK for the common prefixes of the two lists to
- * match in any order, but implementing that check seems like more
- * trouble than it's worth.
+ * Actually, it'd be OK for the common prefixes of the two lists to match
+ * in any order, but implementing that check seems like more trouble
+ * than it's worth.
*/
ListCell *nextsortlist = list_head(*sortClause);
else
{
*sortClause = addTargetToSortList(pstate, tle,
- *sortClause, *targetlist,
+ *sortClause, *targetlist,
SORTBY_ASC, NIL, true);
/*
- * Probably, the tle should always have been added at the
- * end of the sort list ... but search to be safe.
+ * Probably, the tle should always have been added at the end
+ * of the sort list ... but search to be safe.
*/
foreach(slitem, *sortClause)
{
Index maxRef;
ListCell *l;
- if (tle->ressortgroupref) /* already has one? */
+ if (tle->ressortgroupref) /* already has one? */
return tle->ressortgroupref;
/* easiest way to pick an unused refnumber: max used + 1 */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.131 2005/06/04 19:19:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.132 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ccontext, cformat);
/*
- * If the target is a fixed-length type, it may need a length coercion
- * as well as a type coercion. If we find ourselves adding both,
- * force the inner coercion node to implicit display form.
+ * If the target is a fixed-length type, it may need a length coercion as
+ * well as a type coercion. If we find ourselves adding both, force the
+ * inner coercion node to implicit display form.
*/
result = coerce_type_typmod(result,
targettype, targettypmod,
if (inputTypeId == UNKNOWNOID && IsA(node, Const))
{
/*
- * Input is a string constant with previously undetermined type.
- * Apply the target type's typinput function to it to produce a
- * constant of the target type.
+ * Input is a string constant with previously undetermined type. Apply
+ * the target type's typinput function to it to produce a constant of
+ * the target type.
*
* NOTE: this case cannot be folded together with the other
* constant-input case, since the typinput function does not
* float-to-int type conversion will round to integer.
*
* XXX if the typinput function is not immutable, we really ought to
- * postpone evaluation of the function call until runtime. But
- * there is no way to represent a typinput function call as an
- * expression tree, because C-string values are not Datums. (XXX
- * This *is* possible as of 7.3, do we want to do it?)
+ * postpone evaluation of the function call until runtime. But there
+ * is no way to represent a typinput function call as an expression
+ * tree, because C-string values are not Datums. (XXX This *is*
+ * possible as of 7.3, do we want to do it?)
*/
Const *con = (Const *) node;
Const *newcon = makeNode(Const);
/*
* We pass typmod -1 to the input routine, primarily because
- * existing input routines follow implicit-coercion semantics
- * for length checks, which is not always what we want here.
- * Any length constraint will be applied later by our caller.
+ * existing input routines follow implicit-coercion semantics for
+ * length checks, which is not always what we want here. Any
+ * length constraint will be applied later by our caller.
*
- * Note that we call stringTypeDatum using the domain's pg_type
- * row, if it's a domain. This works because the domain row
- * has the same typinput and typelem as the base type ---
- * ugly...
+ * Note that we call stringTypeDatum using the domain's pg_type row,
+ * if it's a domain. This works because the domain row has the
+ * same typinput and typelem as the base type --- ugly...
*/
newcon->constvalue = stringTypeDatum(targetType, val, -1);
}
pstate != NULL && pstate->p_variableparams)
{
/*
- * Input is a Param of previously undetermined type, and we want
- * to update our knowledge of the Param's type. Find the topmost
+ * Input is a Param of previously undetermined type, and we want to
+ * update our knowledge of the Param's type. Find the topmost
* ParseState and update the state.
*/
Param *param = (Param *) node;
/* Ooops */
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
- errmsg("inconsistent types deduced for parameter $%d",
- paramno),
+ errmsg("inconsistent types deduced for parameter $%d",
+ paramno),
errdetail("%s versus %s",
- format_type_be(toppstate->p_paramtypes[paramno - 1]),
+ format_type_be(toppstate->p_paramtypes[paramno - 1]),
format_type_be(targetTypeId))));
}
if (OidIsValid(funcId))
{
/*
- * Generate an expression tree representing run-time
- * application of the conversion function. If we are dealing
- * with a domain target type, the conversion function will
- * yield the base type, and we need to extract the correct
- * typmod to use from the domain's typtypmod.
+ * Generate an expression tree representing run-time application
+ * of the conversion function. If we are dealing with a domain
+ * target type, the conversion function will yield the base type,
+ * and we need to extract the correct typmod to use from the
+ * domain's typtypmod.
*/
Oid baseTypeId = getBaseType(targetTypeId);
int32 baseTypeMod;
result = build_coercion_expression(node, funcId,
baseTypeId, baseTypeMod,
cformat,
- (cformat != COERCE_IMPLICIT_CAST));
+ (cformat != COERCE_IMPLICIT_CAST));
/*
- * If domain, coerce to the domain type and relabel with
- * domain type ID. We can skip the internal length-coercion
- * step if the selected coercion function was a type-and-length
- * coercion.
+ * If domain, coerce to the domain type and relabel with domain
+ * type ID. We can skip the internal length-coercion step if the
+ * selected coercion function was a type-and-length coercion.
*/
if (targetTypeId != baseTypeId)
result = coerce_to_domain(result, baseTypeId, targetTypeId,
else
{
/*
- * We don't need to do a physical conversion, but we do need
- * to attach a RelabelType node so that the expression will be
- * seen to have the intended type when inspected by
- * higher-level code.
+ * We don't need to do a physical conversion, but we do need to
+ * attach a RelabelType node so that the expression will be seen
+ * to have the intended type when inspected by higher-level code.
*
* Also, domains may have value restrictions beyond the base type
* that must be accounted for. If the destination is a domain
if (result == node)
{
/*
- * XXX could we label result with exprTypmod(node) instead
- * of default -1 typmod, to save a possible
- * length-coercion later? Would work if both types have
- * same interpretation of typmod, which is likely but not
- * certain.
+ * XXX could we label result with exprTypmod(node) instead of
+ * default -1 typmod, to save a possible length-coercion
+ * later? Would work if both types have same interpretation of
+ * typmod, which is likely but not certain.
*/
result = (Node *) makeRelabelType((Expr *) result,
targetTypeId, -1,
{
/*
* Input class type is a subclass of target, so generate an
- * appropriate runtime conversion (removing unneeded columns
- * and possibly rearranging the ones that are wanted).
+ * appropriate runtime conversion (removing unneeded columns and
+ * possibly rearranging the ones that are wanted).
*/
ConvertRowtypeExpr *r = makeNode(ConvertRowtypeExpr);
}
/*
- * If input is an untyped string constant, assume we can convert
- * it to anything.
+ * If input is an untyped string constant, assume we can convert it to
+ * anything.
*/
if (inputTypeId == UNKNOWNOID)
continue;
/*
- * If pg_cast shows that we can coerce, accept. This test now
- * covers both binary-compatible and coercion-function cases.
+ * If pg_cast shows that we can coerce, accept. This test now covers
+ * both binary-compatible and coercion-function cases.
*/
if (find_coercion_pathway(targetTypeId, inputTypeId, ccontext,
&funcId))
continue;
/*
- * If input is RECORD and target is a composite type, assume we
- * can coerce (may need tighter checking here)
+ * If input is RECORD and target is a composite type, assume we can
+ * coerce (may need tighter checking here)
*/
if (inputTypeId == RECORDOID &&
ISCOMPLEX(targetTypeId))
hide_coercion_node(arg);
/*
- * If the domain applies a typmod to its base type, build the
- * appropriate coercion step. Mark it implicit for display purposes,
- * because we don't want it shown separately by ruleutils.c; but the
- * isExplicit flag passed to the conversion function depends on the
- * manner in which the domain coercion is invoked, so that the
- * semantics of implicit and explicit coercion differ. (Is that
- * really the behavior we want?)
+ * If the domain applies a typmod to its base type, build the appropriate
+ * coercion step. Mark it implicit for display purposes, because we don't
+ * want it shown separately by ruleutils.c; but the isExplicit flag passed
+ * to the conversion function depends on the manner in which the domain
+ * coercion is invoked, so that the semantics of implicit and explicit
+ * coercion differ. (Is that really the behavior we want?)
*
* NOTE: because we apply this as part of the fixed expression structure,
- * ALTER DOMAIN cannot alter the typtypmod. But it's unclear that
- * that would be safe to do anyway, without lots of knowledge about
- * what the base type thinks the typmod means.
+ * ALTER DOMAIN cannot alter the typtypmod. But it's unclear that that
+ * would be safe to do anyway, without lots of knowledge about what the
+ * base type thinks the typmod means.
*/
if (!lengthCoercionDone)
{
- int32 typmod = get_typtypmod(typeId);
+ int32 typmod = get_typtypmod(typeId);
if (typmod >= 0)
arg = coerce_type_typmod(arg, baseTypeId, typmod,
}
/*
- * Now build the domain coercion node. This represents run-time
- * checking of any constraints currently attached to the domain. This
- * also ensures that the expression is properly labeled as to result
- * type.
+ * Now build the domain coercion node. This represents run-time checking
+ * of any constraints currently attached to the domain. This also ensures
+ * that the expression is properly labeled as to result type.
*/
result = makeNode(CoerceToDomain);
result->arg = (Expr *) arg;
Oid funcId;
/*
- * A negative typmod is assumed to mean that no coercion is wanted.
- * Also, skip coercion if already done.
+ * A negative typmod is assumed to mean that no coercion is wanted. Also,
+ * skip coercion if already done.
*/
if (targetTypMod < 0 || targetTypMod == exprTypmod(node))
return node;
procstruct = (Form_pg_proc) GETSTRUCT(tp);
/*
- * Asserts essentially check that function is a legal coercion
- * function. We can't make the seemingly obvious tests on prorettype
- * and proargtypes[0], because of various binary-compatibility cases.
+ * Asserts essentially check that function is a legal coercion function.
+ * We can't make the seemingly obvious tests on prorettype and
+ * proargtypes[0], because of various binary-compatibility cases.
*/
/* Assert(targetTypeId == procstruct->prorettype); */
Assert(!procstruct->proretset);
if (node && IsA(node, RowExpr))
{
/*
- * Since the RowExpr must be of type RECORD, we needn't worry
- * about it containing any dropped columns.
+ * Since the RowExpr must be of type RECORD, we needn't worry about it
+ * containing any dropped columns.
*/
args = ((RowExpr *) node)->args;
}
if (tupdesc->attrs[i]->attisdropped)
{
/*
- * can't use atttypid here, but it doesn't really matter what
- * type the Const claims to be.
+ * can't use atttypid here, but it doesn't really matter what type
+ * the Const claims to be.
*/
newargs = lappend(newargs, makeNullConst(INT4OID));
continue;
format_type_be(targetTypeId)),
errdetail("Cannot cast type %s to %s in column %d.",
format_type_be(exprtype),
- format_type_be(tupdesc->attrs[i]->atttypid),
+ format_type_be(tupdesc->attrs[i]->atttypid),
ucolno)));
newargs = lappend(newargs, expr);
ucolno++;
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/* translator: first %s is name of a SQL construct, eg WHERE */
- errmsg("argument of %s must be type boolean, not type %s",
- constructName, format_type_be(inputTypeId))));
+ errmsg("argument of %s must be type boolean, not type %s",
+ constructName, format_type_be(inputTypeId))));
}
if (expression_returns_set(node))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/* translator: first %s is name of a SQL construct, eg LIMIT */
- errmsg("argument of %s must be type integer, not type %s",
- constructName, format_type_be(inputTypeId))));
+ errmsg("argument of %s must be type integer, not type %s",
+ constructName, format_type_be(inputTypeId))));
}
if (expression_returns_set(node))
else if (TypeCategory(ntype) != pcategory)
{
/*
- * both types in different categories? then not much
- * hope...
+ * both types in different categories? then not much hope...
*/
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/*
- * translator: first %s is name of a SQL construct, eg
- * CASE
+ * translator: first %s is name of a SQL construct, eg CASE
*/
errmsg("%s types %s and %s cannot be matched",
context,
format_type_be(ntype))));
}
else if (!IsPreferredType(pcategory, ptype) &&
- can_coerce_type(1, &ptype, &ntype, COERCION_IMPLICIT) &&
- !can_coerce_type(1, &ntype, &ptype, COERCION_IMPLICIT))
+ can_coerce_type(1, &ptype, &ntype, COERCION_IMPLICIT) &&
+ !can_coerce_type(1, &ntype, &ptype, COERCION_IMPLICIT))
{
/*
- * take new type if can coerce to it implicitly but not
- * the other way; but if we have a preferred type, stay on
- * it.
+ * take new type if can coerce to it implicitly but not the
+ * other way; but if we have a preferred type, stay on it.
*/
ptype = ntype;
pcategory = TypeCategory(ptype);
}
/*
- * If all the inputs were UNKNOWN type --- ie, unknown-type literals
- * --- then resolve as type TEXT. This situation comes up with
- * constructs like SELECT (CASE WHEN foo THEN 'bar' ELSE 'baz' END);
- * SELECT 'foo' UNION SELECT 'bar'; It might seem desirable to leave
- * the construct's output type as UNKNOWN, but that really doesn't
- * work, because we'd probably end up needing a runtime coercion from
- * UNKNOWN to something else, and we usually won't have it. We need
- * to coerce the unknown literals while they are still literals, so a
- * decision has to be made now.
+ * If all the inputs were UNKNOWN type --- ie, unknown-type literals ---
+ * then resolve as type TEXT. This situation comes up with constructs
+ * like SELECT (CASE WHEN foo THEN 'bar' ELSE 'baz' END); SELECT 'foo'
+ * UNION SELECT 'bar'; It might seem desirable to leave the construct's
+ * output type as UNKNOWN, but that really doesn't work, because we'd
+ * probably end up needing a runtime coercion from UNKNOWN to something
+ * else, and we usually won't have it. We need to coerce the unknown
+ * literals while they are still literals, so a decision has to be made
+ * now.
*/
if (ptype == UNKNOWNOID)
ptype = TEXTOID;
bool have_anyelement = false;
/*
- * Loop through the arguments to see if we have any that are ANYARRAY
- * or ANYELEMENT. If so, require the actual types to be
- * self-consistent
+ * Loop through the arguments to see if we have any that are ANYARRAY or
+ * ANYELEMENT. If so, require the actual types to be self-consistent
*/
for (j = 0; j < nargs; j++)
{
if (!OidIsValid(elem_typeid))
{
/*
- * if we don't have an element type yet, use the one we just
- * got
+ * if we don't have an element type yet, use the one we just got
*/
elem_typeid = array_typelem;
}
bool have_anyelement = (rettype == ANYELEMENTOID);
/*
- * Loop through the arguments to see if we have any that are ANYARRAY
- * or ANYELEMENT. If so, require the actual types to be
- * self-consistent
+ * Loop through the arguments to see if we have any that are ANYARRAY or
+ * ANYELEMENT. If so, require the actual types to be self-consistent
*/
for (j = 0; j < nargs; j++)
{
if (OidIsValid(elem_typeid) && actual_type != elem_typeid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("arguments declared \"anyelement\" are not all alike"),
+ errmsg("arguments declared \"anyelement\" are not all alike"),
errdetail("%s versus %s",
format_type_be(elem_typeid),
format_type_be(actual_type))));
if (OidIsValid(array_typeid) && actual_type != array_typeid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("arguments declared \"anyarray\" are not all alike"),
+ errmsg("arguments declared \"anyarray\" are not all alike"),
errdetail("%s versus %s",
format_type_be(array_typeid),
format_type_be(actual_type))));
}
/*
- * Fast Track: if none of the arguments are ANYARRAY or ANYELEMENT,
- * return the unmodified rettype.
+ * Fast Track: if none of the arguments are ANYARRAY or ANYELEMENT, return
+ * the unmodified rettype.
*/
if (!have_generics)
return rettype;
if (!OidIsValid(elem_typeid))
{
/*
- * if we don't have an element type yet, use the one we just
- * got
+ * if we don't have an element type yet, use the one we just got
*/
elem_typeid = array_typelem;
}
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for data type %s",
- format_type_be(elem_typeid))));
+ errmsg("could not find array type for data type %s",
+ format_type_be(elem_typeid))));
}
declared_arg_types[j] = array_typeid;
}
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for data type %s",
- format_type_be(elem_typeid))));
+ errmsg("could not find array type for data type %s",
+ format_type_be(elem_typeid))));
}
return array_typeid;
}
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for data type %s",
- format_type_be(context_actual_type))));
+ errmsg("could not find array type for data type %s",
+ format_type_be(context_actual_type))));
return array_typeid;
}
}
return false;
/*
- * This switch should agree with TypeCategory(), above. Note that at
- * this point, category certainly matches the type.
+ * This switch should agree with TypeCategory(), above. Note that at this
+ * point, category certainly matches the type.
*/
switch (category)
{
else
{
/*
- * If there's no pg_cast entry, perhaps we are dealing with a pair
- * of array types. If so, and if the element types have a
- * suitable cast, use array_type_coerce() or
- * array_type_length_coerce().
+ * If there's no pg_cast entry, perhaps we are dealing with a pair of
+ * array types. If so, and if the element types have a suitable cast,
+ * use array_type_coerce() or array_type_length_coerce().
*
- * Hack: disallow coercions to oidvector and int2vector, which
- * otherwise tend to capture coercions that should go to "real" array
- * types. We want those types to be considered "real" arrays for many
- * purposes, but not this one. (Also, array_type_coerce isn't
- * guaranteed to produce an output that meets the restrictions of
- * these datatypes, such as being 1-dimensional.)
+ * Hack: disallow coercions to oidvector and int2vector, which otherwise
+ * tend to capture coercions that should go to "real" array types. We
+ * want those types to be considered "real" arrays for many purposes,
+ * but not this one. (Also, array_type_coerce isn't guaranteed to
+ * produce an output that meets the restrictions of these datatypes,
+ * such as being 1-dimensional.)
*/
Oid targetElemType;
Oid sourceElemType;
return false;
if ((targetElemType = get_element_type(targetTypeId)) != InvalidOid &&
- (sourceElemType = get_element_type(sourceTypeId)) != InvalidOid)
+ (sourceElemType = get_element_type(sourceTypeId)) != InvalidOid)
{
if (find_coercion_pathway(targetElemType, sourceElemType,
ccontext, &elemfuncid))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.184 2005/06/26 22:05:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.185 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* We have to split any field-selection operations apart from
- * subscripting. Adjacent A_Indices nodes have to be treated as a
- * single multidimensional subscript operation.
+ * subscripting. Adjacent A_Indices nodes have to be treated as a single
+ * multidimensional subscript operation.
*/
foreach(i, indirection)
{
if (subscripts)
result = (Node *) transformArraySubscripts(pstate,
result,
- exprType(result),
+ exprType(result),
InvalidOid,
-1,
subscripts,
/*
* Not known as a column of any range-table entry.
*
- * Consider the possibility that it's VALUE in a domain
- * check expression. (We handle VALUE as a name, not
- * a keyword, to avoid breaking a lot of applications
- * that have used VALUE as a column name in the past.)
+ * Consider the possibility that it's VALUE in a domain check
+ * expression. (We handle VALUE as a name, not a keyword,
+ * to avoid breaking a lot of applications that have used
+ * VALUE as a column name in the past.)
*/
if (pstate->p_value_substitute != NULL &&
strcmp(name, "value") == 0)
/*
* Try to find the name as a relation. Note that only
- * relations already entered into the rangetable will
- * be recognized.
+ * relations already entered into the rangetable will be
+ * recognized.
*
* This is a hack for backwards compatibility with
- * PostQUEL-inspired syntax. The preferred form now
- * is "rel.*".
+ * PostQUEL-inspired syntax. The preferred form now is
+ * "rel.*".
*/
if (refnameRangeTblEntry(pstate, NULL, name,
&levels_up) != NULL)
if (node == NULL)
{
/*
- * Not known as a column of any range-table entry, so
- * try it as a function call. Here, we will create an
+ * Not known as a column of any range-table entry, so try
+ * it as a function call. Here, we will create an
* implicit RTE for tables not already entered.
*/
node = transformWholeRowRef(pstate, NULL, name1);
node = ParseFuncOrColumn(pstate,
- list_make1(makeString(name2)),
+ list_make1(makeString(name2)),
list_make1(node),
false, false, true);
}
/* Try it as a function call */
node = transformWholeRowRef(pstate, name1, name2);
node = ParseFuncOrColumn(pstate,
- list_make1(makeString(name3)),
+ list_make1(makeString(name3)),
list_make1(node),
false, false, true);
}
/* Try it as a function call */
node = transformWholeRowRef(pstate, name2, name3);
node = ParseFuncOrColumn(pstate,
- list_make1(makeString(name4)),
+ list_make1(makeString(name4)),
list_make1(node),
false, false, true);
}
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper qualified name (too many dotted names): %s",
- NameListToString(cref->fields))));
+ errmsg("improper qualified name (too many dotted names): %s",
+ NameListToString(cref->fields))));
node = NULL; /* keep compiler quiet */
break;
}
toppstate = toppstate->parentParseState;
/* Check parameter number is in range */
- if (paramno <= 0) /* probably can't happen? */
+ if (paramno <= 0) /* probably can't happen? */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PARAMETER),
errmsg("there is no parameter $%d", paramno)));
Node *result;
/*
- * Special-case "foo = NULL" and "NULL = foo" for compatibility
- * with standards-broken products (like Microsoft's). Turn these
- * into IS NULL exprs.
+ * Special-case "foo = NULL" and "NULL = foo" for compatibility with
+ * standards-broken products (like Microsoft's). Turn these into IS NULL
+ * exprs.
*/
if (Transform_null_equals &&
list_length(a->name) == 1 &&
((SubLink *) rexpr)->subLinkType == EXPR_SUBLINK)
{
/*
- * Convert "row op subselect" into a MULTIEXPR sublink.
- * Formerly the grammar did this, but now that a row construct
- * is allowed anywhere in expressions, it's easier to do it
- * here.
+ * Convert "row op subselect" into a MULTIEXPR sublink. Formerly the
+ * grammar did this, but now that a row construct is allowed anywhere
+ * in expressions, it's easier to do it here.
*/
SubLink *s = (SubLink *) rexpr;
transformAExprOf(ParseState *pstate, A_Expr *a)
{
/*
- * Checking an expression for match to type. Will result in a
- * boolean constant node.
+ * Checking an expression for match to type. Will result in a boolean
+ * constant node.
*/
ListCell *telem;
A_Const *n;
}
/*
- * Expect two forms: equals or not equals. Flip the sense of the
- * result for not equals.
+ * Expect two forms: equals or not equals. Flip the sense of the result
+ * for not equals.
*/
if (strcmp(strVal(linitial(a->name)), "!=") == 0)
matched = (!matched);
ListCell *args;
/*
- * Transform the list of arguments. We use a shallow list copy
- * and then transform-in-place to avoid O(N^2) behavior from
- * repeated lappend's.
+ * Transform the list of arguments. We use a shallow list copy and then
+ * transform-in-place to avoid O(N^2) behavior from repeated lappend's.
*
- * XXX: repeated lappend() would no longer result in O(n^2)
- * behavior; worth reconsidering this design?
+ * XXX: repeated lappend() would no longer result in O(n^2) behavior; worth
+ * reconsidering this design?
*/
targs = list_copy(fn->args);
foreach(args, targs)
if (arg)
{
/*
- * If test expression is an untyped literal, force it to text.
- * We have to do something now because we won't be able to do
- * this coercion on the placeholder. This is not as flexible
- * as what was done in 7.4 and before, but it's good enough to
- * handle the sort of silly coding commonly seen.
+ * If test expression is an untyped literal, force it to text. We have
+ * to do something now because we won't be able to do this coercion on
+ * the placeholder. This is not as flexible as what was done in 7.4
+ * and before, but it's good enough to handle the sort of silly coding
+ * commonly seen.
*/
if (exprType(arg) == UNKNOWNOID)
arg = coerce_to_common_type(pstate, arg, TEXTOID, "CASE");
/*
* Note: default result is considered the most significant type in
- * determining preferred type. This is how the code worked before,
- * but it seems a little bogus to me
- * --- tgl
+ * determining preferred type. This is how the code worked before, but it
+ * seems a little bogus to me --- tgl
*/
typeids = lcons_oid(exprType((Node *) newc->defresult), typeids);
if (sublink->subLinkType == EXISTS_SUBLINK)
{
/*
- * EXISTS needs no lefthand or combining operator. These
- * fields should be NIL already, but make sure.
+ * EXISTS needs no lefthand or combining operator. These fields
+ * should be NIL already, but make sure.
*/
sublink->lefthand = NIL;
sublink->operName = NIL;
ListCell *tlist_item = list_head(qtree->targetList);
/*
- * Make sure the subselect delivers a single column (ignoring
- * resjunk targets).
+ * Make sure the subselect delivers a single column (ignoring resjunk
+ * targets).
*/
if (tlist_item == NULL ||
((TargetEntry *) lfirst(tlist_item))->resjunk)
}
/*
- * EXPR and ARRAY need no lefthand or combining
- * operator. These fields should be NIL already, but make
- * sure.
+ * EXPR and ARRAY need no lefthand or combining operator. These fields
+ * should be NIL already, but make sure.
*/
sublink->lefthand = NIL;
sublink->operName = NIL;
lfirst(l) = transformExpr(pstate, lfirst(l));
/*
- * If the expression is "<> ALL" (with unqualified opname)
- * then convert it to "NOT IN". This is a hack to improve
- * efficiency of expressions output by pre-7.4 Postgres.
+ * If the expression is "<> ALL" (with unqualified opname) then
+ * convert it to "NOT IN". This is a hack to improve efficiency of
+ * expressions output by pre-7.4 Postgres.
*/
if (sublink->subLinkType == ALL_SUBLINK &&
list_length(op) == 1 && strcmp(opname, "<>") == 0)
/*
* To build the list of combining operator OIDs, we must scan
- * subquery's targetlist to find values that will be matched
- * against lefthand values. We need to ignore resjunk
- * targets, so doing the outer iteration over right_list is
- * easier than doing it over left_list.
+ * subquery's targetlist to find values that will be matched against
+ * lefthand values. We need to ignore resjunk targets, so doing the
+ * outer iteration over right_list is easier than doing it over
+ * left_list.
*/
sublink->operOids = NIL;
ll_item = lnext(ll_item);
/*
- * It's OK to use oper() not compatible_oper() here,
- * because make_subplan() will insert type coercion calls
- * if needed.
+ * It's OK to use oper() not compatible_oper() here, because
+ * make_subplan() will insert type coercion calls if needed.
*/
optup = oper(op,
exprType(lexpr),
if (opform->oprresult != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator %s must return type boolean, not type %s",
- opname,
- format_type_be(opform->oprresult)),
+ errmsg("operator %s must return type boolean, not type %s",
+ opname,
+ format_type_be(opform->oprresult)),
errhint("The operator of a quantified predicate subquery must return type boolean.")));
if (get_func_retset(opform->oprcode))
default:
elog(ERROR, "unrecognized booltesttype: %d",
(int) b->booltesttype);
- clausename = NULL; /* keep compiler quiet */
+ clausename = NULL; /* keep compiler quiet */
}
b->arg = (Expr *) transformExpr(pstate, (Node *) b->arg);
default:
/*
- * RTE is a join or subselect. We represent this as a
- * whole-row Var of RECORD type. (Note that in most cases the
- * Var will be expanded to a RowExpr during planning, but that
- * is not our concern here.)
+ * RTE is a join or subselect. We represent this as a whole-row
+ * Var of RECORD type. (Note that in most cases the Var will be
+ * expanded to a RowExpr during planning, but that is not our
+ * concern here.)
*/
result = (Node *) makeVar(vnum,
InvalidAttrNumber,
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("could not find array type for data type %s",
- format_type_be(exprType((Node *) tent->expr)))));
+ format_type_be(exprType((Node *) tent->expr)))));
}
}
else
case T_SubPlan:
{
/*
- * Although the parser does not ever deal with
- * already-planned expression trees, we support SubPlan
- * nodes in this routine for the convenience of
- * ruleutils.c.
+ * Although the parser does not ever deal with already-planned
+ * expression trees, we support SubPlan nodes in this routine
+ * for the convenience of ruleutils.c.
*/
SubPlan *subplan = (SubPlan *) expr;
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("could not find array type for data type %s",
- format_type_be(exprType((Node *) tent->expr)))));
+ format_type_be(exprType((Node *) tent->expr)))));
}
}
else
case BPCHAROID:
if (!con->constisnull)
{
- int32 len = VARSIZE(DatumGetPointer(con->constvalue)) - VARHDRSZ;
+ int32 len = VARSIZE(DatumGetPointer(con->constvalue)) - VARHDRSZ;
/* if multi-byte, take len and find # characters */
if (pg_database_encoding_max_length() > 1)
case T_CaseExpr:
{
/*
- * If all the alternatives agree on type/typmod, return
- * that typmod, else use -1
+ * If all the alternatives agree on type/typmod, return that
+ * typmod, else use -1
*/
CaseExpr *cexpr = (CaseExpr *) expr;
Oid casetype = cexpr->casetype;
case T_CoalesceExpr:
{
/*
- * If all the alternatives agree on type/typmod, return
- * that typmod, else use -1
+ * If all the alternatives agree on type/typmod, return that
+ * typmod, else use -1
*/
CoalesceExpr *cexpr = (CoalesceExpr *) expr;
Oid coalescetype = cexpr->coalescetype;
case T_MinMaxExpr:
{
/*
- * If all the alternatives agree on type/typmod, return
- * that typmod, else use -1
+ * If all the alternatives agree on type/typmod, return that
+ * typmod, else use -1
*/
MinMaxExpr *mexpr = (MinMaxExpr *) expr;
Oid minmaxtype = mexpr->minmaxtype;
return false;
/*
- * If it's not a two-argument or three-argument function with the
- * second argument being an int4 constant, it can't have been created
- * from a length coercion (it must be a type coercion, instead).
+ * If it's not a two-argument or three-argument function with the second
+ * argument being an int4 constant, it can't have been created from a
+ * length coercion (it must be a type coercion, instead).
*/
nargs = list_length(func->args);
if (nargs < 2 || nargs > 3)
errmsg("unequal number of entries in row expression")));
/*
- * XXX it's really wrong to generate a simple AND combination for < <=
- * > >=. We probably need to invent a new runtime node type to handle
- * those correctly. For the moment, though, keep on doing this ...
+ * XXX it's really wrong to generate a simple AND combination for < <= >
+ * >=. We probably need to invent a new runtime node type to handle those
+ * correctly. For the moment, though, keep on doing this ...
*/
oprname = strVal(llast(opname));
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("operator %s is not supported for row expressions",
- oprname)));
+ errmsg("operator %s is not supported for row expressions",
+ oprname)));
boolop = 0; /* keep compiler quiet */
}
if (((OpExpr *) result)->opresulttype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM requires = operator to yield boolean")));
+ errmsg("IS DISTINCT FROM requires = operator to yield boolean")));
/*
* We rely on DistinctExpr and OpExpr being same struct
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.181 2005/06/22 15:19:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.182 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
FuncDetailCode fdresult;
/*
- * Most of the rest of the parser just assumes that functions do not
- * have more than FUNC_MAX_ARGS parameters. We have to test here to
- * protect against array overruns, etc. Of course, this may not be a
- * function, but the test doesn't hurt.
+ * Most of the rest of the parser just assumes that functions do not have
+ * more than FUNC_MAX_ARGS parameters. We have to test here to protect
+ * against array overruns, etc. Of course, this may not be a function,
+ * but the test doesn't hurt.
*/
if (list_length(fargs) > FUNC_MAX_ARGS)
ereport(ERROR,
/*
* Extract arg type info in preparation for function lookup.
*
- * If any arguments are Param markers of type VOID, we discard them
- * from the parameter list. This is a hack to allow the JDBC driver
- * to not have to distinguish "input" and "output" parameter symbols
- * while parsing function-call constructs. We can't use foreach()
- * because we may modify the list ...
+ * If any arguments are Param markers of type VOID, we discard them from the
+ * parameter list. This is a hack to allow the JDBC driver to not have to
+ * distinguish "input" and "output" parameter symbols while parsing
+ * function-call constructs. We can't use foreach() because we may modify
+ * the list ...
*/
nargs = 0;
for (l = list_head(fargs); l != NULL; l = nextl)
nextl = lnext(l);
- if (argtype == VOIDOID && IsA(arg, Param) && !is_column)
+ if (argtype == VOIDOID && IsA(arg, Param) && !is_column)
{
fargs = list_delete_ptr(fargs, arg);
continue;
/*
* Check for column projection: if function has one argument, and that
- * argument is of complex type, and function name is not qualified,
- * then the "function call" could be a projection. We also check that
- * there wasn't any aggregate decoration.
+ * argument is of complex type, and function name is not qualified, then
+ * the "function call" could be a projection. We also check that there
+ * wasn't any aggregate decoration.
*/
if (nargs == 1 && !agg_star && !agg_distinct && list_length(funcname) == 1)
{
return retval;
/*
- * If ParseComplexProjection doesn't recognize it as a
- * projection, just press on.
+ * If ParseComplexProjection doesn't recognize it as a projection,
+ * just press on.
*/
}
}
* func_get_detail looks up the function in the catalogs, does
* disambiguation for polymorphic functions, handles inheritance, and
* returns the funcid and type and set or singleton status of the
- * function's return value. it also returns the true argument types
- * to the function.
+ * function's return value. it also returns the true argument types to
+ * the function.
*/
fdresult = func_get_detail(funcname, fargs, nargs, actual_arg_types,
&funcid, &rettype, &retset,
if (fdresult == FUNCDETAIL_COERCION)
{
/*
- * We can do it as a trivial coercion. coerce_type can handle
- * these cases, so why duplicate code...
+ * We can do it as a trivial coercion. coerce_type can handle these
+ * cases, so why duplicate code...
*/
return coerce_type(pstate, linitial(fargs),
actual_arg_types[0], rettype, -1,
else if (fdresult == FUNCDETAIL_NORMAL)
{
/*
- * Normal function found; was there anything indicating it must be
- * an aggregate?
+ * Normal function found; was there anything indicating it must be an
+ * aggregate?
*/
if (agg_star)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("%s(*) specified, but %s is not an aggregate function",
- NameListToString(funcname),
- NameListToString(funcname))));
+ errmsg("%s(*) specified, but %s is not an aggregate function",
+ NameListToString(funcname),
+ NameListToString(funcname))));
if (agg_distinct)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("DISTINCT specified, but %s is not an aggregate function",
- NameListToString(funcname))));
+ errmsg("DISTINCT specified, but %s is not an aggregate function",
+ NameListToString(funcname))));
}
else if (fdresult != FUNCDETAIL_AGGREGATE)
{
/*
* Oops. Time to die.
*
- * If we are dealing with the attribute notation rel.function, give
- * an error message that is appropriate for that case.
+ * If we are dealing with the attribute notation rel.function, give an
+ * error message that is appropriate for that case.
*/
if (is_column)
{
errmsg("function %s is not unique",
func_signature_string(funcname, nargs,
actual_arg_types)),
- errhint("Could not choose a best candidate function. "
- "You may need to add explicit type casts.")));
+ errhint("Could not choose a best candidate function. "
+ "You may need to add explicit type casts.")));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
func_signature_string(funcname, nargs,
actual_arg_types)),
- errhint("No function matches the given name and argument types. "
- "You may need to add explicit type casts.")));
+ errhint("No function matches the given name and argument types. "
+ "You may need to add explicit type casts.")));
}
/*
- * enforce consistency with ANYARRAY and ANYELEMENT argument and
- * return types, possibly adjusting return type or declared_arg_types
- * (which will be used as the cast destination by make_fn_arguments)
+ * enforce consistency with ANYARRAY and ANYELEMENT argument and return
+ * types, possibly adjusting return type or declared_arg_types (which will
+ * be used as the cast destination by make_fn_arguments)
*/
rettype = enforce_generic_type_consistency(actual_arg_types,
declared_arg_types,
FUNC_MAX_ARGS)));
/*
- * If any input types are domains, reduce them to their base types.
- * This ensures that we will consider functions on the base type to be
- * "exact matches" in the exact-match heuristic; it also makes it
- * possible to do something useful with the type-category heuristics.
- * Note that this makes it difficult, but not impossible, to use
- * functions declared to take a domain as an input datatype. Such a
- * function will be selected over the base-type function only if it is
- * an exact match at all argument positions, and so was already chosen
- * by our caller.
+ * If any input types are domains, reduce them to their base types. This
+ * ensures that we will consider functions on the base type to be "exact
+ * matches" in the exact-match heuristic; it also makes it possible to do
+ * something useful with the type-category heuristics. Note that this
+ * makes it difficult, but not impossible, to use functions declared to
+ * take a domain as an input datatype. Such a function will be selected
+ * over the base-type function only if it is an exact match at all
+ * argument positions, and so was already chosen by our caller.
*/
for (i = 0; i < nargs; i++)
input_base_typeids[i] = getBaseType(input_typeids[i]);
return candidates;
/*
- * Still too many candidates? Now look for candidates which have
- * either exact matches or preferred types at the args that will
- * require coercion. (Restriction added in 7.4: preferred type must be
- * of same category as input type; give no preference to
- * cross-category conversions to preferred types.) Keep all
- * candidates if none match.
+ * Still too many candidates? Now look for candidates which have either
+ * exact matches or preferred types at the args that will require
+ * coercion. (Restriction added in 7.4: preferred type must be of same
+ * category as input type; give no preference to cross-category
+ * conversions to preferred types.) Keep all candidates if none match.
*/
for (i = 0; i < nargs; i++) /* avoid multiple lookups */
slot_category[i] = TypeCategory(input_base_typeids[i]);
return candidates;
/*
- * Still too many candidates? Try assigning types for the unknown
- * columns.
+ * Still too many candidates? Try assigning types for the unknown columns.
*
- * NOTE: for a binary operator with one unknown and one non-unknown
- * input, we already tried the heuristic of looking for a candidate
- * with the known input type on both sides (see binary_oper_exact()).
- * That's essentially a special case of the general algorithm we try
- * next.
+ * NOTE: for a binary operator with one unknown and one non-unknown input, we
+ * already tried the heuristic of looking for a candidate with the known
+ * input type on both sides (see binary_oper_exact()). That's essentially
+ * a special case of the general algorithm we try next.
*
- * We do this by examining each unknown argument position to see if we
- * can determine a "type category" for it. If any candidate has an
- * input datatype of STRING category, use STRING category (this bias
- * towards STRING is appropriate since unknown-type literals look like
- * strings). Otherwise, if all the candidates agree on the type
- * category of this argument position, use that category. Otherwise,
- * fail because we cannot determine a category.
+ * We do this by examining each unknown argument position to see if we can
+ * determine a "type category" for it. If any candidate has an input
+ * datatype of STRING category, use STRING category (this bias towards
+ * STRING is appropriate since unknown-type literals look like strings).
+ * Otherwise, if all the candidates agree on the type category of this
+ * argument position, use that category. Otherwise, fail because we
+ * cannot determine a category.
*
- * If we are able to determine a type category, also notice whether any
- * of the candidates takes a preferred datatype within the category.
+ * If we are able to determine a type category, also notice whether any of
+ * the candidates takes a preferred datatype within the category.
*
- * Having completed this examination, remove candidates that accept the
- * wrong category at any unknown position. Also, if at least one
- * candidate accepted a preferred type at a position, remove
- * candidates that accept non-preferred types.
+ * Having completed this examination, remove candidates that accept the wrong
+ * category at any unknown position. Also, if at least one candidate
+ * accepted a preferred type at a position, remove candidates that accept
+ * non-preferred types.
*
* If we are down to one candidate at the end, we win.
*/
else
{
/*
- * Remember conflict, but keep going (might find
- * STRING)
+ * Remember conflict, but keep going (might find STRING)
*/
have_conflict = true;
}
raw_candidates = FuncnameGetCandidates(funcname, nargs);
/*
- * Quickly check if there is an exact match to the input datatypes
- * (there can be only one)
+ * Quickly check if there is an exact match to the input datatypes (there
+ * can be only one)
*/
for (best_candidate = raw_candidates;
best_candidate != NULL;
/*
* If we didn't find an exact match, next consider the possibility
* that this is really a type-coercion request: a single-argument
- * function call where the function name is a type name. If so,
- * and if we can do the coercion trivially (no run-time function
- * call needed), then go ahead and treat the "function call" as a
- * coercion. This interpretation needs to be given higher
- * priority than interpretations involving a type coercion
- * followed by a function call, otherwise we can produce
- * surprising results. For example, we want "text(varchar)" to be
- * interpreted as a trivial coercion, not as "text(name(varchar))"
- * which the code below this point is entirely capable of
- * selecting.
+ * function call where the function name is a type name. If so, and
+ * if we can do the coercion trivially (no run-time function call
+ * needed), then go ahead and treat the "function call" as a coercion.
+ * This interpretation needs to be given higher priority than
+ * interpretations involving a type coercion followed by a function
+ * call, otherwise we can produce surprising results. For example, we
+ * want "text(varchar)" to be interpreted as a trivial coercion, not
+ * as "text(name(varchar))" which the code below this point is
+ * entirely capable of selecting.
*
- * "Trivial" coercions are ones that involve binary-compatible types
- * and ones that are coercing a previously-unknown-type literal
- * constant to a specific type.
+ * "Trivial" coercions are ones that involve binary-compatible types and
+ * ones that are coercing a previously-unknown-type literal constant
+ * to a specific type.
*
- * The reason we can restrict our check to binary-compatible
- * coercions here is that we expect non-binary-compatible
- * coercions to have an implementation function named after the
- * target type. That function will be found by normal lookup if
- * appropriate.
+ * The reason we can restrict our check to binary-compatible coercions
+ * here is that we expect non-binary-compatible coercions to have an
+ * implementation function named after the target type. That function
+ * will be found by normal lookup if appropriate.
*
- * NB: it's important that this code stays in sync with what
- * coerce_type can do, because the caller will try to apply
- * coerce_type if we return FUNCDETAIL_COERCION. If we return
- * that result for something coerce_type can't handle, we'll cause
- * infinite recursion between this module and coerce_type!
+ * NB: it's important that this code stays in sync with what coerce_type
+ * can do, because the caller will try to apply coerce_type if we
+ * return FUNCDETAIL_COERCION. If we return that result for something
+ * coerce_type can't handle, we'll cause infinite recursion between
+ * this module and coerce_type!
*/
if (nargs == 1 && fargs != NIL)
{
}
/*
- * didn't find an exact match, so now try to match up
- * candidates...
+ * didn't find an exact match, so now try to match up candidates...
*/
if (raw_candidates != NULL)
{
current_candidates);
/*
- * If we were able to choose a best candidate, we're
- * done. Otherwise, ambiguous function call.
+ * If we were able to choose a best candidate, we're done.
+ * Otherwise, ambiguous function call.
*/
if (!best_candidate)
return FUNCDETAIL_MULTIPLE;
inhrel = heap_open(InheritsRelationId, AccessShareLock);
/*
- * Use queue to do a breadth-first traversal of the inheritance graph
- * from the relid supplied up to the root. Notice that we append to
- * the queue inside the loop --- this is okay because the foreach()
- * macro doesn't advance queue_item until the next loop iteration
- * begins.
+ * Use queue to do a breadth-first traversal of the inheritance graph from
+ * the relid supplied up to the root. Notice that we append to the queue
+ * inside the loop --- this is okay because the foreach() macro doesn't
+ * advance queue_item until the next loop iteration begins.
*/
foreach(queue_item, queue)
{
/*
* Okay, this is a not-yet-seen relid. Add it to the list of
- * already-visited OIDs, then find all the types this relid
- * inherits from and add them to the queue. The one exception is
- * we don't add the original relation to 'visited'.
+ * already-visited OIDs, then find all the types this relid inherits
+ * from and add them to the queue. The one exception is we don't add
+ * the original relation to 'visited'.
*/
if (queue_item != list_head(queue))
visited = lappend_oid(visited, this_relid);
while ((inhtup = heap_getnext(inhscan, ForwardScanDirection)) != NULL)
{
Form_pg_inherits inh = (Form_pg_inherits) GETSTRUCT(inhtup);
- Oid inhparent = inh->inhparent;
+ Oid inhparent = inh->inhparent;
/* If this is the target superclass, we're done */
if (get_rel_type_id(inhparent) == superclassTypeId)
int i;
/*
- * Special case for whole-row Vars so that we can resolve (foo.*).bar
- * even when foo is a reference to a subselect, join, or RECORD
- * function. A bonus is that we avoid generating an unnecessary
- * FieldSelect; our result can omit the whole-row Var and just be a
- * Var for the selected field.
+ * Special case for whole-row Vars so that we can resolve (foo.*).bar even
+ * when foo is a reference to a subselect, join, or RECORD function. A
+ * bonus is that we avoid generating an unnecessary FieldSelect; our
+ * result can omit the whole-row Var and just be a Var for the selected
+ * field.
*
- * This case could be handled by expandRecordVariable, but it's
- * more efficient to do it this way when possible.
+ * This case could be handled by expandRecordVariable, but it's more
+ * efficient to do it this way when possible.
*/
if (IsA(first_arg, Var) &&
((Var *) first_arg)->varattno == InvalidAttrNumber)
/*
* Else do it the hard way with get_expr_result_type().
*
- * If it's a Var of type RECORD, we have to work even harder: we have
- * to find what the Var refers to, and pass that to get_expr_result_type.
- * That task is handled by expandRecordVariable().
+ * If it's a Var of type RECORD, we have to work even harder: we have to find
+ * what the Var refers to, and pass that to get_expr_result_type. That
+ * task is handled by expandRecordVariable().
*/
if (IsA(first_arg, Var) &&
((Var *) first_arg)->vartype == RECORDOID)
else if (relTypeId == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("could not identify column \"%s\" in record data type",
- attname)));
+ errmsg("could not identify column \"%s\" in record data type",
+ attname)));
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s(%s) is not an aggregate",
- NameListToString(aggname), format_type_be(basetype))));
+ NameListToString(aggname), format_type_be(basetype))));
}
ReleaseSysCache(ftup);
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
- func_signature_string(funcname, nargs, argtypes))));
+ func_signature_string(funcname, nargs, argtypes))));
return InvalidOid;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.89 2005/05/30 01:20:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.90 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (elementType == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot subscript type %s because it is not an array",
- format_type_be(arrayType))));
+ errmsg("cannot subscript type %s because it is not an array",
+ format_type_be(arrayType))));
ReleaseSysCache(type_tuple_array);
/*
* A list containing only single subscripts refers to a single array
- * element. If any of the items are double subscripts (lower:upper),
- * then the subscript expression means an array slice operation. In
- * this case, we supply a default lower bound of 1 for any items that
- * contain only a single subscript. We have to prescan the
- * indirection list to see if there are any double subscripts.
+ * element. If any of the items are double subscripts (lower:upper), then
+ * the subscript expression means an array slice operation. In this case,
+ * we supply a default lower bound of 1 for any items that contain only a
+ * single subscript. We have to prescan the indirection list to see if
+ * there are any double subscripts.
*/
foreach(idx, indirection)
{
}
/*
- * The type represented by the subscript expression is the element
- * type if we are fetching a single element, but it is the same as the
- * array type if we are fetching a slice or storing.
+ * The type represented by the subscript expression is the element type if
+ * we are fetching a single element, but it is the same as the array type
+ * if we are fetching a slice or storing.
*/
if (isSlice || assignFrom != NULL)
resultType = arrayType;
subexpr = transformExpr(pstate, ai->lidx);
/* If it's not int4 already, try to coerce */
subexpr = coerce_to_target_type(pstate,
- subexpr, exprType(subexpr),
+ subexpr, exprType(subexpr),
INT4OID, -1,
COERCION_ASSIGNMENT,
COERCE_IMPLICIT_CAST);
if (subexpr == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("array subscript must have type integer")));
+ errmsg("array subscript must have type integer")));
}
else
{
/*
* If doing an array store, coerce the source value to the right type.
- * (This should agree with the coercion done by
- * updateTargetListEntry.)
+ * (This should agree with the coercion done by updateTargetListEntry.)
*/
if (assignFrom != NULL)
{
" but expression is of type %s",
format_type_be(typeneeded),
format_type_be(typesource)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
/*
* It might actually fit in int32. Probably only INT_MIN can
* occur, but we'll code the test generally just to be sure.
*/
- int32 val32 = (int32) val64;
+ int32 val32 = (int32) val64;
if (val64 == (int64) val32)
{
typeid = INT8OID;
typelen = sizeof(int64);
- typebyval = false; /* XXX might change someday */
+ typebyval = false; /* XXX might change someday */
}
}
else
break;
case T_String:
+
/*
* We assume here that UNKNOWN's internal representation is the
* same as CSTRING
val = CStringGetDatum(strVal(value));
typeid = UNKNOWNOID; /* will be coerced later */
- typelen = -2; /* cstring-style varwidth type */
+ typelen = -2; /* cstring-style varwidth type */
typebyval = false;
break;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.81 2004/12/31 22:00:27 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.82 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Look for an "=" operator for the datatype. We require it to be an
- * exact or binary-compatible match, since most callers are not
- * prepared to cope with adding any run-time type coercion steps.
+ * exact or binary-compatible match, since most callers are not prepared
+ * to cope with adding any run-time type coercion steps.
*/
typentry = lookup_type_cache(argtype, TYPECACHE_EQ_OPR);
oproid = typentry->eq_opr;
/*
- * If the datatype is an array, then we can use array_eq ... but only
- * if there is a suitable equality operator for the element type.
- * (This check is not in the raw typcache.c code ... should it be?)
+ * If the datatype is an array, then we can use array_eq ... but only if
+ * there is a suitable equality operator for the element type. (This check
+ * is not in the raw typcache.c code ... should it be?)
*/
if (oproid == ARRAY_EQ_OP)
{
if (!noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(argtype))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(argtype))));
return NULL;
}
Operator optup;
/*
- * Look for a "<" operator for the datatype. We require it to be an
- * exact or binary-compatible match, since most callers are not
- * prepared to cope with adding any run-time type coercion steps.
+ * Look for a "<" operator for the datatype. We require it to be an exact
+ * or binary-compatible match, since most callers are not prepared to cope
+ * with adding any run-time type coercion steps.
*
* Note: the search algorithm used by typcache.c ensures that if a "<"
* operator is returned, it will be consistent with the "=" operator
- * returned by equality_oper. This is critical for sorting and
- * grouping purposes.
+ * returned by equality_oper. This is critical for sorting and grouping
+ * purposes.
*/
typentry = lookup_type_cache(argtype, TYPECACHE_LT_OPR);
oproid = typentry->lt_opr;
/*
- * If the datatype is an array, then we can use array_lt ... but only
- * if there is a suitable less-than operator for the element type.
- * (This check is not in the raw typcache.c code ... should it be?)
+ * If the datatype is an array, then we can use array_lt ... but only if
+ * there is a suitable less-than operator for the element type. (This
+ * check is not in the raw typcache.c code ... should it be?)
*/
if (oproid == ARRAY_LT_OP)
{
if (!noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an ordering operator for type %s",
- format_type_be(argtype)),
- errhint("Use an explicit ordering operator or modify the query.")));
+ errmsg("could not identify an ordering operator for type %s",
+ format_type_be(argtype)),
+ errhint("Use an explicit ordering operator or modify the query.")));
return NULL;
}
Operator optup;
/*
- * Look for a ">" operator for the datatype. We require it to be an
- * exact or binary-compatible match, since most callers are not
- * prepared to cope with adding any run-time type coercion steps.
+ * Look for a ">" operator for the datatype. We require it to be an exact
+ * or binary-compatible match, since most callers are not prepared to cope
+ * with adding any run-time type coercion steps.
*
* Note: the search algorithm used by typcache.c ensures that if a ">"
* operator is returned, it will be consistent with the "=" operator
- * returned by equality_oper. This is critical for sorting and
- * grouping purposes.
+ * returned by equality_oper. This is critical for sorting and grouping
+ * purposes.
*/
typentry = lookup_type_cache(argtype, TYPECACHE_GT_OPR);
oproid = typentry->gt_opr;
/*
- * If the datatype is an array, then we can use array_gt ... but only
- * if there is a suitable greater-than operator for the element type.
- * (This check is not in the raw typcache.c code ... should it be?)
+ * If the datatype is an array, then we can use array_gt ... but only if
+ * there is a suitable greater-than operator for the element type. (This
+ * check is not in the raw typcache.c code ... should it be?)
*/
if (oproid == ARRAY_GT_OP)
{
if (!noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an ordering operator for type %s",
- format_type_be(argtype)),
- errhint("Use an explicit ordering operator or modify the query.")));
+ errmsg("could not identify an ordering operator for type %s",
+ format_type_be(argtype)),
+ errhint("Use an explicit ordering operator or modify the query.")));
return NULL;
}
*/
/*
- * Unspecified type for one of the arguments? then use the
- * other (XXX this is probably dead code?)
+ * Unspecified type for one of the arguments? then use the other
+ * (XXX this is probably dead code?)
*/
if (rtypeId == InvalidOid)
rtypeId = ltypeId;
if (!OidIsValid(operOid))
{
/*
- * We must run oper_select_candidate even if only one
- * candidate, otherwise we may falsely return a
- * non-type-compatible operator.
+ * We must run oper_select_candidate even if only one candidate,
+ * otherwise we may falsely return a non-type-compatible operator.
*/
fdresult = oper_select_candidate(1, &arg, clist, &operOid);
}
* First, quickly check to see if there is an exactly matching
* operator (there can be only one such entry in the list).
*
- * The returned list has args in the form (0, oprright). Move the
- * useful data into args[0] to keep oper_select_candidate simple.
- * XXX we are assuming here that we may scribble on the list!
+ * The returned list has args in the form (0, oprright). Move the useful
+ * data into args[0] to keep oper_select_candidate simple. XXX we are
+ * assuming here that we may scribble on the list!
*/
FuncCandidateList clisti;
if (!OidIsValid(operOid))
{
/*
- * We must run oper_select_candidate even if only one
- * candidate, otherwise we may falsely return a
- * non-type-compatible operator.
+ * We must run oper_select_candidate even if only one candidate,
+ * otherwise we may falsely return a non-type-compatible operator.
*/
fdresult = oper_select_candidate(1, &arg, clist, &operOid);
}
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("operator does not exist: %s",
op_signature_string(op, oprkind, arg1, arg2)),
- errhint("No operator matches the given name and argument type(s). "
- "You may need to add explicit type casts.")));
+ errhint("No operator matches the given name and argument type(s). "
+ "You may need to add explicit type casts.")));
}
/*
atypeId = exprType(rtree);
/*
- * The right-hand input of the operator will be the element type of
- * the array. However, if we currently have just an untyped literal
- * on the right, stay with that and hope we can resolve the operator.
+ * The right-hand input of the operator will be the element type of the
+ * array. However, if we currently have just an untyped literal on the
+ * right, stay with that and hope we can resolve the operator.
*/
if (atypeId == UNKNOWNOID)
rtypeId = UNKNOWNOID;
if (!OidIsValid(rtypeId))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires array on right side")));
+ errmsg("op ANY/ALL (array) requires array on right side")));
}
/* Now resolve the operator */
declared_arg_types[1] = opform->oprright;
/*
- * enforce consistency with ANYARRAY and ANYELEMENT argument and
- * return types, possibly adjusting return type or declared_arg_types
- * (which will be used as the cast destination by make_fn_arguments)
+ * enforce consistency with ANYARRAY and ANYELEMENT argument and return
+ * types, possibly adjusting return type or declared_arg_types (which will
+ * be used as the cast destination by make_fn_arguments)
*/
rettype = enforce_generic_type_consistency(actual_arg_types,
declared_arg_types,
if (rettype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires operator to yield boolean")));
+ errmsg("op ANY/ALL (array) requires operator to yield boolean")));
if (get_func_retset(opform->oprcode))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires operator not to return a set")));
+ errmsg("op ANY/ALL (array) requires operator not to return a set")));
/*
* Now switch back to the array type on the right, arranging for any
}
/*
- * enforce consistency with ANYARRAY and ANYELEMENT argument and
- * return types, possibly adjusting return type or declared_arg_types
- * (which will be used as the cast destination by make_fn_arguments)
+ * enforce consistency with ANYARRAY and ANYELEMENT argument and return
+ * types, possibly adjusting return type or declared_arg_types (which will
+ * be used as the cast destination by make_fn_arguments)
*/
rettype = enforce_generic_type_consistency(actual_arg_types,
declared_arg_types,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.114 2005/10/06 19:51:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.115 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool add_missing_from;
static RangeTblEntry *scanNameSpaceForRefname(ParseState *pstate,
- const char *refname);
+ const char *refname);
static RangeTblEntry *scanNameSpaceForRelid(ParseState *pstate, Oid relid);
static bool isLockedRel(ParseState *pstate, char *refname);
static void expandRelation(Oid relid, Alias *eref,
bool include_dropped,
List **colnames, List **colvars);
static void expandTupleDesc(TupleDesc tupdesc, Alias *eref,
- int rtindex, int sublevels_up,
- bool include_dropped,
- List **colnames, List **colvars);
+ int rtindex, int sublevels_up,
+ bool include_dropped,
+ List **colnames, List **colvars);
static int specialAttNum(const char *attname);
static void warnAutoRange(ParseState *pstate, RangeVar *relation);
* Scan the user column names (or aliases) for a match. Complain if
* multiple matches.
*
- * Note: eref->colnames may include entries for dropped columns, but
- * those will be empty strings that cannot match any legal SQL
- * identifier, so we don't bother to test for that case here.
+ * Note: eref->colnames may include entries for dropped columns, but those
+ * will be empty strings that cannot match any legal SQL identifier, so we
+ * don't bother to test for that case here.
*
- * Should this somehow go wrong and we try to access a dropped column,
- * we'll still catch it by virtue of the checks in
- * get_rte_attribute_type(), which is called by make_var(). That
- * routine has to do a cache lookup anyway, so the check there is
- * cheap.
+ * Should this somehow go wrong and we try to access a dropped column, we'll
+ * still catch it by virtue of the checks in get_rte_attribute_type(),
+ * which is called by make_var(). That routine has to do a cache lookup
+ * anyway, so the check there is cheap.
*/
foreach(c, rte->eref->colnames)
{
if (result)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_COLUMN),
- errmsg("column reference \"%s\" is ambiguous",
- colname)));
+ errmsg("column reference \"%s\" is ambiguous",
+ colname)));
result = newresult;
}
}
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("table \"%s\" has %d columns available but %d columns specified",
- eref->aliasname, maxattrs - numdropped, numaliases)));
+ eref->aliasname, maxattrs - numdropped, numaliases)));
}
/*
if (list_length(alias->colnames) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("too many column aliases specified for function %s",
- funcname)));
+ errmsg("too many column aliases specified for function %s",
+ funcname)));
eref->colnames = copyObject(alias->colnames);
return;
}
rte->alias = alias;
/*
- * Get the rel's OID. This access also ensures that we have an
- * up-to-date relcache entry for the rel. Since this is typically the
- * first access to a rel in a statement, be careful to get the right
- * access level depending on whether we're doing SELECT FOR UPDATE/SHARE.
+ * Get the rel's OID. This access also ensures that we have an up-to-date
+ * relcache entry for the rel. Since this is typically the first access
+ * to a rel in a statement, be careful to get the right access level
+ * depending on whether we're doing SELECT FOR UPDATE/SHARE.
*/
lockmode = isLockedRel(pstate, refname) ? RowShareLock : AccessShareLock;
rel = heap_openrv(relation, lockmode);
rte->relid = RelationGetRelid(rel);
/*
- * Build the list of effective column names using user-supplied
- * aliases and/or actual column names.
+ * Build the list of effective column names using user-supplied aliases
+ * and/or actual column names.
*/
rte->eref = makeAlias(refname, NIL);
buildRelationAliases(rel->rd_att, alias, rte->eref);
/*
- * Drop the rel refcount, but keep the access lock till end of
- * transaction so that the table can't be deleted or have its schema
- * modified underneath us.
+ * Drop the rel refcount, but keep the access lock till end of transaction
+ * so that the table can't be deleted or have its schema modified
+ * underneath us.
*/
heap_close(rel, NoLock);
rte->checkAsUser = InvalidOid; /* not set-uid by default, either */
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
rte->relid = RelationGetRelid(rel);
/*
- * Build the list of effective column names using user-supplied
- * aliases and/or actual column names.
+ * Build the list of effective column names using user-supplied aliases
+ * and/or actual column names.
*/
rte->eref = makeAlias(refname, NIL);
buildRelationAliases(rel->rd_att, alias, rte->eref);
rte->checkAsUser = InvalidOid; /* not set-uid by default, either */
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
rte->checkAsUser = InvalidOid;
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
&tupdesc);
/*
- * A coldeflist is required if the function returns RECORD and hasn't
- * got a predetermined record type, and is prohibited otherwise.
+ * A coldeflist is required if the function returns RECORD and hasn't got
+ * a predetermined record type, and is prohibited otherwise.
*/
if (coldeflist != NIL)
{
else
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("function \"%s\" in FROM has unsupported return type %s",
- funcname, format_type_be(funcrettype))));
+ errmsg("function \"%s\" in FROM has unsupported return type %s",
+ funcname, format_type_be(funcrettype))));
/*----------
* Flags:
rte->checkAsUser = InvalidOid;
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
/* fill in any unspecified alias columns */
if (numaliases < list_length(colnames))
eref->colnames = list_concat(eref->colnames,
- list_copy_tail(colnames, numaliases));
+ list_copy_tail(colnames, numaliases));
rte->eref = eref;
rte->checkAsUser = InvalidOid;
/*
- * Add completed RTE to pstate's range table list, but not to join
- * list nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join list
+ * nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
{
if (addToJoinList)
{
- int rtindex = RTERangeTablePosn(pstate, rte, NULL);
+ int rtindex = RTERangeTablePosn(pstate, rte, NULL);
RangeTblRef *rtr = makeNode(RangeTblRef);
rtr->rtindex = rtindex;
/* Base data type, i.e. scalar */
if (colnames)
*colnames = lappend(*colnames,
- linitial(rte->eref->colnames));
+ linitial(rte->eref->colnames));
if (colvars)
{
/*
* During ordinary parsing, there will never be any
- * deleted columns in the join; but we have to check
- * since this routine is also used by the rewriter,
- * and joins found in stored rules might have join
- * columns for since-deleted columns. This will be
- * signaled by a NULL Const in the alias-vars list.
+ * deleted columns in the join; but we have to check since
+ * this routine is also used by the rewriter, and joins
+ * found in stored rules might have join columns for
+ * since-deleted columns. This will be signaled by a NULL
+ * Const in the alias-vars list.
*/
if (IsA(avar, Const))
{
if (colvars)
{
/*
- * can't use atttypid here, but it doesn't really
- * matter what type the Const claims to be.
+ * can't use atttypid here, but it doesn't really matter
+ * what type the Const claims to be.
*/
*colvars = lappend(*colvars, makeNullConst(INT4OID));
}
te_list = lappend(te_list, te);
}
- Assert(name == NULL && var == NULL); /* lists not the same
- * length? */
+ Assert(name == NULL && var == NULL); /* lists not the same length? */
return te_list;
}
return get_relid_attribute_name(rte->relid, attnum);
/*
- * Otherwise use the column name from eref. There should always be
- * one.
+ * Otherwise use the column name from eref. There should always be one.
*/
if (attnum > 0 && attnum <= list_length(rte->eref->colnames))
return strVal(list_nth(rte->eref->colnames, attnum - 1));
att_tup = (Form_pg_attribute) GETSTRUCT(tp);
/*
- * If dropped column, pretend it ain't there. See notes
- * in scanRTEForColumn.
+ * If dropped column, pretend it ain't there. See notes in
+ * scanRTEForColumn.
*/
if (att_tup->attisdropped)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- NameStr(att_tup->attname),
- get_rel_name(rte->relid))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ NameStr(att_tup->attname),
+ get_rel_name(rte->relid))));
*vartype = att_tup->atttypid;
*vartypmod = att_tup->atttypmod;
ReleaseSysCache(tp);
if (attnum < 1 || attnum > tupdesc->natts)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column %d of relation \"%s\" does not exist",
- attnum,
- rte->eref->aliasname)));
+ errmsg("column %d of relation \"%s\" does not exist",
+ attnum,
+ rte->eref->aliasname)));
att_tup = tupdesc->attrs[attnum - 1];
/*
- * If dropped column, pretend it ain't there. See
- * notes in scanRTEForColumn.
+ * If dropped column, pretend it ain't there. See notes
+ * in scanRTEForColumn.
*/
if (att_tup->attisdropped)
ereport(ERROR,
case RTE_JOIN:
{
/*
- * Join RTE --- get type info from join RTE's alias
- * variable
+ * Join RTE --- get type info from join RTE's alias variable
*/
Node *aliasvar;
case RTE_RELATION:
{
/*
- * Plain relation RTE --- get the attribute's catalog
- * entry
+ * Plain relation RTE --- get the attribute's catalog entry
*/
HeapTuple tp;
Form_pg_attribute att_tup;
case RTE_JOIN:
{
/*
- * A join RTE would not have dropped columns when
- * constructed, but one in a stored rule might contain
- * columns that were dropped from the underlying tables,
- * if said columns are nowhere explicitly referenced in
- * the rule. This will be signaled to us by a NULL Const
- * in the joinaliasvars list.
+ * A join RTE would not have dropped columns when constructed,
+ * but one in a stored rule might contain columns that were
+ * dropped from the underlying tables, if said columns are
+ * nowhere explicitly referenced in the rule. This will be
+ * signaled to us by a NULL Const in the joinaliasvars list.
*/
Var *aliasvar;
if (pstate->parentParseState != NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("missing FROM-clause entry in subquery for table \"%s\"",
- relation->relname)));
+ errmsg("missing FROM-clause entry in subquery for table \"%s\"",
+ relation->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
else
ereport(NOTICE,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("adding missing FROM-clause entry for table \"%s\"",
- relation->relname)));
+ errmsg("adding missing FROM-clause entry for table \"%s\"",
+ relation->relname)));
}
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.137 2005/06/26 22:05:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.138 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
- Var *var, int levelsup);
+ Var *var, int levelsup);
static Node *transformAssignmentIndirection(ParseState *pstate,
Node *basenode,
const char *targetName,
if (colname == NULL && !resjunk)
{
/*
- * Generate a suitable column name for a column without any
- * explicit 'AS ColumnName' clause.
+ * Generate a suitable column name for a column without any explicit
+ * 'AS ColumnName' clause.
*/
colname = FigureColname(node);
}
/*
* Check for "something.*". Depending on the complexity of the
- * "something", the star could appear as the last name in
- * ColumnRef, or as the last indirection item in A_Indirection.
+ * "something", the star could appear as the last name in ColumnRef,
+ * or as the last indirection item in A_Indirection.
*/
if (IsA(res->val, ColumnRef))
{
{
/* It is something.*, expand into multiple items */
p_target = list_concat(p_target,
- ExpandIndirectionStar(pstate, ind));
+ ExpandIndirectionStar(pstate, ind));
continue;
}
}
/*
* If the expression is a DEFAULT placeholder, insert the attribute's
- * type/typmod into it so that exprType will report the right things.
- * (We expect that the eventually substituted default expression will
- * in fact have this type and typmod.) Also, reject trying to update
- * a subfield or array element with DEFAULT, since there can't be any
- * default for portions of a column.
+ * type/typmod into it so that exprType will report the right things. (We
+ * expect that the eventually substituted default expression will in fact
+ * have this type and typmod.) Also, reject trying to update a subfield
+ * or array element with DEFAULT, since there can't be any default for
+ * portions of a column.
*/
if (tle->expr && IsA(tle->expr, SetToDefault))
{
if (IsA(linitial(indirection), A_Indices))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot set an array element to DEFAULT")));
+ errmsg("cannot set an array element to DEFAULT")));
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/*
* If there is indirection on the target column, prepare an array or
- * subfield assignment expression. This will generate a new column
- * value that the source value has been inserted into, which can then
- * be placed in the new tuple constructed by INSERT or UPDATE.
+ * subfield assignment expression. This will generate a new column value
+ * that the source value has been inserted into, which can then be placed
+ * in the new tuple constructed by INSERT or UPDATE.
*/
if (indirection)
{
if (pstate->p_is_insert)
{
/*
- * The command is INSERT INTO table (col.something) ... so
- * there is not really a source value to work with. Insert a
- * NULL constant as the source value.
+ * The command is INSERT INTO table (col.something) ... so there
+ * is not really a source value to work with. Insert a NULL
+ * constant as the source value.
*/
colVar = (Node *) makeNullConst(attrtype);
}
colname,
format_type_be(attrtype),
format_type_be(type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
/*
* Set the resno to identify the target column --- the rewriter and
- * planner depend on this. We also set the resname to identify the
- * target column, but this is only for debugging purposes; it should
- * not be relied on. (In particular, it might be out of date in a
- * stored rule.)
+ * planner depend on this. We also set the resname to identify the target
+ * column, but this is only for debugging purposes; it should not be
+ * relied on. (In particular, it might be out of date in a stored rule.)
*/
tle->resno = (AttrNumber) attrno;
tle->resname = colname;
/*
* We have to split any field-selection operations apart from
- * subscripting. Adjacent A_Indices nodes have to be treated as a
- * single multidimensional subscript operation.
+ * subscripting. Adjacent A_Indices nodes have to be treated as a single
+ * multidimensional subscript operation.
*/
for_each_cell(i, indirection)
{
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
else
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
return result;
attrno = attnameAttNum(pstate->p_target_relation, name, false);
/*
- * Check for duplicates, but only of whole columns --- we
- * allow INSERT INTO foo (col.subcol1, col.subcol2)
+ * Check for duplicates, but only of whole columns --- we allow
+ * INSERT INTO foo (col.subcol1, col.subcol2)
*/
if (col->indirection == NIL)
{
bms_is_member(attrno, partialcols))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" specified more than once",
- name)));
+ errmsg("column \"%s\" specified more than once",
+ name)));
wholecols = bms_add_member(wholecols, attrno);
}
else
if (bms_is_member(attrno, wholecols))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" specified more than once",
- name)));
+ errmsg("column \"%s\" specified more than once",
+ name)));
partialcols = bms_add_member(partialcols, attrno);
}
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper qualified name (too many dotted names): %s",
- NameListToString(fields))));
+ errmsg("improper qualified name (too many dotted names): %s",
+ NameListToString(fields))));
schemaname = NULL; /* keep compiler quiet */
relname = NULL;
break;
if (!pstate->p_varnamespace)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("SELECT * with no tables specified is not valid")));
+ errmsg("SELECT * with no tables specified is not valid")));
foreach(l, pstate->p_varnamespace)
{
RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
- int rtindex = RTERangeTablePosn(pstate, rte, NULL);
+ int rtindex = RTERangeTablePosn(pstate, rte, NULL);
target = list_concat(target,
expandRelAttrs(pstate, rte, rtindex, 0));
/*
* Verify it's a composite type, and get the tupdesc. We use
- * get_expr_result_type() because that can handle references to
- * functions returning anonymous record types. If that fails,
- * use lookup_rowtype_tupdesc(), which will almost certainly fail
- * as well, but it will give an appropriate error message.
+ * get_expr_result_type() because that can handle references to functions
+ * returning anonymous record types. If that fails, use
+ * lookup_rowtype_tupdesc(), which will almost certainly fail as well, but
+ * it will give an appropriate error message.
*
- * If it's a Var of type RECORD, we have to work even harder: we have
- * to find what the Var refers to, and pass that to get_expr_result_type.
- * That task is handled by expandRecordVariable().
+ * If it's a Var of type RECORD, we have to work even harder: we have to find
+ * what the Var refers to, and pass that to get_expr_result_type. That
+ * task is handled by expandRecordVariable().
*/
if (IsA(expr, Var) &&
((Var *) expr)->vartype == RECORDOID)
continue;
/*
- * If we got a whole-row Var from the rowtype reference, we can
- * expand the fields as simple Vars. Otherwise we must generate
- * multiple copies of the rowtype reference and do FieldSelects.
+ * If we got a whole-row Var from the rowtype reference, we can expand
+ * the fields as simple Vars. Otherwise we must generate multiple
+ * copies of the rowtype reference and do FieldSelects.
*/
if (IsA(expr, Var) &&
((Var *) expr)->varattno == InvalidAttrNumber)
* Get the tuple descriptor for a Var of type RECORD, if possible.
*
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
* the tupdesc from it. We ereport if we can't determine the tupdesc.
*
{
case RTE_RELATION:
case RTE_SPECIAL:
+
/*
* This case should not occur: a column of a table shouldn't have
* type RECORD. Fall through and fail (most likely) at the
{
/*
* Recurse into the sub-select to see what its Var refers
- * to. We have to build an additional level of ParseState
+ * to. We have to build an additional level of ParseState
* to keep in step with varlevelsup in the subselect.
*/
ParseState mypstate;
/* else fall through to inspect the expression */
break;
case RTE_FUNCTION:
+
/*
- * We couldn't get here unless a function is declared with one
- * of its result columns as RECORD, which is not allowed.
+ * We couldn't get here unless a function is declared with one of
+ * its result columns as RECORD, which is not allowed.
*/
break;
}
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass
- * to lookup_rowtype_tupdesc() which will probably fail, but will
- * give an appropriate error message while failing.
+ * get_expr_result_type() can do anything with it. If not, pass to
+ * lookup_rowtype_tupdesc() which will probably fail, but will give an
+ * appropriate error message while failing.
*/
if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
tupleDesc = lookup_rowtype_tupdesc(exprType(expr), exprTypmod(expr));
return 2;
case T_MinMaxExpr:
/* make greatest/least act like a regular function */
- switch (((MinMaxExpr*) node)->op)
+ switch (((MinMaxExpr *) node)->op)
{
case IS_GREATEST:
*name = "greatest";
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.76 2005/08/01 20:31:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.77 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
case 1:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper %%TYPE reference (too few dotted names): %s",
- NameListToString(typename->names))));
+ errmsg("improper %%TYPE reference (too few dotted names): %s",
+ NameListToString(typename->names))));
break;
case 2:
rel->relname = strVal(linitial(typename->names));
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- field, rel->relname)));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ field, rel->relname)));
restype = get_atttype(relid, attnum);
/* this construct should never have an array indicator */
/*
* Currently we just suppress any syntax error position report, rather
- * than transforming to an "internal query" error. It's unlikely that
- * a type name is complex enough to need positioning.
+ * than transforming to an "internal query" error. It's unlikely that a
+ * type name is complex enough to need positioning.
*/
errposition(0);
}
error_context_stack = ptserrcontext.previous;
/*
- * Make sure we got back exactly what we expected and no more;
- * paranoia is justified since the string might contain anything.
+ * Make sure we got back exactly what we expected and no more; paranoia is
+ * justified since the string might contain anything.
*/
if (list_length(raw_parsetree_list) != 1)
goto fail;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.29 2004/12/31 22:00:27 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.30 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (s[i] == '\'')
{
/*
- * Note: if scanner is working right, unescaped quotes can
- * only appear in pairs, so there should be another character.
+ * Note: if scanner is working right, unescaped quotes can only
+ * appear in pairs, so there should be another character.
*/
i++;
newStr[j] = s[i];
result = palloc(len + 1);
/*
- * SQL99 specifies Unicode-aware case normalization, which we don't
- * yet have the infrastructure for. Instead we use tolower() to
- * provide a locale-aware translation. However, there are some
- * locales where this is not right either (eg, Turkish may do strange
- * things with 'i' and 'I'). Our current compromise is to use
- * tolower() for characters with the high bit set, and use an
- * ASCII-only downcasing for 7-bit characters.
+ * SQL99 specifies Unicode-aware case normalization, which we don't yet
+ * have the infrastructure for. Instead we use tolower() to provide a
+ * locale-aware translation. However, there are some locales where this
+ * is not right either (eg, Turkish may do strange things with 'i' and
+ * 'I'). Our current compromise is to use tolower() for characters with
+ * the high bit set, and use an ASCII-only downcasing for 7-bit
+ * characters.
*/
for (i = 0; i < len; i++)
{
if (warn)
ereport(NOTICE,
(errcode(ERRCODE_NAME_TOO_LONG),
- errmsg("identifier \"%s\" will be truncated to \"%.*s\"",
- ident, len, ident)));
+ errmsg("identifier \"%s\" will be truncated to \"%.*s\"",
+ ident, len, ident)));
ident[len] = '\0';
}
}
delete_sem(Address[2 * i + 1]);
/*
- * Reset to an invalid semId (in case other process try to get
- * the infos from a cloned area
+ * Reset to an invalid semId (in case other processes try to get the
+ * info from a cloned area)
*/
Address[2 * i + 1] = 0;
}
Address[0] = 0;
/*
- * Delete the area (it might be cloned by other process. Let them
- * live with it, in all cases semIds are 0 so if another process
- * try to use it, it will fail
+ * Delete the area (it might be cloned by other processes; let them
+ * live with it). In all cases the semIds are 0, so if another process
+ * tries to use it, it will fail.
*/
delete_area(semId);
/* Get an area clone (in case it's not in our address space) */
/*
- * TODO : a check of address space might be done to avoid
- * duplicate areas in the same address space
+ * TODO : a check of address space might be done to avoid duplicate
+ * areas in the same address space
*/
parea = clone_area(Nom, &Address, B_ANY_ADDRESS, B_READ_AREA | B_WRITE_AREA, parea);
return parea;
long i;
/*
- * Limit to 250 (8 byte per sem : 4 for the semid and 4 for
- * the last pid which accessed the semaphore in a pool
+ * Limit to 250 (8 byte per sem : 4 for the semid and 4 for the
+ * last pid which accessed the semaphore in a pool
*/
if (semNum > 250)
{
if (sops[i].sem_op < 0)
{
/*
- * Try acquiring the semaphore till we are not interrupted by
- * a signal
+ * Keep trying to acquire the semaphore until we are no longer
+ * interrupted by a signal
*/
if (sops[i].sem_flg == IPC_NOWAIT)
{
if (ainfo.team == teinfo.team)
{
/*
- * the area is already in our address space, just return the
- * address
+ * the area is already in our address space, just return the address
*/
return (int *) ainfo.address;
}
else
{
/*
- * the area is not in our address space, clone it before and
- * return the address
+ * the area is not in our address space, clone it before and return
+ * the address
*/
area_id narea;
return -1;
/*
- * area does not exist and its creation is requested, create it (be
- * sure to have a 4ko multiple size
+ * The area does not exist and its creation is requested; create it (be
+ * sure to use a size that is a multiple of 4 KB)
*/
return create_area(nom, &Address, B_ANY_ADDRESS, ((size / 4096) + 1) * 4096, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
}
/* Main server loop */
for (;;)
{
- int32 opcode = 0;
+ int32 opcode = 0;
char datas[4000];
/*
- * Wait for a message from the backend : 1 : load a shared
- * object 2 : unload a shared object any other : exit support
- * server
+ * Wait for a message from the backend: 1 = load a shared object,
+ * 2 = unload a shared object, any other = exit the support server
*/
read_port(port_in, &opcode, datas, 4000);
case 2:
/*
- * Unload shared object and send back the result of
- * the operation
+ * Unload shared object and send back the result of the
+ * operation
*/
write_port(port_out, unload_add_on(*((int *) (datas))), NULL, 0);
break;
if (get_image_symbol(addon, datas, B_SYMBOL_TYPE_TEXT, &fpt) == B_OK);
{
/*
- * Sometime the loader return B_OK for an
- * inexistant function with an invalid address !!!
- * Check that the return address is in the image
- * range
+ * Sometimes the loader returns B_OK for a nonexistent
+ * function, with an invalid address!  Check that the
+ * returned address is within the image range.
*/
get_image_info(addon, &info_im);
static void *mainModule;
/*
- * Upon the first call register a terminate handler that will close
- * all libraries. Also get a reference to the main module for use with
+ * Upon the first call register a terminate handler that will close all
+ * libraries. Also get a reference to the main module for use with
* loadbind.
*/
if (!mainModule)
}
/*
- * load should be declared load(const char *...). Thus we cast the
- * path to a normal char *. Ugly.
+ * load should be declared load(const char *...). Thus we cast the path to
+ * a normal char *. Ugly.
*/
if ((mp->entry = (void *) load((char *) path, L_NOAUTODEFER, NULL)) == NULL)
{
strcat(errbuf, ": ");
/*
- * If AIX says the file is not executable, the error can be
- * further described by querying the loader about the last error.
+ * If AIX says the file is not executable, the error can be further
+ * described by querying the loader about the last error.
*/
if (errno == ENOEXEC)
{
errvalid = 0;
/*
- * If the shared object was compiled using xlC we will need to call
- * static constructors (and later on dlclose destructors).
+ * If the shared object was compiled using xlC we will need to call static
+ * constructors (and later on dlclose destructors).
*/
if (mp->cdtors = (CdtorPtr) dlsym(mp, "__cdtors"))
{
int i;
/*
- * Could speed up the search, but I assume that one assigns the result
- * to function pointers anyways.
+ * Could speed up the search, but I assume that one assigns the result to
+ * function pointers anyways.
*/
for (ep = mp->exports, i = mp->nExports; i; i--, ep++)
if (strcmp(ep->name, symbol) == 0)
}
/*
- * The module might be loaded due to the LIBPATH environment
- * variable. Search for the loaded module using L_GETINFO.
+ * The module might be loaded due to the LIBPATH environment variable.
+ * Search for the loaded module using L_GETINFO.
*/
if ((buf = malloc(size)) == NULL)
{
}
/*
- * Traverse the list of loaded modules. The entry point returned
- * by load() does actually point to the data segment origin.
+ * Traverse the list of loaded modules. The entry point returned by
+ * load() does actually point to the data segment origin.
*/
lp = (struct ld_info *) buf;
while (lp)
/*
* Get the padding for the data section. This is needed for AIX 4.1
- * compilers. This is used when building the final function pointer to
- * the exported symbol.
+ * compilers. This is used when building the final function pointer to the
+ * exported symbol.
*/
if (ldnshread(ldp, _DATA, &shdata) != SUCCESS)
{
}
/*
- * We read the complete loader section in one chunk, this makes
- * finding long symbol names residing in the string table easier.
+ * We read the complete loader section in one chunk, this makes finding
+ * long symbol names residing in the string table easier.
*/
if ((ldbuf = (char *) malloc(sh.s_size)) == NULL)
{
}
/*
- * Fill in the export table. All entries are relative to the entry
- * point we got from load.
+ * Fill in the export table. All entries are relative to the entry point
+ * we got from load.
*/
ep = mp->exports;
ls = (LDSYM *) (ldbuf + LDHDRSZ);
{
/*
* The l_name member is not zero terminated, we must copy the
- * first SYMNMLEN chars and make sure we have a zero byte at
- * the end.
+ * first SYMNMLEN chars and make sure we have a zero byte at the
+ * end.
*/
strncpy(tmpsym, ls->l_name, SYMNMLEN);
tmpsym[SYMNMLEN] = '\0';
}
/*
- * The first entry is the main module. The entry point returned by
- * load() does actually point to the data segment origin.
+ * The first entry is the main module. The entry point returned by load()
+ * does actually point to the data segment origin.
*/
lp = (struct ld_info *) buf;
ret = lp->ldinfo_dataorg;
/*
- * $PostgreSQL: pgsql/src/backend/port/dynloader/aix.h,v 1.12 2003/11/29 22:39:51 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/aix.h,v 1.13 2005/10/15 02:49:23 momjian Exp $
*
* @(#)dlfcn.h 1.4 revision of 95/04/25 09:36:52
* This is an unpublished work copyright (c) 1992 HELIOS Software GmbH
#ifdef HAVE_DLOPEN
-
#else /* HAVE_DLOPEN */
#ifdef __cplusplus
void *dlsym(void *handle, const char *symbol);
char *dlerror(void);
int dlclose(void *handle);
-
#else
void *dlopen();
void *dlsym();
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/dynloader/bsdi.c,v 1.26 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/bsdi.c,v 1.27 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int dl_initialized = 0;
/*
- * initializes the dynamic loader with the executable's pathname.
- * (only needs to do this the first time pg_dlopen is called.)
+ * initializes the dynamic loader with the executable's pathname. (only
+ * needs to do this the first time pg_dlopen is called.)
*/
if (!dl_initialized)
{
return NULL;
/*
- * If undefined symbols: try to link with the C and math libraries!
- * This could be smarter, if the dynamic linker was able to handle
- * shared libs!
+ * If undefined symbols: try to link with the C and math libraries! This
+ * could be smarter, if the dynamic linker was able to handle shared libs!
*/
if (dld_undefined_sym_count > 0)
{
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/port/dynloader/bsdi.h,v 1.21 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/bsdi.h,v 1.22 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define pg_dlsym dlsym
#define pg_dlclose dlclose
#define pg_dlerror dlerror
-
#else /* not HAVE_DLOPEN */
#define pg_dlsym(handle, funcname) ((PGFunction) dld_get_func((funcname)))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/dynloader/hpux.c,v 1.27 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/hpux.c,v 1.28 2005/10/15 02:49:23 momjian Exp $
*
* NOTES
* all functions are defined here -- it's impossible to trace the
* call the library!
*/
shl_t handle = shl_load(filename,
- BIND_IMMEDIATE | BIND_VERBOSE | DYNAMIC_PATH,
+ BIND_IMMEDIATE | BIND_VERBOSE | DYNAMIC_PATH,
0L);
return (void *) handle;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/dynloader/linux.c,v 1.30 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/linux.c,v 1.31 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int dl_initialized = 0;
/*
- * initializes the dynamic loader with the executable's pathname.
- * (only needs to do this the first time pg_dlopen is called.)
+ * initializes the dynamic loader with the executable's pathname. (only
+ * needs to do this the first time pg_dlopen is called.)
*/
if (!dl_initialized)
{
return NULL;
/*
- * If undefined symbols: try to link with the C and math libraries!
- * This could be smarter, if the dynamic linker was able to handle
- * shared libs!
+ * If undefined symbols: try to link with the C and math libraries! This
+ * could be smarter, if the dynamic linker was able to handle shared libs!
*/
if (dld_undefined_sym_count > 0)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/dynloader/ultrix4.c,v 1.22 2004/12/31 22:00:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/ultrix4.c,v 1.23 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void *handle;
/*
- * initializes the dynamic loader with the executable's pathname.
- * (only needs to do this the first time pg_dlopen is called.)
+ * initializes the dynamic loader with the executable's pathname. (only
+ * needs to do this the first time pg_dlopen is called.)
*/
if (!dl_initialized)
{
}
/*
- * open the file. We do the symbol resolution right away so that we
- * will know if there are undefined symbols. (This is in fact the same
+ * open the file. We do the symbol resolution right away so that we will
+ * know if there are undefined symbols. (This is in fact the same
* semantics as "ld -A". ie. you cannot have undefined symbols.
*/
if ((handle = dl_open(filename, DL_NOW)) == NULL)
-/* $PostgreSQL: pgsql/src/backend/port/dynloader/win32.c,v 1.6 2005/08/12 21:23:10 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/backend/port/dynloader/win32.c,v 1.7 2005/10/15 02:49:23 momjian Exp $ */
#include
#include
-char *dlerror(void);
-int dlclose(void *handle);
-void *dlsym(void *handle, const char *symbol);
-void *dlopen(const char *path, int mode);
+char *dlerror(void);
+int dlclose(void *handle);
+void *dlsym(void *handle, const char *symbol);
+void *dlopen(const char *path, int mode);
static char last_dyn_error[512];
-static void set_dl_error(void)
+static void
+set_dl_error(void)
{
- DWORD err = GetLastError();
+ DWORD err = GetLastError();
if (FormatMessage(FORMAT_MESSAGE_IGNORE_INSERTS |
- FORMAT_MESSAGE_FROM_SYSTEM,
- NULL,
- err,
- MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
- last_dyn_error,
- sizeof(last_dyn_error)-1,
- NULL) == 0)
+ FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL,
+ err,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ last_dyn_error,
+ sizeof(last_dyn_error) - 1,
+ NULL) == 0)
{
- snprintf(last_dyn_error, sizeof(last_dyn_error)-1,
- "unknown error %lu", err);
- }
+ snprintf(last_dyn_error, sizeof(last_dyn_error) - 1,
+ "unknown error %lu", err);
+ }
}
char *
void *
dlsym(void *handle, const char *symbol)
{
- void *ptr;
+ void *ptr;
+
ptr = GetProcAddress((HMODULE) handle, symbol);
- if (!ptr)
+ if (!ptr)
{
set_dl_error();
return NULL;
void *
dlopen(const char *path, int mode)
{
- HMODULE h;
- int prevmode;
+ HMODULE h;
+ int prevmode;
/* Disable popup error messages when loading DLLs */
prevmode = SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX);
h = LoadLibrary(path);
SetErrorMode(prevmode);
-
- if (!h)
+
+ if (!h)
{
set_dl_error();
return NULL;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/ipc_test.c,v 1.17 2005/02/05 20:07:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/ipc_test.c,v 1.18 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
shmem_exit(code);
while (--on_proc_exit_index >= 0)
(*on_proc_exit_list[on_proc_exit_index].function) (code,
- on_proc_exit_list[on_proc_exit_index].arg);
+ on_proc_exit_list[on_proc_exit_index].arg);
exit(code);
}
{
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
- on_shmem_exit_list[on_shmem_exit_index].arg);
+ on_shmem_exit_list[on_shmem_exit_index].arg);
on_shmem_exit_index = 0;
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/posix_sema.c,v 1.13 2004/12/31 22:00:29 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/posix_sema.c,v 1.14 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Unlink the semaphore immediately, so it can't be accessed
- * externally. This also ensures that it will go away if we crash.
+ * Unlink the semaphore immediately, so it can't be accessed externally.
+ * This also ensures that it will go away if we crash.
*/
sem_unlink(semname);
return mySem;
}
-
#else /* !USE_NAMED_POSIX_SEMAPHORES */
/*
int errStatus;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and lock the semaphore again.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. We
- * assume that if such an interrupt comes in while we are waiting, it
- * will cause the sem_wait() call to exit with errno == EINTR, so that
- * we will be able to service the interrupt (if not in a critical
- * section already).
+ * Each time around the loop, we check for a cancel/die interrupt. We assume
+ * that if such an interrupt comes in while we are waiting, it will cause
+ * the sem_wait() call to exit with errno == EINTR, so that we will be
+ * able to service the interrupt (if not in a critical section already).
*
* Once we acquire the lock, we do NOT check for an interrupt before
- * returning. The caller needs to be able to record ownership of the
- * lock before any interrupt can be accepted.
+ * returning. The caller needs to be able to record ownership of the lock
+ * before any interrupt can be accepted.
*
- * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
- * and entering the sem_wait() call. If a cancel/die interrupt occurs
- * in that window, we would fail to notice it until after we acquire
- * the lock (or get another interrupt to escape the sem_wait()). We
- * can avoid this problem by temporarily setting ImmediateInterruptOK
- * to true before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt
- * in this interval will execute directly. However, there is a huge
- * pitfall: there is another window of a few instructions after the
- * sem_wait() before we are able to reset ImmediateInterruptOK. If an
- * interrupt occurs then, we'll lose control, which means that the
- * lock has been acquired but our caller did not get a chance to
- * record the fact. Therefore, we only set ImmediateInterruptOK if the
- * caller tells us it's OK to do so, ie, the caller does not need to
- * record acquiring the lock. (This is currently true for lockmanager
- * locks, since the process that granted us the lock did all the
- * necessary state updates. It's not true for Posix semaphores used to
- * implement LW locks or emulate spinlocks --- but the wait time for
- * such locks should not be very long, anyway.)
+ * There is a window of a few instructions between CHECK_FOR_INTERRUPTS and
+ * entering the sem_wait() call. If a cancel/die interrupt occurs in that
+ * window, we would fail to notice it until after we acquire the lock (or
+ * get another interrupt to escape the sem_wait()). We can avoid this
+ * problem by temporarily setting ImmediateInterruptOK to true before we
+ * do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
+ * execute directly. However, there is a huge pitfall: there is another
+ * window of a few instructions after the sem_wait() before we are able to
+ * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
+ * control, which means that the lock has been acquired but our caller did
+ * not get a chance to record the fact. Therefore, we only set
+ * ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the
+ * caller does not need to record acquiring the lock. (This is currently
+ * true for lockmanager locks, since the process that granted us the lock
+ * did all the necessary state updates. It's not true for Posix semaphores
+ * used to implement LW locks or emulate spinlocks --- but the wait time
+ * for such locks should not be very long, anyway.)
*/
do
{
int errStatus;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and unlock the semaphore again. Not clear this
- * can really happen, but might as well cope.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and unlock the semaphore again. Not clear this can really happen,
+ * but might as well cope.
*/
do
{
int errStatus;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and lock the semaphore again.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and lock the semaphore again.
*/
do
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/qnx4/sem.c,v 1.12 2003/11/29 19:51:54 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/qnx4/sem.c,v 1.13 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int nsems;
sem_t sem[SEMMAX]; /* array of POSIX semaphores */
struct sem semV[SEMMAX]; /* array of System V semaphore structures */
- struct pending_ops pendingOps[SEMMAX]; /* array of pending
- * operations */
+ struct pending_ops pendingOps[SEMMAX]; /* array of pending operations */
};
struct sem_info
fprintf(stderr,
"Found a pre-existing shared memory block for the semaphore memory\n"
"of a different size (%ld instead %ld). Make sure that all executables\n"
- "are from the same release or remove the file \"/dev/shmem/%s\"\n"
+ "are from the same release or remove the file \"/dev/shmem/%s\"\n"
"left by a previous version.\n",
(long) statbuf.st_size,
(long) sem_info_size,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/qnx4/shm.c,v 1.9 2003/11/29 19:51:54 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/qnx4/shm.c,v 1.10 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
case IPC_STAT:
/*
- * we have to open it first. stat() does no prefix tracking ->
- * the call would go to fsys instead of proc
+ * we have to open it first. stat() does no prefix tracking -> the
+ * call would go to fsys instead of proc
*/
keytoname(shmid, name);
fd = shm_open(name, 0, MODE);
result = fstat(fd, &statbuf);
/*
- * if the file exists, subtract 2 from linkcount : one for
- * our own open and one for the dir entry
+ * if the file exists, subtract 2 from linkcount : one for our
+ * own open and one for the dir entry
*/
if (!result)
buf->shm_nattch = statbuf.st_nlink - 2;
else
{
/*
- * if there's no entry for this key it doesn't matter the
- * next shmget() would get a different shm anyway
+ * if there's no entry for this key it doesn't matter the next
+ * shmget() would get a different shm anyway
*/
buf->shm_nattch = 0;
return 0;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/sysv_sema.c,v 1.16 2004/12/31 22:00:29 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/sysv_sema.c,v 1.17 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define PGSemaMagic 537 /* must be less than SEMVMX */
-static IpcSemaphoreId *mySemaSets; /* IDs of sema sets acquired so
- * far */
+static IpcSemaphoreId *mySemaSets; /* IDs of sema sets acquired so far */
static int numSemaSets; /* number of sema sets acquired so far */
static int maxSemaSets; /* allocated size of mySemaSets array */
static IpcSemaphoreKey nextSemaKey; /* next key to try using */
if (semId < 0)
{
/*
- * Fail quietly if error indicates a collision with existing set.
- * One would expect EEXIST, given that we said IPC_EXCL, but
- * perhaps we could get a permission violation instead? Also,
- * EIDRM might occur if an old set is slated for destruction but
- * not gone yet.
+ * Fail quietly if error indicates a collision with existing set. One
+ * would expect EEXIST, given that we said IPC_EXCL, but perhaps we
+ * could get a permission violation instead? Also, EIDRM might occur
+ * if an old set is slated for destruction but not gone yet.
*/
if (errno == EEXIST || errno == EACCES
#ifdef EIDRM
IPC_CREAT | IPC_EXCL | IPCProtection),
(errno == ENOSPC) ?
errhint("This error does *not* mean that you have run out of disk space.\n"
- "It occurs when either the system limit for the maximum number of "
- "semaphore sets (SEMMNI), or the system wide maximum number of "
- "semaphores (SEMMNS), would be exceeded. You need to raise the "
- "respective kernel parameter. Alternatively, reduce PostgreSQL's "
- "consumption of semaphores by reducing its max_connections parameter "
+ "It occurs when either the system limit for the maximum number of "
+ "semaphore sets (SEMMNI), or the system wide maximum number of "
+ "semaphores (SEMMNS), would be exceeded. You need to raise the "
+ "respective kernel parameter. Alternatively, reduce PostgreSQL's "
+ "consumption of semaphores by reducing its max_connections parameter "
"(currently %d).\n"
- "The PostgreSQL documentation contains more information about "
+ "The PostgreSQL documentation contains more information about "
"configuring your system for PostgreSQL.",
MaxBackends) : 0));
}
semId, semNum, value),
(errno == ERANGE) ?
errhint("You possibly need to raise your kernel's SEMVMX value to be at least "
- "%d. Look into the PostgreSQL documentation for details.",
+ "%d. Look into the PostgreSQL documentation for details.",
value) : 0));
}
continue; /* sema belongs to a non-Postgres app */
/*
- * If the creator PID is my own PID or does not belong to any
- * extant process, it's safe to zap it.
+ * If the creator PID is my own PID or does not belong to any extant
+ * process, it's safe to zap it.
*/
creatorPID = IpcSemaphoreGetLastPID(semId, numSems);
if (creatorPID <= 0)
}
/*
- * The sema set appears to be from a dead Postgres process, or
- * from a previous cycle of life in this same process. Zap it, if
- * possible. This probably shouldn't fail, but if it does, assume
- * the sema set belongs to someone else after all, and continue
- * quietly.
+ * The sema set appears to be from a dead Postgres process, or from a
+ * previous cycle of life in this same process. Zap it, if possible.
+ * This probably shouldn't fail, but if it does, assume the sema set
+ * belongs to someone else after all, and continue quietly.
*/
semun.val = 0; /* unused, but keep compiler quiet */
if (semctl(semId, 0, IPC_RMID, semun) < 0)
break; /* successful create */
/*
- * Can only get here if some other process managed to create the
- * same sema key before we did. Let him have that one, loop
- * around to try next key.
+ * Can only get here if some other process managed to create the same
+ * sema key before we did. Let him have that one, loop around to try
+ * next key.
*/
}
/*
- * OK, we created a new sema set. Mark it as created by this process.
- * We do this by setting the spare semaphore to PGSemaMagic-1 and then
- * incrementing it with semop(). That leaves it with value
- * PGSemaMagic and sempid referencing this process.
+ * OK, we created a new sema set. Mark it as created by this process. We
+ * do this by setting the spare semaphore to PGSemaMagic-1 and then
+ * incrementing it with semop(). That leaves it with value PGSemaMagic
+ * and sempid referencing this process.
*/
IpcSemaphoreInitialize(semId, numSems, PGSemaMagic - 1);
mysema.semId = semId;
elog(PANIC, "out of memory");
numSemaSets = 0;
nextSemaKey = port * 1000;
- nextSemaNumber = SEMAS_PER_SET; /* force sema set alloc on 1st
- * call */
+ nextSemaNumber = SEMAS_PER_SET; /* force sema set alloc on 1st call */
on_shmem_exit(ReleaseSemaphores, 0);
}
sops.sem_num = sema->semNum;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and lock the semaphore again.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. We
- * assume that if such an interrupt comes in while we are waiting, it
- * will cause the semop() call to exit with errno == EINTR, so that we
- * will be able to service the interrupt (if not in a critical section
- * already).
+ * Each time around the loop, we check for a cancel/die interrupt. We assume
+ * that if such an interrupt comes in while we are waiting, it will cause
+ * the semop() call to exit with errno == EINTR, so that we will be able
+ * to service the interrupt (if not in a critical section already).
*
* Once we acquire the lock, we do NOT check for an interrupt before
- * returning. The caller needs to be able to record ownership of the
- * lock before any interrupt can be accepted.
+ * returning. The caller needs to be able to record ownership of the lock
+ * before any interrupt can be accepted.
*
- * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
- * and entering the semop() call. If a cancel/die interrupt occurs in
- * that window, we would fail to notice it until after we acquire the
- * lock (or get another interrupt to escape the semop()). We can
- * avoid this problem by temporarily setting ImmediateInterruptOK to
- * true before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt in
- * this interval will execute directly. However, there is a huge
- * pitfall: there is another window of a few instructions after the
- * semop() before we are able to reset ImmediateInterruptOK. If an
- * interrupt occurs then, we'll lose control, which means that the
- * lock has been acquired but our caller did not get a chance to
- * record the fact. Therefore, we only set ImmediateInterruptOK if the
- * caller tells us it's OK to do so, ie, the caller does not need to
- * record acquiring the lock. (This is currently true for lockmanager
- * locks, since the process that granted us the lock did all the
- * necessary state updates. It's not true for SysV semaphores used to
- * implement LW locks or emulate spinlocks --- but the wait time for
- * such locks should not be very long, anyway.)
+ * There is a window of a few instructions between CHECK_FOR_INTERRUPTS and
+ * entering the semop() call. If a cancel/die interrupt occurs in that
+ * window, we would fail to notice it until after we acquire the lock (or
+ * get another interrupt to escape the semop()). We can avoid this
+ * problem by temporarily setting ImmediateInterruptOK to true before we
+ * do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
+ * execute directly. However, there is a huge pitfall: there is another
+ * window of a few instructions after the semop() before we are able to
+ * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
+ * control, which means that the lock has been acquired but our caller did
+ * not get a chance to record the fact. Therefore, we only set
+ * ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the
+ * caller does not need to record acquiring the lock. (This is currently
+ * true for lockmanager locks, since the process that granted us the lock
+ * did all the necessary state updates. It's not true for SysV semaphores
+ * used to implement LW locks or emulate spinlocks --- but the wait time
+ * for such locks should not be very long, anyway.)
*/
do
{
sops.sem_num = sema->semNum;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and unlock the semaphore again. Not clear this
- * can really happen, but might as well cope.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and unlock the semaphore again. Not clear this can really happen,
+ * but might as well cope.
*/
do
{
sops.sem_num = sema->semNum;
/*
- * Note: if errStatus is -1 and errno == EINTR then it means we
- * returned from the operation prematurely because we were sent a
- * signal. So we try and lock the semaphore again.
+ * Note: if errStatus is -1 and errno == EINTR then it means we returned
+ * from the operation prematurely because we were sent a signal. So we
+ * try and lock the semaphore again.
*/
do
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.43 2005/08/20 23:26:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.44 2005/10/15 02:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (shmid < 0)
{
/*
- * Fail quietly if error indicates a collision with existing
- * segment. One would expect EEXIST, given that we said IPC_EXCL,
- * but perhaps we could get a permission violation instead? Also,
- * EIDRM might occur if an old seg is slated for destruction but
- * not gone yet.
+ * Fail quietly if error indicates a collision with existing segment.
+ * One would expect EEXIST, given that we said IPC_EXCL, but perhaps
+ * we could get a permission violation instead? Also, EIDRM might
+ * occur if an old seg is slated for destruction but not gone yet.
*/
if (errno == EEXIST || errno == EACCES
#ifdef EIDRM
*/
ereport(FATAL,
(errmsg("could not create shared memory segment: %m"),
- errdetail("Failed system call was shmget(key=%lu, size=%lu, 0%o).",
- (unsigned long) memKey, (unsigned long) size,
- IPC_CREAT | IPC_EXCL | IPCProtection),
+ errdetail("Failed system call was shmget(key=%lu, size=%lu, 0%o).",
+ (unsigned long) memKey, (unsigned long) size,
+ IPC_CREAT | IPC_EXCL | IPCProtection),
(errno == EINVAL) ?
errhint("This error usually means that PostgreSQL's request for a shared memory "
- "segment exceeded your kernel's SHMMAX parameter. You can either "
+ "segment exceeded your kernel's SHMMAX parameter. You can either "
"reduce the request size or reconfigure the kernel with larger SHMMAX. "
- "To reduce the request size (currently %lu bytes), reduce "
- "PostgreSQL's shared_buffers parameter (currently %d) and/or "
+ "To reduce the request size (currently %lu bytes), reduce "
+ "PostgreSQL's shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
"If the request size is already small, it's possible that it is less than "
"your kernel's SHMMIN parameter, in which case raising the request size or "
"reconfiguring SHMMIN is called for.\n"
- "The PostgreSQL documentation contains more information about shared "
+ "The PostgreSQL documentation contains more information about shared "
"memory configuration.",
(unsigned long) size, NBuffers, MaxBackends) : 0,
(errno == ENOMEM) ?
errhint("This error usually means that PostgreSQL's request for a shared "
- "memory segment exceeded available memory or swap space. "
- "To reduce the request size (currently %lu bytes), reduce "
- "PostgreSQL's shared_buffers parameter (currently %d) and/or "
+ "memory segment exceeded available memory or swap space. "
+ "To reduce the request size (currently %lu bytes), reduce "
+ "PostgreSQL's shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
- "The PostgreSQL documentation contains more information about shared "
+ "The PostgreSQL documentation contains more information about shared "
"memory configuration.",
(unsigned long) size, NBuffers, MaxBackends) : 0,
(errno == ENOSPC) ?
errhint("This error does *not* mean that you have run out of disk space. "
"It occurs either if all available shared memory IDs have been taken, "
"in which case you need to raise the SHMMNI parameter in your kernel, "
- "or because the system's overall limit for shared memory has been "
- "reached. If you cannot increase the shared memory limit, "
- "reduce PostgreSQL's shared memory request (currently %lu bytes), "
- "by reducing its shared_buffers parameter (currently %d) and/or "
+ "or because the system's overall limit for shared memory has been "
+ "reached. If you cannot increase the shared memory limit, "
+ "reduce PostgreSQL's shared memory request (currently %lu bytes), "
+ "by reducing its shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
- "The PostgreSQL documentation contains more information about shared "
+ "The PostgreSQL documentation contains more information about shared "
"memory configuration.",
(unsigned long) size, NBuffers, MaxBackends) : 0));
}
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
{
IpcMemoryId shmId = (IpcMemoryId) id2;
struct shmid_ds shmStat;
+
#ifndef WIN32
struct stat statbuf;
PGShmemHeader *hdr;
#endif
/*
- * We detect whether a shared memory segment is in use by seeing
- * whether it (a) exists and (b) has any processes are attached to it.
+ * We detect whether a shared memory segment is in use by seeing whether
+ * it (a) exists and (b) has any processes are attached to it.
*/
if (shmctl(shmId, IPC_STAT, &shmStat) < 0)
{
/*
* EINVAL actually has multiple possible causes documented in the
- * shmctl man page, but we assume it must mean the segment no
- * longer exists.
+ * shmctl man page, but we assume it must mean the segment no longer
+ * exists.
*/
if (errno == EINVAL)
return false;
+
/*
- * EACCES implies that the segment belongs to some other userid,
- * which means it is not a Postgres shmem segment (or at least,
- * not one that is relevant to our data directory).
+ * EACCES implies that the segment belongs to some other userid, which
+ * means it is not a Postgres shmem segment (or at least, not one that
+ * is relevant to our data directory).
*/
if (errno == EACCES)
return false;
+
/*
- * Otherwise, we had better assume that the segment is in use.
- * The only likely case is EIDRM, which implies that the segment
- * has been IPC_RMID'd but there are still processes attached to it.
+ * Otherwise, we had better assume that the segment is in use. The
+ * only likely case is EIDRM, which implies that the segment has been
+ * IPC_RMID'd but there are still processes attached to it.
*/
return true;
}
void *memAddress;
PGShmemHeader *hdr;
IpcMemoryId shmid;
+
#ifndef WIN32
struct stat statbuf;
#endif
}
/*
- * The segment appears to be from a dead Postgres process, or from
- * a previous cycle of life in this same process. Zap it, if
- * possible. This probably shouldn't fail, but if it does, assume
- * the segment belongs to someone else after all, and continue
- * quietly.
+ * The segment appears to be from a dead Postgres process, or from a
+ * previous cycle of life in this same process. Zap it, if possible.
+ * This probably shouldn't fail, but if it does, assume the segment
+ * belongs to someone else after all, and continue quietly.
*/
shmdt(memAddress);
if (shmctl(shmid, IPC_RMID, NULL) < 0)
break; /* successful create and attach */
/*
- * Can only get here if some other process managed to create the
- * same shmem key before we did. Let him have that one, loop
- * around to try next key.
+ * Can only get here if some other process managed to create the same
+ * shmem key before we did. Let him have that one, loop around to try
+ * next key.
*/
}
/*
- * OK, we created a new segment. Mark it as created by this process.
- * The order of assignments here is critical so that another Postgres
- * process can't see the header as valid but belonging to an invalid
- * PID!
+ * OK, we created a new segment. Mark it as created by this process. The
+ * order of assignments here is critical so that another Postgres process
+ * can't see the header as valid but belonging to an invalid PID!
*/
hdr = (PGShmemHeader *) memAddress;
hdr->creatorPID = getpid();
/*
* PGSharedMemoryReAttach
*
- * Re-attach to an already existing shared memory segment. In the non
+ * Re-attach to an already existing shared memory segment. In the non
* EXEC_BACKEND case this is not used, because postmaster children inherit
* the shared memory segment attachment via fork().
*
UsedShmemSegAddr = hdr; /* probably redundant */
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
* PGSharedMemoryDetach
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/error.c,v 1.5 2005/10/07 16:34:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/error.c,v 1.6 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
errno = doserrors[i].doserr;
ereport(DEBUG5,
(errmsg_internal("mapped win32 error code %lu to %d",
- e, errno)));
+ e, errno)));
return;
}
}
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/security.c,v 1.8 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/security.c,v 1.9 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static BOOL pgwin32_get_dynamic_tokeninfo(HANDLE token,
- TOKEN_INFORMATION_CLASS class, char **InfoBuffer,
- char *errbuf, int errsize);
+ TOKEN_INFORMATION_CLASS class, char **InfoBuffer,
+ char *errbuf, int errsize);
/*
* Returns nonzero if the current user has administrative privileges,
{
HANDLE AccessToken;
char *InfoBuffer = NULL;
- char errbuf[256];
+ char errbuf[256];
PTOKEN_GROUPS Groups;
PSID AdministratorsSid;
PSID PowerUsersSid;
CloseHandle(AccessToken);
if (!AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0,
0, &AdministratorsSid))
{
write_stderr("could not get SID for Administrators group: error code %d\n",
}
if (!AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0,
0, &PowerUsersSid))
{
write_stderr("could not get SID for PowerUsers group: error code %d\n",
{
static int _is_service = -1;
HANDLE AccessToken;
- char *InfoBuffer = NULL;
- char errbuf[256];
+ char *InfoBuffer = NULL;
+ char errbuf[256];
PTOKEN_GROUPS Groups;
PTOKEN_USER User;
PSID ServiceSid;
if (!pgwin32_get_dynamic_tokeninfo(AccessToken, TokenUser, &InfoBuffer,
errbuf, sizeof(errbuf)))
{
- fprintf(stderr,errbuf);
+ fprintf(stderr, errbuf);
return -1;
}
User = (PTOKEN_USER) InfoBuffer;
if (!AllocateAndInitializeSid(&NtAuthority, 1,
- SECURITY_LOCAL_SYSTEM_RID, 0, 0, 0, 0, 0, 0, 0,
+ SECURITY_LOCAL_SYSTEM_RID, 0, 0, 0, 0, 0, 0, 0,
&LocalSystemSid))
{
fprintf(stderr, "could not get SID for local system account\n");
if (!pgwin32_get_dynamic_tokeninfo(AccessToken, TokenGroups, &InfoBuffer,
errbuf, sizeof(errbuf)))
{
- fprintf(stderr,errbuf);
+ fprintf(stderr, errbuf);
return -1;
}
Groups = (PTOKEN_GROUPS) InfoBuffer;
if (!AllocateAndInitializeSid(&NtAuthority, 1,
- SECURITY_SERVICE_RID, 0, 0, 0, 0, 0, 0, 0,
+ SECURITY_SERVICE_RID, 0, 0, 0, 0, 0, 0, 0,
&ServiceSid))
{
fprintf(stderr, "could not get SID for service group\n");
pgwin32_get_dynamic_tokeninfo(HANDLE token, TOKEN_INFORMATION_CLASS class,
char **InfoBuffer, char *errbuf, int errsize)
{
- DWORD InfoBufferSize;
+ DWORD InfoBufferSize;
if (GetTokenInformation(token, class, NULL, 0, &InfoBufferSize))
{
- snprintf(errbuf,errsize,"could not get token information: got zero size\n");
+ snprintf(errbuf, errsize, "could not get token information: got zero size\n");
return FALSE;
}
if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
{
- snprintf(errbuf,errsize,"could not get token information: error code %d\n",
+ snprintf(errbuf, errsize, "could not get token information: error code %d\n",
(int) GetLastError());
return FALSE;
}
*InfoBuffer = malloc(InfoBufferSize);
if (*InfoBuffer == NULL)
{
- snprintf(errbuf,errsize,"could not allocate %d bytes for token information\n",
+ snprintf(errbuf, errsize, "could not allocate %d bytes for token information\n",
(int) InfoBufferSize);
return FALSE;
}
- if (!GetTokenInformation(token, class, *InfoBuffer,
+ if (!GetTokenInformation(token, class, *InfoBuffer,
InfoBufferSize, &InfoBufferSize))
{
- snprintf(errbuf,errsize,"could not get token information: error code %d\n",
+ snprintf(errbuf, errsize, "could not get token information: error code %d\n",
(int) GetLastError());
return FALSE;
}
-
+
return TRUE;
}
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/sema.c,v 1.10 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/sema.c,v 1.11 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (nsops != 1)
{
/*
- * Not supported (we return on 1st success, and don't cancel
- * earlier ops)
+ * Not supported (we return on 1st success, and don't cancel earlier
+ * ops)
*/
errno = E2BIG;
return -1;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/shmem.c,v 1.10 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/shmem.c,v 1.11 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* TODO -- shmat needs to count # attached to shared mem */
void *lpmem = MapViewOfFileEx((HANDLE) memId,
FILE_MAP_WRITE | FILE_MAP_READ,
- 0, 0, /* (DWORD)pshmdsc->segsize */ 0 /* s_segsize */ , shmaddr);
+ 0, 0, /* (DWORD)pshmdsc->segsize */ 0 /* s_segsize */ , shmaddr);
if (lpmem == NULL)
{
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.11 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.12 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int pg_signal_mask;
DLLIMPORT HANDLE pgwin32_signal_event;
-HANDLE pgwin32_initial_signal_pipe = INVALID_HANDLE_VALUE;
+HANDLE pgwin32_initial_signal_pipe = INVALID_HANDLE_VALUE;
/* Signal handling thread function */
signal_thread_handle = CreateThread(NULL, 0, pg_signal_thread, NULL, 0, NULL);
if (signal_thread_handle == NULL)
ereport(FATAL,
- (errmsg_internal("failed to create signal handler thread")));
+ (errmsg_internal("failed to create signal handler thread")));
/* Create console control handle to pick up Ctrl-C etc */
if (!SetConsoleCtrlHandler(pg_console_handler, TRUE))
ereport(FATAL,
- (errmsg_internal("failed to set console control handler")));
+ (errmsg_internal("failed to set console control handler")));
}
LeaveCriticalSection(&pg_signal_crit_sec);
sig(i);
EnterCriticalSection(&pg_signal_crit_sec);
- break; /* Restart outer loop, in case signal mask
- * or queue has been modified inside
- * signal handler */
+ break; /* Restart outer loop, in case signal mask or
+ * queue has been modified inside signal
+ * handler */
}
}
}
pg_signal_mask = mask;
/*
- * Dispatch any signals queued up right away, in case we have
- * unblocked one or more signals previously queued
+ * Dispatch any signals queued up right away, in case we have unblocked
+ * one or more signals previously queued
*/
pgwin32_dispatch_queued_signals();
wsprintf(pipename, "\\\\.\\pipe\\pgsignal_%d", (int) pid);
pipe = CreateNamedPipe(pipename, PIPE_ACCESS_DUPLEX,
- PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
+ PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
PIPE_UNLIMITED_INSTANCES, 16, 16, 1000, NULL);
if (pipe == INVALID_HANDLE_VALUE)
CloseHandle(pipe);
return 0;
}
- WriteFile(pipe, &sigNum, 1, &bytes, NULL); /* Don't care if it works
- * or not.. */
+ WriteFile(pipe, &sigNum, 1, &bytes, NULL); /* Don't care if it works or
+ * not.. */
FlushFileBuffers(pipe);
DisconnectNamedPipe(pipe);
CloseHandle(pipe);
pg_signal_thread(LPVOID param)
{
char pipename[128];
- HANDLE pipe = pgwin32_initial_signal_pipe;
+ HANDLE pipe = pgwin32_initial_signal_pipe;
wsprintf(pipename, "\\\\.\\pipe\\pgsignal_%d", GetCurrentProcessId());
if (pipe == INVALID_HANDLE_VALUE)
{
pipe = CreateNamedPipe(pipename, PIPE_ACCESS_DUPLEX,
- PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
- PIPE_UNLIMITED_INSTANCES, 16, 16, 1000, NULL);
+ PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
+ PIPE_UNLIMITED_INSTANCES, 16, 16, 1000, NULL);
if (pipe == INVALID_HANDLE_VALUE)
{
if (fConnected)
{
hThread = CreateThread(NULL, 0,
- (LPTHREAD_START_ROUTINE) pg_signal_dispatch_thread,
+ (LPTHREAD_START_ROUTINE) pg_signal_dispatch_thread,
(LPVOID) pipe, 0, NULL);
if (hThread == INVALID_HANDLE_VALUE)
write_stderr("could not create signal dispatch thread: error code %d\n",
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.8 2004/12/31 22:00:37 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.9 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
SOCKET rs;
/*
- * Poll for signals, but don't return with EINTR, since we don't
- * handle that in pqcomm.c
+ * Poll for signals, but don't return with EINTR, since we don't handle
+ * that in pqcomm.c
*/
pgwin32_poll_signals();
if (WSAGetLastError() != WSAEWOULDBLOCK)
/*
- * Not completed, and not just "would block", so an
- * error occured
+ * Not completed, and not just "would block", so an error
+ * occured
*/
FD_SET(writefds->fd_array[i], &outwritefds);
}
if (r != WAIT_TIMEOUT && r != WAIT_IO_COMPLETION && r != (WAIT_OBJECT_0 + numevents))
{
/*
- * We scan all events, even those not signalled, in case more than
- * one event has been tagged but Wait.. can only return one.
+ * We scan all events, even those not signalled, in case more than one
+ * event has been tagged but Wait.. can only return one.
*/
WSANETWORKEVENTS resEvents;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.4 2005/08/15 16:25:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.5 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static time_t last_autovac_stop_time = 0;
/* Memory context for long-lived data */
-static MemoryContext AutovacMemCxt;
+static MemoryContext AutovacMemCxt;
/* struct to keep list of candidate databases for vacuum */
typedef struct autovac_dbase
{
- Oid oid;
- char *name;
- TransactionId frozenxid;
- TransactionId vacuumxid;
+ Oid oid;
+ char *name;
+ TransactionId frozenxid;
+ TransactionId vacuumxid;
PgStat_StatDBEntry *entry;
- int32 age;
+ int32 age;
} autovac_dbase;
/* struct to keep track of tables to vacuum and/or analyze */
static void do_autovacuum(PgStat_StatDBEntry *dbentry);
static List *autovac_get_database_list(void);
static void test_rel_for_autovac(Oid relid, PgStat_StatTabEntry *tabentry,
- Form_pg_class classForm,
- Form_pg_autovacuum avForm,
- List **vacuum_tables,
- List **toast_table_ids);
+ Form_pg_class classForm,
+ Form_pg_autovacuum avForm,
+ List **vacuum_tables,
+ List **toast_table_ids);
static void autovacuum_do_vac_analyze(List *relids, bool dovacuum,
- bool doanalyze, bool freeze);
+ bool doanalyze, bool freeze);
/*
return 0;
/*
- * Do nothing if too soon since last autovacuum exit. This limits
- * how often the daemon runs. Since the time per iteration can be
- * quite variable, it seems more useful to measure/control the time
- * since last subprocess exit than since last subprocess launch.
+ * Do nothing if too soon since last autovacuum exit. This limits how
+ * often the daemon runs. Since the time per iteration can be quite
+ * variable, it seems more useful to measure/control the time since last
+ * subprocess exit than since last subprocess launch.
*
- * However, we *also* check the time since last subprocess launch;
- * this prevents thrashing under fork-failure conditions.
+ * However, we *also* check the time since last subprocess launch; this
+ * prevents thrashing under fork-failure conditions.
*
- * Note that since we will be re-called from the postmaster main loop,
- * we will get another chance later if we do nothing now.
+ * Note that since we will be re-called from the postmaster main loop, we
+ * will get another chance later if we do nothing now.
*
* XXX todo: implement sleep scale factor that existed in contrib code.
*/
last_autovac_start_time = curtime;
#ifdef EXEC_BACKEND
- switch((AutoVacPID = autovac_forkexec()))
+ switch ((AutoVacPID = autovac_forkexec()))
#else
- switch((AutoVacPID = fork_process()))
+ switch ((AutoVacPID = fork_process()))
#endif
{
case -1:
ereport(LOG,
- (errmsg("could not fork autovacuum process: %m")));
+ (errmsg("could not fork autovacuum process: %m")));
return 0;
#ifndef EXEC_BACKEND
av[ac++] = "postgres";
av[ac++] = "-forkautovac";
- av[ac++] = NULL; /* filled in by postmaster_forkexec */
+ av[ac++] = NULL; /* filled in by postmaster_forkexec */
av[ac] = NULL;
Assert(ac < lengthof(av));
return postmaster_forkexec(ac, av);
}
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
* AutoVacMain
NON_EXEC_STATIC void
AutoVacMain(int argc, char *argv[])
{
- ListCell *cell;
- List *dblist;
- TransactionId nextXid;
- autovac_dbase *db;
- bool whole_db;
- sigjmp_buf local_sigjmp_buf;
+ ListCell *cell;
+ List *dblist;
+ TransactionId nextXid;
+ autovac_dbase *db;
+ bool whole_db;
+ sigjmp_buf local_sigjmp_buf;
/* we are a postmaster subprocess now */
IsUnderPostmaster = true;
SetProcessingMode(InitProcessing);
/*
- * Set up signal handlers. We operate on databases much like a
- * regular backend, so we use the same signal handling. See
- * equivalent code in tcop/postgres.c.
+ * Set up signal handlers. We operate on databases much like a regular
+ * backend, so we use the same signal handling. See equivalent code in
+ * tcop/postgres.c.
*
- * Currently, we don't pay attention to postgresql.conf changes
- * that happen during a single daemon iteration, so we can ignore
- * SIGHUP.
+ * Currently, we don't pay attention to postgresql.conf changes that happen
+ * during a single daemon iteration, so we can ignore SIGHUP.
*/
pqsignal(SIGHUP, SIG_IGN);
+
/*
- * Presently, SIGINT will lead to autovacuum shutdown, because that's
- * how we handle ereport(ERROR). It could be improved however.
+ * Presently, SIGINT will lead to autovacuum shutdown, because that's how
+ * we handle ereport(ERROR). It could be improved however.
*/
pqsignal(SIGINT, StatementCancelHandler);
pqsignal(SIGTERM, die);
EmitErrorReport();
/*
- * We can now go away. Note that because we'll call InitProcess,
- * a callback will be registered to do ProcKill, which will clean
- * up necessary state.
+ * We can now go away. Note that because we'll call InitProcess, a
+ * callback will be registered to do ProcKill, which will clean up
+ * necessary state.
*/
proc_exit(0);
}
dblist = autovac_get_database_list();
/*
- * Get the next Xid that was current as of the last checkpoint.
- * We need it to determine whether databases are about to need
- * database-wide vacuums.
+ * Get the next Xid that was current as of the last checkpoint. We need it
+ * to determine whether databases are about to need database-wide vacuums.
*/
nextXid = GetRecentNextXid();
* recently auto-vacuumed, or one that needs database-wide vacuum (to
* prevent Xid wraparound-related data loss).
*
- * Note that a database with no stats entry is not considered, except
- * for Xid wraparound purposes. The theory is that if no one has ever
- * connected to it since the stats were last initialized, it doesn't
- * need vacuuming.
+ * Note that a database with no stats entry is not considered, except for Xid
+ * wraparound purposes. The theory is that if no one has ever connected
+ * to it since the stats were last initialized, it doesn't need vacuuming.
*
* XXX This could be improved if we had more info about whether it needs
* vacuuming before connecting to it. Perhaps look through the pgstats
* data for the database's tables? One idea is to keep track of the
* number of new and dead tuples per database in pgstats. However it
- * isn't clear how to construct a metric that measures that and not
- * cause starvation for less busy databases.
+ * isn't clear how to construct a metric that measures that and not cause
+ * starvation for less busy databases.
*/
db = NULL;
whole_db = false;
foreach(cell, dblist)
{
- autovac_dbase *tmp = lfirst(cell);
- bool this_whole_db;
- int32 freeze_age,
- vacuum_age;
+ autovac_dbase *tmp = lfirst(cell);
+ bool this_whole_db;
+ int32 freeze_age,
+ vacuum_age;
/*
* We look for the database that most urgently needs a database-wide
- * vacuum. We decide that a database-wide vacuum is needed 100000
+ * vacuum. We decide that a database-wide vacuum is needed 100000
* transactions sooner than vacuum.c's vac_truncate_clog() would
* decide to start giving warnings. If any such db is found, we
* ignore all other dbs.
*
- * Unlike vacuum.c, we also look at vacuumxid. This is so that
- * pg_clog can be kept trimmed to a reasonable size.
+ * Unlike vacuum.c, we also look at vacuumxid. This is so that pg_clog
+ * can be kept trimmed to a reasonable size.
*/
freeze_age = (int32) (nextXid - tmp->frozenxid);
vacuum_age = (int32) (nextXid - tmp->vacuumxid);
* modified, after the database was dropped from the pg_database
* table. (This is of course a not-very-bulletproof test, but it's
* cheap to make. If we do mistakenly choose a recently dropped
- * database, InitPostgres will fail and we'll drop out until the
- * next autovac run.)
+ * database, InitPostgres will fail and we'll drop out until the next
+ * autovac run.)
*/
if (tmp->entry->destroy != 0)
continue;
if (db)
{
/*
- * Report autovac startup to the stats collector. We deliberately
- * do this before InitPostgres, so that the last_autovac_time will
- * get updated even if the connection attempt fails. This is to
- * prevent autovac from getting "stuck" repeatedly selecting an
- * unopenable database, rather than making any progress on stuff
- * it can connect to.
+ * Report autovac startup to the stats collector. We deliberately do
+ * this before InitPostgres, so that the last_autovac_time will get
+ * updated even if the connection attempt fails. This is to prevent
+ * autovac from getting "stuck" repeatedly selecting an unopenable
+ * database, rather than making any progress on stuff it can connect
+ * to.
*/
pgstat_report_autovac(db->oid);
/*
* autovac_get_database_list
*
- * Return a list of all databases. Note we cannot use pg_database,
+ * Return a list of all databases. Note we cannot use pg_database,
* because we aren't connected yet; we use the flat database file.
*/
static List *
autovac_get_database_list(void)
{
- char *filename;
- List *dblist = NIL;
- char thisname[NAMEDATALEN];
- FILE *db_file;
- Oid db_id;
- Oid db_tablespace;
+ char *filename;
+ List *dblist = NIL;
+ char thisname[NAMEDATALEN];
+ FILE *db_file;
+ Oid db_id;
+ Oid db_tablespace;
TransactionId db_frozenxid;
TransactionId db_vacuumxid;
&db_tablespace, &db_frozenxid,
&db_vacuumxid))
{
- autovac_dbase *db;
+ autovac_dbase *db;
db = (autovac_dbase *) palloc(sizeof(autovac_dbase));
static void
process_whole_db(void)
{
- Relation dbRel;
- ScanKeyData entry[1];
- SysScanDesc scan;
- HeapTuple tup;
+ Relation dbRel;
+ ScanKeyData entry[1];
+ SysScanDesc scan;
+ HeapTuple tup;
Form_pg_database dbForm;
- bool freeze;
+ bool freeze;
/* Start a transaction so our commands have one to play into. */
StartTransactionCommand();
static void
do_autovacuum(PgStat_StatDBEntry *dbentry)
{
- Relation classRel,
- avRel;
- HeapTuple tuple;
- HeapScanDesc relScan;
- List *vacuum_tables = NIL;
- List *toast_table_ids = NIL;
- ListCell *cell;
+ Relation classRel,
+ avRel;
+ HeapTuple tuple;
+ HeapScanDesc relScan;
+ List *vacuum_tables = NIL;
+ List *toast_table_ids = NIL;
+ ListCell *cell;
PgStat_StatDBEntry *shared;
/* Start a transaction so our commands have one to play into. */
StartTransactionCommand();
/*
- * StartTransactionCommand and CommitTransactionCommand will
- * automatically switch to other contexts. We need this one
- * to keep the list of relations to vacuum/analyze across
- * transactions.
+ * StartTransactionCommand and CommitTransactionCommand will automatically
+ * switch to other contexts. We need this one to keep the list of
+ * relations to vacuum/analyze across transactions.
*/
MemoryContextSwitchTo(AutovacMemCxt);
/*
* Scan pg_class and determine which tables to vacuum.
*
- * The stats subsystem collects stats for toast tables independently
- * of the stats for their parent tables. We need to check those stats
- * since in cases with short, wide tables there might be proportionally
- * much more activity in the toast table than in its parent.
+ * The stats subsystem collects stats for toast tables independently of the
+ * stats for their parent tables. We need to check those stats since in
+ * cases with short, wide tables there might be proportionally much more
+ * activity in the toast table than in its parent.
*
* Since we can only issue VACUUM against the parent table, we need to
* transpose a decision to vacuum a toast table into a decision to vacuum
- * its parent. There's no point in considering ANALYZE on a toast table,
- * either. To support this, we keep a list of OIDs of toast tables that
+ * its parent. There's no point in considering ANALYZE on a toast table,
+ * either. To support this, we keep a list of OIDs of toast tables that
* need vacuuming alongside the list of regular tables. Regular tables
* will be entered into the table list even if they appear not to need
- * vacuuming; we go back and re-mark them after finding all the
- * vacuumable toast tables.
+ * vacuuming; we go back and re-mark them after finding all the vacuumable
+ * toast tables.
*/
relScan = heap_beginscan(classRel, SnapshotNow, 0, NULL);
Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple);
Form_pg_autovacuum avForm = NULL;
PgStat_StatTabEntry *tabentry;
- SysScanDesc avScan;
+ SysScanDesc avScan;
HeapTuple avTup;
- ScanKeyData entry[1];
+ ScanKeyData entry[1];
Oid relid;
/* Consider only regular and toast tables. */
continue;
/*
- * Skip temp tables (i.e. those in temp namespaces). We cannot
- * safely process other backends' temp tables.
+ * Skip temp tables (i.e. those in temp namespaces). We cannot safely
+ * process other backends' temp tables.
*/
if (isTempNamespace(classForm->relnamespace))
continue;
/*
* test_rel_for_autovac
*
- * Check whether a table needs to be vacuumed or analyzed. Add it to the
+ * Check whether a table needs to be vacuumed or analyzed. Add it to the
* appropriate output list if so.
*
* A table needs to be vacuumed if the number of dead tuples exceeds a
List **vacuum_tables,
List **toast_table_ids)
{
- Relation rel;
- float4 reltuples; /* pg_class.reltuples */
+ Relation rel;
+ float4 reltuples; /* pg_class.reltuples */
+
/* constants from pg_autovacuum or GUC variables */
- int vac_base_thresh,
- anl_base_thresh;
- float4 vac_scale_factor,
- anl_scale_factor;
+ int vac_base_thresh,
+ anl_base_thresh;
+ float4 vac_scale_factor,
+ anl_scale_factor;
+
/* thresholds calculated from above constants */
- float4 vacthresh,
- anlthresh;
+ float4 vacthresh,
+ anlthresh;
+
/* number of vacuum (resp. analyze) tuples at this time */
- float4 vactuples,
- anltuples;
+ float4 vactuples,
+ anltuples;
+
/* cost-based vacuum delay parameters */
- int vac_cost_limit;
- int vac_cost_delay;
- bool dovacuum;
- bool doanalyze;
+ int vac_cost_limit;
+ int vac_cost_delay;
+ bool dovacuum;
+ bool doanalyze;
/* User disabled it in pg_autovacuum? */
if (avForm && !avForm->enabled)
return;
/*
- * Skip a table not found in stat hash. If it's not acted upon,
- * there's no need to vacuum it. (Note that database-level check
- * will take care of Xid wraparound.)
+ * Skip a table not found in stat hash. If it's not acted upon, there's
+ * no need to vacuum it. (Note that database-level check will take care
+ * of Xid wraparound.)
*/
if (!PointerIsValid(tabentry))
return;
anlthresh = (float4) anl_base_thresh + anl_scale_factor * reltuples;
/*
- * Note that we don't need to take special consideration for stat
- * reset, because if that happens, the last vacuum and analyze counts
- * will be reset too.
+ * Note that we don't need to take special consideration for stat reset,
+ * because if that happens, the last vacuum and analyze counts will be
+ * reset too.
*/
elog(DEBUG3, "%s: vac: %.0f (threshold %.0f), anl: %.0f (threshold %.0f)",
/*
* autovacuum_do_vac_analyze
- * Vacuum and/or analyze a list of tables; or all tables if relids = NIL
+ * Vacuum and/or analyze a list of tables; or all tables if relids = NIL
*/
static void
autovacuum_do_vac_analyze(List *relids, bool dovacuum, bool doanalyze,
bool freeze)
{
- VacuumStmt *vacstmt;
- MemoryContext old_cxt;
-
+ VacuumStmt *vacstmt;
+ MemoryContext old_cxt;
+
/*
* The node must survive transaction boundaries, so make sure we create it
* in a long-lived context
*/
old_cxt = MemoryContextSwitchTo(AutovacMemCxt);
-
+
vacstmt = makeNode(VacuumStmt);
/*
* Point QueryContext to the autovac memory context to fake out the
- * PreventTransactionChain check inside vacuum(). Note that this
- * is also why we palloc vacstmt instead of just using a local variable.
+ * PreventTransactionChain check inside vacuum(). Note that this is also
+ * why we palloc vacstmt instead of just using a local variable.
*/
QueryContext = CurrentMemoryContext;
/*
* AutoVacuumingActive
- * Check GUC vars and report whether the autovacuum process should be
- * running.
+ * Check GUC vars and report whether the autovacuum process should be
+ * running.
*/
bool
AutoVacuumingActive(void)
/*
* autovac_init
- * This is called at postmaster initialization.
+ * This is called at postmaster initialization.
*
* Annoy the user if he got it wrong.
*/
ereport(WARNING,
(errmsg("autovacuum not started because of misconfiguration"),
errhint("Enable options \"stats_start_collector\" and \"stats_row_level\".")));
+
/*
* Set the GUC var so we don't fork autovacuum uselessly, and also to
* help debugging.
/*
* IsAutoVacuumProcess
- * Return whether this process is an autovacuum process.
+ * Return whether this process is an autovacuum process.
*/
bool
IsAutoVacuumProcess(void)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.20 2005/09/12 22:20:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.21 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* Note: we deliberately ignore SIGTERM, because during a standard Unix
* system shutdown cycle, init will SIGTERM all processes at once. We
- * want to wait for the backends to exit, whereupon the postmaster
- * will tell us it's okay to shut down (via SIGUSR2).
+ * want to wait for the backends to exit, whereupon the postmaster will
+ * tell us it's okay to shut down (via SIGUSR2).
*
- * SIGUSR1 is presently unused; keep it spare in case someday we want
- * this process to participate in sinval messaging.
+ * SIGUSR1 is presently unused; keep it spare in case someday we want this
+ * process to participate in sinval messaging.
*/
pqsignal(SIGHUP, BgSigHupHandler); /* set flag to read config file */
pqsignal(SIGINT, ReqCheckpointHandler); /* request checkpoint */
#endif
/*
- * Initialize so that first time-driven checkpoint happens at the
- * correct time.
+ * Initialize so that first time-driven checkpoint happens at the correct
+ * time.
*/
last_checkpoint_time = time(NULL);
/*
- * Create a memory context that we will do all our work in. We do this
- * so that we can reset the context during error recovery and thereby
- * avoid possible memory leaks. Formerly this code just ran in
+ * Create a memory context that we will do all our work in. We do this so
+ * that we can reset the context during error recovery and thereby avoid
+ * possible memory leaks. Formerly this code just ran in
* TopMemoryContext, but resetting that would be a really bad idea.
*/
bgwriter_context = AllocSetContextCreate(TopMemoryContext,
}
/*
- * Now return to normal top-level context and clear ErrorContext
- * for next time.
+ * Now return to normal top-level context and clear ErrorContext for
+ * next time.
*/
MemoryContextSwitchTo(bgwriter_context);
FlushErrorState();
RESUME_INTERRUPTS();
/*
- * Sleep at least 1 second after any error. A write error is
- * likely to be repeated, and we don't want to be filling the
- * error logs as fast as we can.
+ * Sleep at least 1 second after any error. A write error is likely
+ * to be repeated, and we don't want to be filling the error logs as
+ * fast as we can.
*/
pg_usleep(1000000L);
}
}
/*
- * Do an unforced checkpoint if too much time has elapsed since
- * the last one.
+ * Do an unforced checkpoint if too much time has elapsed since the
+ * last one.
*/
now = time(NULL);
elapsed_secs = now - last_checkpoint_time;
/*
* We will warn if (a) too soon since last checkpoint (whatever
* caused it) and (b) somebody has set the ckpt_time_warn flag
- * since the last checkpoint start. Note in particular that
- * this implementation will not generate warnings caused by
+ * since the last checkpoint start. Note in particular that this
+ * implementation will not generate warnings caused by
* CheckPointTimeout < CheckPointWarning.
*/
if (BgWriterShmem->ckpt_time_warn &&
/*
* After any checkpoint, close all smgr files. This is so we
- * won't hang onto smgr references to deleted files
- * indefinitely.
+ * won't hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
/*
* Note we record the checkpoint start time not end time as
- * last_checkpoint_time. This is so that time-driven
- * checkpoints happen at a predictable spacing.
+ * last_checkpoint_time. This is so that time-driven checkpoints
+ * happen at a predictable spacing.
*/
last_checkpoint_time = now;
}
BgBufferSync();
/*
- * Nap for the configured time, or sleep for 10 seconds if there
- * is no bgwriter activity configured.
+ * Nap for the configured time, or sleep for 10 seconds if there is no
+ * bgwriter activity configured.
*
- * On some platforms, signals won't interrupt the sleep. To ensure
- * we respond reasonably promptly when someone signals us, break
- * down the sleep into 1-second increments, and check for
- * interrupts after each nap.
+ * On some platforms, signals won't interrupt the sleep. To ensure we
+ * respond reasonably promptly when someone signals us, break down the
+ * sleep into 1-second increments, and check for interrupts after each
+ * nap.
*
* We absorb pending requests after each short sleep.
*/
/*
* DO NOT proc_exit() -- we're here because shared memory may be
- * corrupted, so we don't want to try to clean up our transaction.
- * Just nail the windows shut and get out of town.
+ * corrupted, so we don't want to try to clean up our transaction. Just
+ * nail the windows shut and get out of town.
*
- * Note we do exit(1) not exit(0). This is to force the postmaster into
- * a system reset cycle if some idiot DBA sends a manual SIGQUIT to a
- * random backend. This is necessary precisely because we don't clean
- * up our shared memory state.
+ * Note we do exit(1) not exit(0). This is to force the postmaster into a
+ * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
+ * backend. This is necessary precisely because we don't clean up our
+ * shared memory state.
*/
exit(1);
}
Size size;
/*
- * Currently, the size of the requests[] array is arbitrarily set
- * equal to NBuffers. This may prove too large or small ...
+ * Currently, the size of the requests[] array is arbitrarily set equal to
+ * NBuffers. This may prove too large or small ...
*/
size = offsetof(BgWriterShmemStruct, requests);
size = add_size(size, mul_size(NBuffers, sizeof(BgWriterRequest)));
CreateCheckPoint(false, true);
/*
- * After any checkpoint, close all smgr files. This is so we
- * won't hang onto smgr references to deleted files
- * indefinitely.
+ * After any checkpoint, close all smgr files. This is so we won't
+ * hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
"could not signal for checkpoint: %m");
/*
- * If requested, wait for completion. We detect completion according
- * to the algorithm given above.
+ * If requested, wait for completion. We detect completion according to
+ * the algorithm given above.
*/
if (waitforit)
{
/*
* We are waiting for ckpt_done >= old_started, in a modulo sense.
- * This is a little tricky since we don't know the width or
- * signedness of sig_atomic_t. We make the lowest common
- * denominator assumption that it is only as wide as "char". This
- * means that this algorithm will cope correctly as long as we
- * don't sleep for more than 127 completed checkpoints. (If we
- * do, we will get another chance to exit after 128 more
- * checkpoints...)
+ * This is a little tricky since we don't know the width or signedness
+ * of sig_atomic_t. We make the lowest common denominator assumption
+ * that it is only as wide as "char". This means that this algorithm
+ * will cope correctly as long as we don't sleep for more than 127
+ * completed checkpoints. (If we do, we will get another chance to
+ * exit after 128 more checkpoints...)
*/
while (((signed char) (bgs->ckpt_done - old_started)) < 0)
{
return;
/*
- * We have to PANIC if we fail to absorb all the pending requests
- * (eg, because our hashtable runs out of memory). This is because
- * the system cannot run safely if we are unable to fsync what we
- * have been told to fsync. Fortunately, the hashtable is so small
- * that the problem is quite unlikely to arise in practice.
+ * We have to PANIC if we fail to absorb all the pending requests (eg,
+ * because our hashtable runs out of memory). This is because the system
+ * cannot run safely if we are unable to fsync what we have been told to
+ * fsync. Fortunately, the hashtable is so small that the problem is
+ * quite unlikely to arise in practice.
*/
START_CRIT_SECTION();
/*
- * We try to avoid holding the lock for a long time by copying the
- * request array.
+ * We try to avoid holding the lock for a long time by copying the request
+ * array.
*/
LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);
/*
* fork_process.c
- * A simple wrapper on top of fork(). This does not handle the
- * EXEC_BACKEND case; it might be extended to do so, but it would be
- * considerably more complex.
+ * A simple wrapper on top of fork(). This does not handle the
+ * EXEC_BACKEND case; it might be extended to do so, but it would be
+ * considerably more complex.
*
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/fork_process.c,v 1.3 2005/03/16 00:02:39 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/fork_process.c,v 1.4 2005/10/15 02:49:23 momjian Exp $
*/
#include "postgres.h"
#include "postmaster/fork_process.h"
pid_t
fork_process(void)
{
- pid_t result;
+ pid_t result;
+
#ifdef LINUX_PROFILE
struct itimerval prof_itimer;
#endif
/*
- * Flush stdio channels just before fork, to avoid double-output
- * problems. Ideally we'd use fflush(NULL) here, but there are still a
- * few non-ANSI stdio libraries out there (like SunOS 4.1.x) that
- * coredump if we do. Presently stdout and stderr are the only stdio
- * output channels used by the postmaster, so fflush'ing them should
- * be sufficient.
+ * Flush stdio channels just before fork, to avoid double-output problems.
+ * Ideally we'd use fflush(NULL) here, but there are still a few non-ANSI
+ * stdio libraries out there (like SunOS 4.1.x) that coredump if we do.
+ * Presently stdout and stderr are the only stdio output channels used by
+ * the postmaster, so fflush'ing them should be sufficient.
*/
fflush(stdout);
fflush(stderr);
#ifdef LINUX_PROFILE
+
/*
- * Linux's fork() resets the profiling timer in the child process. If
- * we want to profile child processes then we need to save and restore
- * the timer setting. This is a waste of time if not profiling,
- * however, so only do it if commanded by specific -DLINUX_PROFILE
- * switch.
+ * Linux's fork() resets the profiling timer in the child process. If we
+ * want to profile child processes then we need to save and restore the
+ * timer setting. This is a waste of time if not profiling, however, so
+ * only do it if commanded by specific -DLINUX_PROFILE switch.
*/
getitimer(ITIMER_PROF, &prof_itimer);
#endif
return result;
}
-#endif /* ! WIN32 */
+
+#endif /* ! WIN32 */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/pgarch.c,v 1.17 2005/07/04 04:51:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgarch.c,v 1.18 2005/10/15 02:49:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Timer definitions.
* ----------
*/
-#define PGARCH_AUTOWAKE_INTERVAL 60 /* How often to force a poll of
- * the archive status directory;
- * in seconds. */
-#define PGARCH_RESTART_INTERVAL 10 /* How often to attempt to restart
- * a failed archiver; in seconds. */
+#define PGARCH_AUTOWAKE_INTERVAL 60 /* How often to force a poll of the
+ * archive status directory; in
+ * seconds. */
+#define PGARCH_RESTART_INTERVAL 10 /* How often to attempt to restart a
+ * failed archiver; in seconds. */
/* ----------
* Archiver control info.
/*
* Do nothing if too soon since last archiver start. This is a safety
- * valve to protect against continuous respawn attempts if the
- * archiver is dying immediately at launch. Note that since we will be
- * re-called from the postmaster main loop, we will get another chance
- * later.
+ * valve to protect against continuous respawn attempts if the archiver is
+ * dying immediately at launch. Note that since we will be re-called from
+ * the postmaster main loop, we will get another chance later.
*/
curtime = time(NULL);
if ((unsigned int) (curtime - last_pgarch_start_time) <
/*
* We run the copy loop immediately upon entry, in case there are
- * unarchived files left over from a previous database run (or maybe
- * the archiver died unexpectedly). After that we wait for a signal
- * or timeout before doing more.
+ * unarchived files left over from a previous database run (or maybe the
+ * archiver died unexpectedly). After that we wait for a signal or
+ * timeout before doing more.
*/
wakened = true;
}
/*
- * There shouldn't be anything for the archiver to do except to
- * wait for a signal, ... however, the archiver exists to
- * protect our data, so she wakes up occasionally to allow
- * herself to be proactive. In particular this avoids getting
- * stuck if a signal arrives just before we sleep.
+ * There shouldn't be anything for the archiver to do except to wait
+ * for a signal, ... however, the archiver exists to protect our data,
+ * so she wakes up occasionally to allow herself to be proactive. In
+ * particular this avoids getting stuck if a signal arrives just
+ * before we sleep.
*/
if (!wakened)
{
/*
* loop through all xlogs with archive_status of .ready and archive
- * them...mostly we expect this to be a single file, though it is
- * possible some backend will add files onto the list of those that
- * need archiving while we are still copying earlier archives
+ * them...mostly we expect this to be a single file, though it is possible
+ * some backend will add files onto the list of those that need archiving
+ * while we are still copying earlier archives
*/
while (pgarch_readyXlog(xlog))
{
pgarch_readyXlog(char *xlog)
{
/*
- * open xlog status directory and read through list of xlogs that have
- * the .ready suffix, looking for earliest file. It is possible to
- * optimise this code, though only a single file is expected on the
- * vast majority of calls, so....
+ * open xlog status directory and read through list of xlogs that have the
+ * .ready suffix, looking for earliest file. It is possible to optimise
+ * this code, though only a single file is expected on the vast majority
+ * of calls, so....
*/
char XLogArchiveStatusDir[MAXPGPATH];
char newxlog[MAX_XFN_CHARS + 6 + 1];
if (rldir == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open archive status directory \"%s\": %m",
- XLogArchiveStatusDir)));
+ errmsg("could not open archive status directory \"%s\": %m",
+ XLogArchiveStatusDir)));
while ((rlde = ReadDir(rldir, XLogArchiveStatusDir)) != NULL)
{
*
* Copyright (c) 2001-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.109 2005/10/06 02:29:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.110 2005/10/15 02:49:23 momjian Exp $
* ----------
*/
#include "postgres.h"
* Timer definitions.
* ----------
*/
-#define PGSTAT_STAT_INTERVAL 500 /* How often to write the status
- * file; in milliseconds. */
+#define PGSTAT_STAT_INTERVAL 500 /* How often to write the status file;
+ * in milliseconds. */
-#define PGSTAT_DESTROY_DELAY 10000 /* How long to keep destroyed
- * objects known, to give delayed
- * UDP packets time to arrive; in
- * milliseconds. */
+#define PGSTAT_DESTROY_DELAY 10000 /* How long to keep destroyed objects
+ * known, to give delayed UDP packets
+ * time to arrive; in milliseconds. */
#define PGSTAT_DESTROY_COUNT (PGSTAT_DESTROY_DELAY / PGSTAT_STAT_INTERVAL)
-#define PGSTAT_RESTART_INTERVAL 60 /* How often to attempt to restart
- * a failed statistics collector;
- * in seconds. */
+#define PGSTAT_RESTART_INTERVAL 60 /* How often to attempt to restart a
+ * failed statistics collector; in
+ * seconds. */
/* ----------
* Amount of space reserved in pgstat_recvbuffer().
* ----------
*/
NON_EXEC_STATIC int pgStatSock = -1;
-NON_EXEC_STATIC int pgStatPipe[2] = {-1,-1};
+NON_EXEC_STATIC int pgStatPipe[2] = {-1, -1};
static struct sockaddr_storage pgStatAddr;
static pid_t pgStatCollectorPid = 0;
*/
typedef struct TabStatArray
{
- int tsa_alloc; /* num allocated */
- int tsa_used; /* num actually used */
+ int tsa_alloc; /* num allocated */
+ int tsa_used; /* num actually used */
PgStat_MsgTabstat **tsa_messages; /* the array itself */
} TabStatArray;
#define TABSTAT_QUANTUM 4 /* we alloc this many at a time */
-static TabStatArray RegularTabStat = { 0, 0, NULL };
-static TabStatArray SharedTabStat = { 0, 0, NULL };
+static TabStatArray RegularTabStat = {0, 0, NULL};
+static TabStatArray SharedTabStat = {0, 0, NULL};
static int pgStatXactCommit = 0;
static int pgStatXactRollback = 0;
}
/*
- * On some platforms, getaddrinfo_all() may return multiple addresses
- * only one of which will actually work (eg, both IPv6 and IPv4
- * addresses when kernel will reject IPv6). Worse, the failure may
- * occur at the bind() or perhaps even connect() stage. So we must
- * loop through the results till we find a working combination. We
- * will generate LOG messages, but no error, for bogus combinations.
+ * On some platforms, getaddrinfo_all() may return multiple addresses only
+ * one of which will actually work (eg, both IPv6 and IPv4 addresses when
+ * kernel will reject IPv6). Worse, the failure may occur at the bind()
+ * or perhaps even connect() stage. So we must loop through the results
+ * till we find a working combination. We will generate LOG messages, but
+ * no error, for bogus combinations.
*/
for (addr = addrs; addr; addr = addr->ai_next)
{
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not create socket for statistics collector: %m")));
+ errmsg("could not create socket for statistics collector: %m")));
continue;
}
/*
- * Bind it to a kernel assigned port on localhost and get the
- * assigned port via getsockname().
+ * Bind it to a kernel assigned port on localhost and get the assigned
+ * port via getsockname().
*/
if (bind(pgStatSock, addr->ai_addr, addr->ai_addrlen) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not bind socket for statistics collector: %m")));
+ errmsg("could not bind socket for statistics collector: %m")));
closesocket(pgStatSock);
pgStatSock = -1;
continue;
}
/*
- * Connect the socket to its own address. This saves a few cycles
- * by not having to respecify the target address on every send.
- * This also provides a kernel-level check that only packets from
- * this same address will be received.
+ * Connect the socket to its own address. This saves a few cycles by
+ * not having to respecify the target address on every send. This also
+ * provides a kernel-level check that only packets from this same
+ * address will be received.
*/
if (connect(pgStatSock, (struct sockaddr *) & pgStatAddr, alen) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not connect socket for statistics collector: %m")));
+ errmsg("could not connect socket for statistics collector: %m")));
closesocket(pgStatSock);
pgStatSock = -1;
continue;
}
/*
- * Try to send and receive a one-byte test message on the socket.
- * This is to catch situations where the socket can be created but
- * will not actually pass data (for instance, because kernel
- * packet filtering rules prevent it).
+ * Try to send and receive a one-byte test message on the socket. This
+ * is to catch situations where the socket can be created but will not
+ * actually pass data (for instance, because kernel packet filtering
+ * rules prevent it).
*/
test_byte = TESTBYTEVAL;
if (send(pgStatSock, &test_byte, 1, 0) != 1)
}
/*
- * There could possibly be a little delay before the message can
- * be received. We arbitrarily allow up to half a second before
- * deciding it's broken.
+ * There could possibly be a little delay before the message can be
+ * received. We arbitrarily allow up to half a second before deciding
+ * it's broken.
*/
for (;;) /* need a loop to handle EINTR */
{
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("select() failed in statistics collector: %m")));
+ errmsg("select() failed in statistics collector: %m")));
closesocket(pgStatSock);
pgStatSock = -1;
continue;
if (sel_res == 0 || !FD_ISSET(pgStatSock, &rset))
{
/*
- * This is the case we actually think is likely, so take pains
- * to give a specific message for it.
+ * This is the case we actually think is likely, so take pains to
+ * give a specific message for it.
*
* errno will not be set meaningfully here, so don't use it.
*/
goto startup_failed;
/*
- * Set the socket to non-blocking IO. This ensures that if the
- * collector falls behind (despite the buffering process), statistics
- * messages will be discarded; backends won't block waiting to send
- * messages to the collector.
+ * Set the socket to non-blocking IO. This ensures that if the collector
+ * falls behind (despite the buffering process), statistics messages will
+ * be discarded; backends won't block waiting to send messages to the
+ * collector.
*/
if (!pg_set_noblock(pgStatSock))
{
startup_failed:
ereport(LOG,
- (errmsg("disabling statistics collector for lack of working socket")));
+ (errmsg("disabling statistics collector for lack of working socket")));
if (addrs)
freeaddrinfo_all(hints.ai_family, addrs);
/*
* pgstat_reset_all() -
*
- * Remove the stats file. This is used on server start if the
+ * Remove the stats file. This is used on server start if the
* stats_reset_on_server_start feature is enabled, or if WAL
* recovery is needed after a crash.
*/
return 0;
/*
- * Do nothing if too soon since last collector start. This is a
- * safety valve to protect against continuous respawn attempts if the
- * collector is dying immediately at launch. Note that since we will
- * be re-called from the postmaster main loop, we will get another
- * chance later.
+ * Do nothing if too soon since last collector start. This is a safety
+ * valve to protect against continuous respawn attempts if the collector
+ * is dying immediately at launch. Note that since we will be re-called
+ * from the postmaster main loop, we will get another chance later.
*/
curtime = time(NULL);
if ((unsigned int) (curtime - last_pgstat_start_time) <
/* ----------
* pgstat_report_autovac() -
*
- * Called from autovacuum.c to report startup of an autovacuum process.
+ * Called from autovacuum.c to report startup of an autovacuum process.
* We are called before InitPostgres is done, so can't rely on MyDatabaseId;
* the db OID must be passed in, instead.
* ----------
/*
* We may not have a MyProcPort (eg, if this is the autovacuum process).
- * For the moment, punt and don't send BESTART --- would be better to
- * work out a clean way of handling "unknown clientaddr".
+ * For the moment, punt and don't send BESTART --- would be better to work
+ * out a clean way of handling "unknown clientaddr".
*/
if (MyProcPort)
{
/* --------
* pgstat_report_analyze() -
*
- * Tell the collector about the table we just analyzed.
+ * Tell the collector about the table we just analyzed.
* --------
*/
void
return 0;
/*
- * If not done for this transaction, read the statistics collector
- * stats file into some hash tables.
+ * If not done for this transaction, read the statistics collector stats
+ * file into some hash tables.
*/
backend_read_statsfile();
while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * Check if this relation is still alive by looking up it's
- * pg_class tuple in the system catalog cache.
+ * Check if this relation is still alive by looking up its pg_class
+ * tuple in the system catalog cache.
*/
reltup = SearchSysCache(RELOID,
ObjectIdGetDatum(tabentry->tableid),
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to reset statistics counters")));
+ errmsg("must be superuser to reset statistics counters")));
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_RESETCOUNTER);
msg.m_databaseid = MyDatabaseId;
{
Oid rel_id = rel->rd_id;
PgStat_TableEntry *useent;
- TabStatArray *tsarr;
+ TabStatArray *tsarr;
PgStat_MsgTabstat *tsmsg;
int mb;
int i;
continue;
/*
- * Not found, but found a message buffer with an empty slot
- * instead. Fine, let's use this one.
+ * Not found, but found a message buffer with an empty slot instead.
+ * Fine, let's use this one.
*/
i = tsmsg->m_nentries++;
useent = &tsmsg->m_entry[i];
pgStatXactCommit++;
/*
- * If there was no relation activity yet, just make one existing
- * message buffer used without slots, causing the next report to tell
- * new xact-counters.
+ * If there was no relation activity yet, just make one existing message
+ * buffer used without slots, causing the next report to tell new
+ * xact-counters.
*/
if (RegularTabStat.tsa_alloc == 0)
more_tabstat_space(&RegularTabStat);
pgStatXactRollback++;
/*
- * If there was no relation activity yet, just make one existing
- * message buffer used without slots, causing the next report to tell
- * new xact-counters.
+ * If there was no relation activity yet, just make one existing message
+ * buffer used without slots, causing the next report to tell new
+ * xact-counters.
*/
if (RegularTabStat.tsa_alloc == 0)
more_tabstat_space(&RegularTabStat);
pgstat_fetch_stat_dbentry(Oid dbid)
{
/*
- * If not done for this transaction, read the statistics collector
- * stats file into some hash tables.
+ * If not done for this transaction, read the statistics collector stats
+ * file into some hash tables.
*/
backend_read_statsfile();
PgStat_StatTabEntry *tabentry;
/*
- * If not done for this transaction, read the statistics collector
- * stats file into some hash tables.
+ * If not done for this transaction, read the statistics collector stats
+ * file into some hash tables.
*/
backend_read_statsfile();
#endif
/*
- * Start a buffering process to read from the socket, so we have a
- * little more time to process incoming messages.
+ * Start a buffering process to read from the socket, so we have a little
+ * more time to process incoming messages.
*
- * NOTE: the process structure is: postmaster is parent of buffer process
- * is parent of collector process. This way, the buffer can detect
- * collector failure via SIGCHLD, whereas otherwise it wouldn't notice
- * collector failure until it tried to write on the pipe. That would
- * mean that after the postmaster started a new collector, we'd have
- * two buffer processes competing to read from the UDP socket --- not
- * good.
+ * NOTE: the process structure is: postmaster is parent of buffer process is
+ * parent of collector process. This way, the buffer can detect collector
+ * failure via SIGCHLD, whereas otherwise it wouldn't notice collector
+ * failure until it tried to write on the pipe. That would mean that
+ * after the postmaster started a new collector, we'd have two buffer
+ * processes competing to read from the UDP socket --- not good.
*/
if (pgpipe(pgStatPipe) < 0)
ereport(ERROR,
(errcode_for_socket_access(),
- errmsg("could not create pipe for statistics buffer: %m")));
+ errmsg("could not create pipe for statistics buffer: %m")));
/* child becomes collector process */
#ifdef EXEC_BACKEND
MyProcPid = getpid(); /* reset MyProcPid */
/*
- * Reset signal handling. With the exception of restoring default
- * SIGCHLD and SIGQUIT handling, this is a no-op in the
- * non-EXEC_BACKEND case because we'll have inherited these settings
- * from the buffer process; but it's not a no-op for EXEC_BACKEND.
+ * Reset signal handling. With the exception of restoring default SIGCHLD
+ * and SIGQUIT handling, this is a no-op in the non-EXEC_BACKEND case
+ * because we'll have inherited these settings from the buffer process;
+ * but it's not a no-op for EXEC_BACKEND.
*/
pqsignal(SIGHUP, SIG_IGN);
pqsignal(SIGINT, SIG_IGN);
need_statwrite = TRUE;
/*
- * Read in an existing statistics stats file or initialize the stats
- * to zero.
+ * Read in an existing statistics stats file or initialize the stats to
+ * zero.
*/
pgStatRunningInCollector = TRUE;
pgstat_read_statsfile(&pgStatDBHash, InvalidOid, NULL, NULL);
for (;;)
{
/*
- * If we need to write the status file again (there have been
- * changes in the statistics since we wrote it last) calculate the
- * timeout until we have to do so.
+ * If we need to write the status file again (there have been changes
+ * in the statistics since we wrote it last) calculate the timeout
+ * until we have to do so.
*/
if (need_statwrite)
{
continue;
ereport(ERROR,
(errcode_for_socket_access(),
- errmsg("select() failed in statistics collector: %m")));
+ errmsg("select() failed in statistics collector: %m")));
}
/*
{
/*
* We may need to issue multiple read calls in case the buffer
- * process didn't write the message in a single write, which
- * is possible since it dumps its buffer bytewise. In any
- * case, we'd need two reads since we don't know the message
- * length initially.
+ * process didn't write the message in a single write, which is
+ * possible since it dumps its buffer bytewise. In any case, we'd
+ * need two reads since we don't know the message length
+ * initially.
*/
int nread = 0;
int targetlen = sizeof(PgStat_MsgHdr); /* initial */
{
/*
* Bogus message length implies that we got out of
- * sync with the buffer process somehow. Abort so
- * that we can restart both processes.
+ * sync with the buffer process somehow. Abort so that
+ * we can restart both processes.
*/
ereport(ERROR,
- (errmsg("invalid statistics message length")));
+ (errmsg("invalid statistics message length")));
}
}
}
/*
- * EOF on the pipe implies that the buffer process exited.
- * Fall out of outer loop.
+ * EOF on the pipe implies that the buffer process exited. Fall
+ * out of outer loop.
*/
if (pipeEOF)
break;
/*
- * Distribute the message to the specific function handling
- * it.
+ * Distribute the message to the specific function handling it.
*/
switch (msg.msg_hdr.m_type)
{
pgStatNumMessages++;
/*
- * If this is the first message after we wrote the stats file
- * the last time, setup the timeout that it'd be written.
+ * If this is the first message after we wrote the stats file the
+ * last time, setup the timeout that it'd be written.
*/
if (!need_statwrite)
{
}
/*
- * Note that we do NOT check for postmaster exit inside the loop;
- * only EOF on the buffer pipe causes us to fall out. This
- * ensures we don't exit prematurely if there are still a few
- * messages in the buffer or pipe at postmaster shutdown.
+ * Note that we do NOT check for postmaster exit inside the loop; only
+ * EOF on the buffer pipe causes us to fall out. This ensures we
+ * don't exit prematurely if there are still a few messages in the
+ * buffer or pipe at postmaster shutdown.
*/
}
/*
- * Okay, we saw EOF on the buffer pipe, so there are no more messages
- * to process. If the buffer process quit because of postmaster
- * shutdown, we want to save the final stats to reuse at next startup.
- * But if the buffer process failed, it seems best not to (there may
- * even now be a new collector firing up, and we don't want it to read
- * a partially-rewritten stats file).
+ * Okay, we saw EOF on the buffer pipe, so there are no more messages to
+ * process. If the buffer process quit because of postmaster shutdown, we
+ * want to save the final stats to reuse at next startup. But if the
+ * buffer process failed, it seems best not to (there may even now be a
+ * new collector firing up, and we don't want it to read a
+ * partially-rewritten stats file).
*/
if (!PostmasterIsAlive(false))
pgstat_write_statsfile();
set_ps_display("");
/*
- * We want to die if our child collector process does. There are two
- * ways we might notice that it has died: receive SIGCHLD, or get a
- * write failure on the pipe leading to the child. We can set SIGPIPE
- * to kill us here. Our SIGCHLD handler was already set up before we
- * forked (must do it that way, else it's a race condition).
+ * We want to die if our child collector process does. There are two ways
+ * we might notice that it has died: receive SIGCHLD, or get a write
+ * failure on the pipe leading to the child. We can set SIGPIPE to kill
+ * us here. Our SIGCHLD handler was already set up before we forked (must
+ * do it that way, else it's a race condition).
*/
pqsignal(SIGPIPE, SIG_DFL);
PG_SETMASK(&UnBlockSig);
/*
- * Set the write pipe to nonblock mode, so that we cannot block when
- * the collector falls behind.
+ * Set the write pipe to nonblock mode, so that we cannot block when the
+ * collector falls behind.
*/
if (!pg_set_noblock(writePipe))
ereport(ERROR,
}
/*
- * Wait for some work to do; but not for more than 10 seconds.
- * (This determines how quickly we will shut down after an
- * ungraceful postmaster termination; so it needn't be very fast.)
+ * Wait for some work to do; but not for more than 10 seconds. (This
+ * determines how quickly we will shut down after an ungraceful
+ * postmaster termination; so it needn't be very fast.)
*/
timeout.tv_sec = 10;
timeout.tv_usec = 0;
if (len < 0)
ereport(ERROR,
(errcode_for_socket_access(),
- errmsg("could not read statistics message: %m")));
+ errmsg("could not read statistics message: %m")));
/*
* We ignore messages that are smaller than our common header
* If the collector is ready to receive, write some data into his
* pipe. We may or may not be able to write all that we have.
*
- * NOTE: if what we have is less than PIPE_BUF bytes but more than
- * the space available in the pipe buffer, most kernels will
- * refuse to write any of it, and will return EAGAIN. This means
- * we will busy-loop until the situation changes (either because
- * the collector caught up, or because more data arrives so that
- * we have more than PIPE_BUF bytes buffered). This is not good,
- * but is there any way around it? We have no way to tell when
- * the collector has caught up...
+ * NOTE: if what we have is less than PIPE_BUF bytes but more than the
+ * space available in the pipe buffer, most kernels will refuse to
+ * write any of it, and will return EAGAIN. This means we will
+ * busy-loop until the situation changes (either because the collector
+ * caught up, or because more data arrives so that we have more than
+ * PIPE_BUF bytes buffered). This is not good, but is there any way
+ * around it? We have no way to tell when the collector has caught
+ * up...
*/
if (FD_ISSET(writePipe, &wfds))
{
continue; /* not enough space in pipe */
ereport(ERROR,
(errcode_for_socket_access(),
- errmsg("could not write to statistics collector pipe: %m")));
+ errmsg("could not write to statistics collector pipe: %m")));
}
/* NB: len < xfr is okay */
msg_send += len;
}
/*
- * Make sure we forwarded all messages before we check for
- * postmaster termination.
+ * Make sure we forwarded all messages before we check for postmaster
+ * termination.
*/
if (msg_have != 0 || FD_ISSET(pgStatSock, &rfds))
continue;
/*
- * If the postmaster has terminated, we die too. (This is no
- * longer the normal exit path, however.)
+ * If the postmaster has terminated, we die too. (This is no longer
+ * the normal exit path, however.)
*/
if (!PostmasterIsAlive(true))
exit(0);
pgstat_exit(SIGNAL_ARGS)
{
/*
- * For now, we just nail the doors shut and get out of town. It might
- * be cleaner to allow any pending messages to be sent, but that
- * creates a tradeoff against speed of exit.
+ * For now, we just nail the doors shut and get out of town. It might be
+ * cleaner to allow any pending messages to be sent, but that creates a
+ * tradeoff against speed of exit.
*/
/*
if (msg->m_backendid < 1 || msg->m_backendid > MaxBackends)
{
ereport(LOG,
- (errmsg("invalid server process ID %d", msg->m_backendid)));
+ (errmsg("invalid server process ID %d", msg->m_backendid)));
return -1;
}
beentry = &pgStatBeTable[msg->m_backendid - 1];
/*
- * If the slot contains the PID of this backend, everything is
- * fine and we have nothing to do. Note that all the slots are
- * zero'd out when the collector is started. We assume that a slot
- * is "empty" iff procpid == 0.
+ * If the slot contains the PID of this backend, everything is fine and we
+ * have nothing to do. Note that all the slots are zero'd out when the
+ * collector is started. We assume that a slot is "empty" iff procpid ==
+ * 0.
*/
if (beentry->procpid > 0 && beentry->procpid == msg->m_procpid)
return 0;
/*
- * Lookup if this backend is known to be dead. This can be caused due
- * to messages arriving in the wrong order - e.g. postmaster's BETERM
- * message might have arrived before we received all the backends
- * stats messages, or even a new backend with the same backendid was
- * faster in sending his BESTART.
+ * Lookup if this backend is known to be dead. This can be caused due to
+ * messages arriving in the wrong order - e.g. postmaster's BETERM message
+ * might have arrived before we received all the backends stats messages,
+ * or even a new backend with the same backendid was faster in sending his
+ * BESTART.
*
* If the backend is known to be dead, we ignore this add.
*/
return 1;
/*
- * Backend isn't known to be dead. If it's slot is currently used, we
- * have to kick out the old backend.
+ * Backend isn't known to be dead. If its slot is currently used, we have
+ * to kick out the old backend.
*/
if (beentry->procpid > 0)
pgstat_sub_backend(beentry->procpid);
beentry->activity[0] = '\0';
/*
- * We can't initialize the rest of the data in this slot until we
- * see the BESTART message. Therefore, we set the database and
- * user to sentinel values, to indicate "undefined". There is no
- * easy way to do this for the client address, so make sure to
- * check that the database or user are defined before accessing
- * the client address.
+ * We can't initialize the rest of the data in this slot until we see the
+ * BESTART message. Therefore, we set the database and user to sentinel
+ * values, to indicate "undefined". There is no easy way to do this for
+ * the client address, so make sure to check that the database or user are
+ * defined before accessing the client address.
*/
beentry->userid = InvalidOid;
beentry->databaseid = InvalidOid;
pgstat_get_db_entry(Oid databaseid, bool create)
{
PgStat_StatDBEntry *result;
- bool found;
- HASHACTION action = (create ? HASH_ENTER : HASH_FIND);
+ bool found;
+ HASHACTION action = (create ? HASH_ENTER : HASH_FIND);
/* Lookup or create the hash table entry for this database */
result = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
hash_ctl.hash = oid_hash;
result->tables = hash_create("Per-database table",
- PGSTAT_TAB_HASH_SIZE,
- &hash_ctl,
- HASH_ELEM | HASH_FUNCTION);
+ PGSTAT_TAB_HASH_SIZE,
+ &hash_ctl,
+ HASH_ELEM | HASH_FUNCTION);
}
return result;
bool found;
/*
- * Search in the known-backends table for the slot containing this
- * PID.
+ * Search in the known-backends table for the slot containing this PID.
*/
for (i = 0; i < MaxBackends; i++)
{
if (pgStatBeTable[i].procpid == procpid)
{
/*
- * That's him. Add an entry to the known to be dead backends.
- * Due to possible misorder in the arrival of UDP packets it's
- * possible that even if we know the backend is dead, there
- * could still be messages queued that arrive later. Those
- * messages must not cause our number of backends statistics
- * to get screwed up, so we remember for a couple of seconds
- * that this PID is dead and ignore them (only the counting of
- * backends, not the table access stats they sent).
+ * That's him. Add an entry to the known to be dead backends. Due
+ * to possible misorder in the arrival of UDP packets it's
+ * possible that even if we know the backend is dead, there could
+ * still be messages queued that arrive later. Those messages must
+ * not cause our number of backends statistics to get screwed up,
+ * so we remember for a couple of seconds that this PID is dead
+ * and ignore them (only the counting of backends, not the table
+ * access stats they sent).
*/
deadbe = (PgStat_StatBeDead *) hash_search(pgStatBeDead,
(void *) &procpid,
}
/*
- * No big problem if not found. This can happen if UDP messages arrive
- * out of order here.
+ * No big problem if not found. This can happen if UDP messages arrive out
+ * of order here.
*/
}
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not open temporary statistics file \"%s\": %m",
- PGSTAT_STAT_TMPFILE)));
+ errmsg("could not open temporary statistics file \"%s\": %m",
+ PGSTAT_STAT_TMPFILE)));
return;
}
while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * If this database is marked destroyed, count down and do so if
- * it reaches 0.
+ * If this database is marked destroyed, count down and do so if it
+ * reaches 0.
*/
if (dbentry->destroy > 0)
{
while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&tstat)) != NULL)
{
/*
- * If table entry marked for destruction, same as above for
- * the database entry.
+ * If table entry marked for destruction, same as above for the
+ * database entry.
*/
if (tabentry->destroy > 0)
{
}
/*
- * At least we think this is still a live table. Print its
- * access stats.
+ * At least we think this is still a live table. Print its access
+ * stats.
*/
fputc('T', fpout);
fwrite(tabentry, sizeof(PgStat_StatTabEntry), 1, fpout);
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not close temporary statistics file \"%s\": %m",
- PGSTAT_STAT_TMPFILE)));
+ errmsg("could not close temporary statistics file \"%s\": %m",
+ PGSTAT_STAT_TMPFILE)));
}
else
{
while ((deadbe = (PgStat_StatBeDead *) hash_seq_search(&hstat)) != NULL)
{
/*
- * Count down the destroy delay and remove entries where it
- * reaches 0.
+ * Count down the destroy delay and remove entries where it reaches 0.
*/
if (--(deadbe->destroy) <= 0)
{
HASH_REMOVE, NULL) == NULL)
{
ereport(ERROR,
- (errmsg("dead-server-process hash table corrupted "
- "during cleanup --- abort")));
+ (errmsg("dead-server-process hash table corrupted "
+ "during cleanup --- abort")));
}
}
}
/*
* If running in the collector or the autovacuum process, we use the
- * DynaHashCxt memory context. If running in a backend, we use the
+ * DynaHashCxt memory context. If running in a backend, we use the
* TopTransactionContext instead, so the caller must only know the last
* XactId when this call happened to know if his tables are still valid or
* already gone!
HASH_ELEM | HASH_FUNCTION | mcxt_flags);
/*
- * Initialize the number of known backends to zero, just in case we do
- * a silent error return below.
+ * Initialize the number of known backends to zero, just in case we do a
+ * silent error return below.
*/
if (numbackends != NULL)
*numbackends = 0;
*betab = NULL;
/*
- * Try to open the status file. If it doesn't exist, the backends
- * simply return zero for anything and the collector simply starts
- * from scratch with empty counters.
+ * Try to open the status file. If it doesn't exist, the backends simply
+ * return zero for anything and the collector simply starts from scratch
+ * with empty counters.
*/
if ((fpin = AllocateFile(PGSTAT_STAT_FILENAME, PG_BINARY_R)) == NULL)
return;
{
/*
* 'D' A PgStat_StatDBEntry struct describing a database
- * follows. Subsequently, zero to many 'T' entries will
- * follow until a 'd' is encountered.
+ * follows. Subsequently, zero to many 'T' entries will follow
+ * until a 'd' is encountered.
*/
case 'D':
if (fread(&dbbuf, 1, sizeof(dbbuf), fpin) != sizeof(dbbuf))
* Add to the DB hash
*/
dbentry = (PgStat_StatDBEntry *) hash_search(*dbhash,
- (void *) &dbbuf.databaseid,
+ (void *) &dbbuf.databaseid,
HASH_ENTER,
&found);
if (found)
{
if (dbbuf.databaseid != onlydb &&
dbbuf.databaseid != InvalidOid)
- break;
+ break;
}
memset(&hash_ctl, 0, sizeof(hash_ctl));
dbentry->tables = hash_create("Per-database table",
PGSTAT_TAB_HASH_SIZE,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | mcxt_flags);
+ HASH_ELEM | HASH_FUNCTION | mcxt_flags);
/*
- * Arrange that following 'T's add entries to this
- * databases tables hash table.
+ * Arrange that following 'T's add entries to this databases
+ * tables hash table.
*/
tabhash = dbentry->tables;
break;
break;
tabentry = (PgStat_StatTabEntry *) hash_search(tabhash,
- (void *) &tabbuf.tableid,
- HASH_ENTER, &found);
+ (void *) &tabbuf.tableid,
+ HASH_ENTER, &found);
if (found)
{
else
*betab = (PgStat_StatBeEntry *)
MemoryContextAlloc(use_mcxt,
- sizeof(PgStat_StatBeEntry) * maxbackends);
+ sizeof(PgStat_StatBeEntry) * maxbackends);
break;
/*
PgStat_StatBeEntry *entry;
/*
- * If the backend is known dead, we ignore the message -- we don't
- * want to update the backend entry's state since this BESTART
- * message refers to an old, dead backend
+ * If the backend is known dead, we ignore the message -- we don't want to
+ * update the backend entry's state since this BESTART message refers to
+ * an old, dead backend
*/
if (pgstat_add_backend(&msg->m_hdr) != 0)
return;
/* ----------
* pgstat_recv_autovac() -
*
- * Process an autovacuum signalling message.
+ * Process an autovacuum signalling message.
* ----------
*/
static void
/*
* Lookup the database in the hashtable. Don't create the entry if it
* doesn't exist, because autovacuum may be processing a template
- * database. If this isn't the case, the database is most likely to
- * have an entry already. (If it doesn't, not much harm is done
- * anyway -- it'll get created as soon as somebody actually uses
- * the database.)
+ * database. If this isn't the case, the database is most likely to have
+ * an entry already. (If it doesn't, not much harm is done anyway --
+ * it'll get created as soon as somebody actually uses the database.)
*/
dbentry = pgstat_get_db_entry(msg->m_databaseid, false);
if (dbentry == NULL)
/* ----------
* pgstat_recv_vacuum() -
*
- * Process a VACUUM message.
+ * Process a VACUUM message.
* ----------
*/
static void
bool create;
/*
- * If we don't know about the database, ignore the message, because it
- * may be autovacuum processing a template database. But if the message
- * is for database InvalidOid, don't ignore it, because we are getting
- * a message from vacuuming a shared relation.
+ * If we don't know about the database, ignore the message, because it may
+ * be autovacuum processing a template database. But if the message is
+ * for database InvalidOid, don't ignore it, because we are getting a
+ * message from vacuuming a shared relation.
*/
create = (msg->m_databaseid == InvalidOid);
/* ----------
* pgstat_recv_analyze() -
*
- * Process an ANALYZE message.
+ * Process an ANALYZE message.
* ----------
*/
static void
bool found;
/*
- * Note that we do create the database entry here, as opposed to what
- * we do on AutovacStart and Vacuum messages. This is because
- * autovacuum never executes ANALYZE on template databases.
+ * Note that we do create the database entry here, as opposed to what we
+ * do on AutovacStart and Vacuum messages. This is because autovacuum
+ * never executes ANALYZE on template databases.
*/
dbentry = pgstat_get_db_entry(msg->m_databaseid, true);
PgStat_StatBeEntry *entry;
/*
- * Here we check explicitly for 0 return, since we don't want to
- * mangle the activity of an active backend by a delayed packet from a
- * dead one.
+ * Here we check explicitly for 0 return, since we don't want to mangle
+ * the activity of an active backend by a delayed packet from a dead one.
*/
if (pgstat_add_backend(&msg->m_hdr) != 0)
return;
dbentry = pgstat_get_db_entry(msg->m_databaseid, true);
/*
- * If the database is marked for destroy, this is a delayed UDP packet
- * and not worth being counted.
+ * If the database is marked for destroy, this is a delayed UDP packet and
+ * not worth being counted.
*/
if (dbentry->destroy > 0)
return;
for (i = 0; i < msg->m_nentries; i++)
{
tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
- (void *) &(tabmsg[i].t_id),
- HASH_ENTER, &found);
+ (void *) &(tabmsg[i].t_id),
+ HASH_ENTER, &found);
if (!found)
{
/*
- * If it's a new table entry, initialize counters to the
- * values we just got.
+ * If it's a new table entry, initialize counters to the values we
+ * just got.
*/
tabentry->numscans = tabmsg[i].t_numscans;
tabentry->tuples_returned = tabmsg[i].t_tuples_returned;
tabentry->tuples_inserted = tabmsg[i].t_tuples_inserted;
tabentry->tuples_updated = tabmsg[i].t_tuples_updated;
tabentry->tuples_deleted = tabmsg[i].t_tuples_deleted;
-
+
tabentry->n_live_tuples = tabmsg[i].t_tuples_inserted;
tabentry->n_dead_tuples = tabmsg[i].t_tuples_updated +
tabmsg[i].t_tuples_deleted;
return;
/*
- * If the database is marked for destroy, this is a delayed UDP packet
- * and the tables will go away at DB destruction.
+ * If the database is marked for destroy, this is a delayed UDP packet and
+ * the tables will go away at DB destruction.
*/
if (dbentry->destroy > 0)
return;
for (i = 0; i < msg->m_nentries; i++)
{
tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
- (void *) &(msg->m_tableid[i]),
+ (void *) &(msg->m_tableid[i]),
HASH_FIND, NULL);
if (tabentry)
tabentry->destroy = PGSTAT_DESTROY_COUNT;
return;
/*
- * We simply throw away all the database's table entries by
- * recreating a new hash table for them.
+ * We simply throw away all the database's table entries by recreating a
+ * new hash table for them.
*/
if (dbentry->tables != NULL)
hash_destroy(dbentry->tables);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.468 2005/09/22 15:33:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.469 2005/10/15 02:49:23 momjian Exp $
*
* NOTES
*
#ifdef WIN32
typedef struct
{
- SOCKET origsocket; /* Original socket value, or -1 if not a socket */
+ SOCKET origsocket; /* Original socket value, or -1 if not a
+ * socket */
WSAPROTOCOL_INFO wsainfo;
-} InheritableSocket;
+} InheritableSocket;
#else
typedef int InheritableSocket;
#endif
*/
typedef struct
{
- Port port;
+ Port port;
InheritableSocket portsocket;
- char DataDir[MAXPGPATH];
- int ListenSocket[MAXLISTEN];
- long MyCancelKey;
+ char DataDir[MAXPGPATH];
+ int ListenSocket[MAXLISTEN];
+ long MyCancelKey;
unsigned long UsedShmemSegID;
- void *UsedShmemSegAddr;
- slock_t *ShmemLock;
- slock_t *ShmemIndexLock;
+ void *UsedShmemSegAddr;
+ slock_t *ShmemLock;
+ slock_t *ShmemIndexLock;
VariableCache ShmemVariableCache;
- void *ShmemIndexAlloc;
- Backend *ShmemBackendArray;
- LWLock *LWLockArray;
- slock_t *ProcStructLock;
+ void *ShmemIndexAlloc;
+ Backend *ShmemBackendArray;
+ LWLock *LWLockArray;
+ slock_t *ProcStructLock;
InheritableSocket pgStatSock;
InheritableSocket pgStatPipe0;
InheritableSocket pgStatPipe1;
- pid_t PostmasterPid;
+ pid_t PostmasterPid;
TimestampTz PgStartTime;
#ifdef WIN32
- HANDLE PostmasterHandle;
- HANDLE initial_signal_pipe;
- HANDLE syslogPipe[2];
+ HANDLE PostmasterHandle;
+ HANDLE initial_signal_pipe;
+ HANDLE syslogPipe[2];
#else
- int syslogPipe[2];
+ int syslogPipe[2];
#endif
- char my_exec_path[MAXPGPATH];
- char pkglib_path[MAXPGPATH];
- char ExtraOptions[MAXPGPATH];
- char lc_collate[LOCALE_NAME_BUFLEN];
- char lc_ctype[LOCALE_NAME_BUFLEN];
-} BackendParameters;
+ char my_exec_path[MAXPGPATH];
+ char pkglib_path[MAXPGPATH];
+ char ExtraOptions[MAXPGPATH];
+ char lc_collate[LOCALE_NAME_BUFLEN];
+ char lc_ctype[LOCALE_NAME_BUFLEN];
+} BackendParameters;
static void read_backend_variables(char *id, Port *port);
-static void restore_backend_variables(BackendParameters *param, Port *port);
+static void restore_backend_variables(BackendParameters * param, Port *port);
+
#ifndef WIN32
-static bool save_backend_variables(BackendParameters *param, Port *port);
+static bool save_backend_variables(BackendParameters * param, Port *port);
#else
-static bool save_backend_variables(BackendParameters *param, Port *port,
- HANDLE childProcess, pid_t childPid);
+static bool save_backend_variables(BackendParameters * param, Port *port,
+ HANDLE childProcess, pid_t childPid);
#endif
static void ShmemBackendArrayAdd(Backend *bn);
static void ShmemBackendArrayRemove(pid_t pid);
-
#endif /* EXEC_BACKEND */
#define StartupDataBase() StartChildProcess(BS_XLOG_STARTUP)
int i;
/* This will call exit() if strdup() fails. */
- progname = get_progname(argv[0]);
+ progname = get_progname(argv[0]);
MyProcPid = PostmasterPid = getpid();
#endif
/*
- * for security, no dir or file created can be group or other
- * accessible
+ * for security, no dir or file created can be group or other accessible
*/
umask((mode_t) 0077);
MemoryContextInit();
/*
- * By default, palloc() requests in the postmaster will be allocated
- * in the PostmasterContext, which is space that can be recycled by
- * backends. Allocated data that needs to be available to backends
- * should be allocated in TopMemoryContext.
+ * By default, palloc() requests in the postmaster will be allocated in
+ * the PostmasterContext, which is space that can be recycled by backends.
+ * Allocated data that needs to be available to backends should be
+ * allocated in TopMemoryContext.
*/
PostmasterContext = AllocSetContextCreate(TopMemoryContext,
"Postmaster",
/*
* ignore this flag. This may be passed in because the
- * program was run as 'postgres -M' instead of
- * 'postmaster'
+ * program was run as 'postgres -M' instead of 'postmaster'
*/
break;
case 'N':
case 'o':
/*
- * Other options to pass to the backend on the command
- * line
+ * Other options to pass to the backend on the command line
*/
snprintf(ExtraOptions + strlen(ExtraOptions),
sizeof(ExtraOptions) - strlen(ExtraOptions),
case 'S':
/*
- * Start in 'S'ilent mode (disassociate from controlling
- * tty). You may also think of this as 'S'ysV mode since
- * it's most badly needed on SysV-derived systems like
- * SVR4 and HP-UX.
+ * Start in 'S'ilent mode (disassociate from controlling tty).
+ * You may also think of this as 'S'ysV mode since it's most
+ * badly needed on SysV-derived systems like SVR4 and HP-UX.
*/
SetConfigOption("silent_mode", "true", PGC_POSTMASTER, PGC_S_ARGV);
break;
case 's':
/*
- * In the event that some backend dumps core, send
- * SIGSTOP, rather than SIGQUIT, to all its peers. This
- * lets the wily post_hacker collect core dumps from
- * everyone.
+ * In the event that some backend dumps core, send SIGSTOP,
+ * rather than SIGQUIT, to all its peers. This lets the wily
+ * post_hacker collect core dumps from everyone.
*/
SendStop = true;
break;
if (find_other_exec(argv[0], "postgres", PG_VERSIONSTR,
postgres_exec_path) < 0)
ereport(FATAL,
- (errmsg("%s: could not locate matching postgres executable",
- progname)));
+ (errmsg("%s: could not locate matching postgres executable",
+ progname)));
#endif
/*
- * Locate the proper configuration files and data directory, and
- * read postgresql.conf for the first time.
+ * Locate the proper configuration files and data directory, and read
+ * postgresql.conf for the first time.
*/
if (!SelectConfigFiles(userDoption, progname))
ExitPostmaster(2);
if (NBuffers < 2 * MaxBackends || NBuffers < 16)
{
/*
- * Do not accept -B so small that backends are likely to starve
- * for lack of buffers. The specific choices here are somewhat
- * arbitrary.
+ * Do not accept -B so small that backends are likely to starve for
+ * lack of buffers. The specific choices here are somewhat arbitrary.
*/
write_stderr("%s: the number of buffers (-B) must be at least twice the number of allowed connections (-N) and at least 16\n", progname);
ExitPostmaster(1);
char **p;
ereport(DEBUG3,
- (errmsg_internal("%s: PostmasterMain: initial environ dump:",
- progname)));
+ (errmsg_internal("%s: PostmasterMain: initial environ dump:",
+ progname)));
ereport(DEBUG3,
- (errmsg_internal("-----------------------------------------")));
+ (errmsg_internal("-----------------------------------------")));
for (p = environ; *p; ++p)
ereport(DEBUG3,
(errmsg_internal("\t%s", *p)));
ereport(DEBUG3,
- (errmsg_internal("-----------------------------------------")));
+ (errmsg_internal("-----------------------------------------")));
}
/*
/*
* Fork away from controlling terminal, if -S specified.
*
- * Must do this before we grab any interlock files, else the interlocks
- * will show the wrong PID.
+ * Must do this before we grab any interlock files, else the interlocks will
+ * show the wrong PID.
*/
if (SilentMode)
pmdaemonize();
/*
* Create lockfile for data directory.
*
- * We want to do this before we try to grab the input sockets, because
- * the data directory interlock is more reliable than the socket-file
- * interlock (thanks to whoever decided to put socket files in /tmp
- * :-(). For the same reason, it's best to grab the TCP socket(s)
- * before the Unix socket.
+ * We want to do this before we try to grab the input sockets, because the
+ * data directory interlock is more reliable than the socket-file
+ * interlock (thanks to whoever decided to put socket files in /tmp :-().
+ * For the same reason, it's best to grab the TCP socket(s) before the
+ * Unix socket.
*/
CreateDataDirLockFile(true);
/*
* Remove old temporary files. At this point there can be no other
- * Postgres processes running in this directory, so this should be
- * safe.
+ * Postgres processes running in this directory, so this should be safe.
*/
RemovePgTempFiles();
/* syntax error in list */
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for \"listen_addresses\"")));
+ errmsg("invalid list syntax for \"listen_addresses\"")));
}
foreach(l, elemlist)
success++;
else
ereport(WARNING,
- (errmsg("could not create listen socket for \"%s\"",
- curhost)));
+ (errmsg("could not create listen socket for \"%s\"",
+ curhost)));
}
if (!success && list_length(elemlist))
"",
htonl(PostPortNumber),
"",
- (DNSServiceRegistrationReply) reg_reply,
+ (DNSServiceRegistrationReply) reg_reply,
NULL);
}
#endif
reset_shared(PostPortNumber);
/*
- * Estimate number of openable files. This must happen after setting
- * up semaphores, because on some platforms semaphores count as open
- * files.
+ * Estimate number of openable files. This must happen after setting up
+ * semaphores, because on some platforms semaphores count as open files.
*/
set_max_safe_fds();
TRUE,
DUPLICATE_SAME_ACCESS) == 0)
ereport(FATAL,
- (errmsg_internal("could not duplicate postmaster handle: error code %d",
- (int) GetLastError())));
+ (errmsg_internal("could not duplicate postmaster handle: error code %d",
+ (int) GetLastError())));
#endif
/*
- * Record postmaster options. We delay this till now to avoid
- * recording bogus options (eg, NBuffers too high for available
- * memory).
+ * Record postmaster options. We delay this till now to avoid recording
+ * bogus options (eg, NBuffers too high for available memory).
*/
if (!CreateOptsFile(argc, argv, my_exec_path))
ExitPostmaster(1);
SysLoggerPID = SysLogger_Start();
/*
- * Reset whereToSendOutput from Debug (its starting state) to None.
- * This stops ereport from sending log messages to stderr unless
+ * Reset whereToSendOutput from Debug (its starting state) to None. This
+ * stops ereport from sending log messages to stderr unless
* Log_destination permits. We don't do this until the postmaster is
* fully launched, since startup failures may as well be reported to
* stderr.
status = ServerLoop();
/*
- * ServerLoop probably shouldn't ever return, but if it does, close
- * down.
+ * ServerLoop probably shouldn't ever return, but if it does, close down.
*/
ExitPostmaster(status != STATUS_OK);
else
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not read permissions of directory \"%s\": %m",
- DataDir)));
+ errmsg("could not read permissions of directory \"%s\": %m",
+ DataDir)));
}
/*
/*
* Check if the directory has group or world access. If so, reject.
*
- * It would be possible to allow weaker constraints (for example, allow
- * group access) but we cannot make a general assumption that that is
- * okay; for example there are platforms where nearly all users customarily
- * belong to the same group. Perhaps this test should be configurable.
+ * It would be possible to allow weaker constraints (for example, allow group
+ * access) but we cannot make a general assumption that that is okay; for
+ * example there are platforms where nearly all users customarily belong
+ * to the same group. Perhaps this test should be configurable.
*
- * XXX temporarily suppress check when on Windows, because there may not
- * be proper support for Unix-y file permissions. Need to think of a
+ * XXX temporarily suppress check when on Windows, because there may not be
+ * proper support for Unix-y file permissions. Need to think of a
* reasonable check to apply on Windows.
*/
#if !defined(WIN32) && !defined(__CYGWIN__)
printf(_(" -s send SIGSTOP to all backend servers if one dies\n"));
printf(_("\nPlease read the documentation for the complete list of run-time\n"
- "configuration settings and how to set them on the command line or in\n"
- "the configuration file.\n\n"
-
"Report bugs to
.\n"));
+ "configuration settings and how to set them on the command line or in\n"
+ "the configuration file.\n\n"
+ "Report bugs to
.\n"));
}
/*
* Wait for something to happen.
*
- * We wait at most one minute, or the minimum autovacuum delay, to
- * ensure that the other background tasks handled below get done
- * even when no requests are arriving.
+ * We wait at most one minute, or the minimum autovacuum delay, to ensure
+ * that the other background tasks handled below get done even when no
+ * requests are arriving.
*/
memcpy((char *) &rmask, (char *) &readmask, sizeof(fd_set));
selres = select(nSockets, &rmask, NULL, NULL, &timeout);
/*
- * Block all signals until we wait again. (This makes it safe for
- * our signal handlers to do nontrivial work.)
+ * Block all signals until we wait again. (This makes it safe for our
+ * signal handlers to do nontrivial work.)
*/
PG_SETMASK(&BlockSig);
}
/*
- * New connection pending on any of our sockets? If so, fork a
- * child process to deal with it.
+ * New connection pending on any of our sockets? If so, fork a child
+ * process to deal with it.
*/
if (selres > 0)
{
/*
- * Select a random seed at the time of first receiving a
- * request.
+ * Select a random seed at the time of first receiving a request.
*/
while (random_seed == 0)
{
/*
* We are not sure how much precision is in tv_usec, so we
* swap the high and low 16 bits of 'later' and XOR them with
- * 'earlier'. On the off chance that the result is 0, we
- * loop until it isn't.
+ * 'earlier'. On the off chance that the result is 0, we loop
+ * until it isn't.
*/
random_seed = earlier.tv_usec ^
((later.tv_usec << 16) |
BackendStartup(port);
/*
- * We no longer need the open socket or port
- * structure in this process
+ * We no longer need the open socket or port structure
+ * in this process
*/
StreamClose(port->sock);
ConnFree(port);
/*
* Start a new autovacuum process, if there isn't one running already.
- * (It'll die relatively quickly.) We check that it's not started
- * too frequently in autovac_start.
+ * (It'll die relatively quickly.) We check that it's not started too
+ * frequently in autovac_start.
*/
if (AutoVacuumingActive() && AutoVacPID == 0 &&
StartupPID == 0 && !FatalError && Shutdown == NoShutdown)
PgStatPID = pgstat_start();
/*
- * Touch the socket and lock file every 58 minutes, to
- * ensure that they are not removed by overzealous /tmp-cleaning
- * tasks. We assume no one runs cleaners with cutoff times of
- * less than an hour ...
+ * Touch the socket and lock file every 58 minutes, to ensure that
+ * they are not removed by overzealous /tmp-cleaning tasks. We assume
+ * no one runs cleaners with cutoff times of less than an hour ...
*/
now = time(NULL);
if (now - last_touch_time >= 58 * SECS_PER_MINUTE)
{
/*
* EOF after SSLdone probably means the client didn't like our
- * response to NEGOTIATE_SSL_CODE. That's not an error condition,
- * so don't clutter the log with a complaint.
+ * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
+ * don't clutter the log with a complaint.
*/
if (!SSLdone)
ereport(COMMERROR,
/*
* Allocate at least the size of an old-style startup packet, plus one
- * extra byte, and make sure all are zeroes. This ensures we will
- * have null termination of all strings, in both fixed- and
- * variable-length packet layouts.
+ * extra byte, and make sure all are zeroes. This ensures we will have
+ * null termination of all strings, in both fixed- and variable-length
+ * packet layouts.
*/
if (len <= (int32) sizeof(StartupPacket))
buf = palloc0(sizeof(StartupPacket) + 1);
{
ereport(COMMERROR,
(errcode_for_socket_access(),
- errmsg("failed to send SSL negotiation response: %m")));
+ errmsg("failed to send SSL negotiation response: %m")));
return STATUS_ERROR; /* close the connection */
}
/* Could add additional special packet types here */
/*
- * Set FrontendProtocol now so that ereport() knows what format to
- * send if we fail during startup.
+ * Set FrontendProtocol now so that ereport() knows what format to send if
+ * we fail during startup.
*/
FrontendProtocol = proto;
/* Check we can handle the protocol the frontend is using. */
if (PG_PROTOCOL_MAJOR(proto) < PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST) ||
- PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) ||
- (PG_PROTOCOL_MAJOR(proto) == PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) &&
- PG_PROTOCOL_MINOR(proto) > PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST)))
+ PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) ||
+ (PG_PROTOCOL_MAJOR(proto) == PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) &&
+ PG_PROTOCOL_MINOR(proto) > PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST)))
ereport(FATAL,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u",
- PG_PROTOCOL_MAJOR(proto), PG_PROTOCOL_MINOR(proto),
+ PG_PROTOCOL_MAJOR(proto), PG_PROTOCOL_MINOR(proto),
PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST),
PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST),
PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST))));
/*
- * Now fetch parameters out of startup packet and save them into the
- * Port structure. All data structures attached to the Port struct
- * must be allocated in TopMemoryContext so that they won't disappear
- * when we pass them to PostgresMain (see BackendRun). We need not
- * worry about leaking this storage on failure, since we aren't in the
- * postmaster process anymore.
+ * Now fetch parameters out of startup packet and save them into the Port
+ * structure. All data structures attached to the Port struct must be
+ * allocated in TopMemoryContext so that they won't disappear when we pass
+ * them to PostgresMain (see BackendRun). We need not worry about leaking
+ * this storage on failure, since we aren't in the postmaster process
+ * anymore.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
int32 offset = sizeof(ProtocolVersion);
/*
- * Scan packet body for name/option pairs. We can assume any
- * string beginning within the packet body is null-terminated,
- * thanks to zeroing extra byte above.
+ * Scan packet body for name/option pairs. We can assume any string
+ * beginning within the packet body is null-terminated, thanks to
+ * zeroing extra byte above.
*/
port->guc_options = NIL;
else
{
/*
- * Get the parameters from the old-style, fixed-width-fields
- * startup packet as C strings. The packet destination was
- * cleared first so a short packet has zeros silently added. We
- * have to be prepared to truncate the pstrdup result for oversize
- * fields, though.
+ * Get the parameters from the old-style, fixed-width-fields startup
+ * packet as C strings. The packet destination was cleared first so a
+ * short packet has zeros silently added. We have to be prepared to
+ * truncate the pstrdup result for oversize fields, though.
*/
StartupPacket *packet = (StartupPacket *) buf;
if (port->user_name == NULL || port->user_name[0] == '\0')
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no PostgreSQL user name specified in startup packet")));
+ errmsg("no PostgreSQL user name specified in startup packet")));
/* The database defaults to the user name. */
if (port->database_name == NULL || port->database_name[0] == '\0')
if (Db_user_namespace)
{
/*
- * If user@, it is a global user, remove '@'. We only want to do
- * this if there is an '@' at the end and no earlier in the user
- * string or they may fake as a local user of another database
- * attaching to this database.
+ * If user@, it is a global user, remove '@'. We only want to do this
+ * if there is an '@' at the end and no earlier in the user string or
+ * they may fake as a local user of another database attaching to this
+ * database.
*/
if (strchr(port->user_name, '@') ==
port->user_name + strlen(port->user_name) - 1)
}
/*
- * Truncate given database and user names to length of a Postgres
- * name. This avoids lookup failures when overlength names are given.
+ * Truncate given database and user names to length of a Postgres name.
+ * This avoids lookup failures when overlength names are given.
*/
if (strlen(port->database_name) >= NAMEDATALEN)
port->database_name[NAMEDATALEN - 1] = '\0';
MemoryContextSwitchTo(oldcontext);
/*
- * If we're going to reject the connection due to database state, say
- * so now instead of wasting cycles on an authentication exchange.
- * (This also allows a pg_ping utility to be written.)
+ * If we're going to reject the connection due to database state, say so
+ * now instead of wasting cycles on an authentication exchange. (This also
+ * allows a pg_ping utility to be written.)
*/
switch (port->canAcceptConnections)
{
int backendPID;
long cancelAuthCode;
Backend *bp;
+
#ifndef EXEC_BACKEND
Dlelem *curr;
#else
cancelAuthCode = (long) ntohl(canc->cancelAuthCode);
/*
- * See if we have a matching backend. In the EXEC_BACKEND case, we
- * can no longer access the postmaster's own backend list, and must
- * rely on the duplicate array in shared memory.
+ * See if we have a matching backend. In the EXEC_BACKEND case, we can no
+ * longer access the postmaster's own backend list, and must rely on the
+ * duplicate array in shared memory.
*/
#ifndef EXEC_BACKEND
for (curr = DLGetHead(BackendList); curr; curr = DLGetSucc(curr))
else
/* Right PID, wrong key: no way, Jose */
ereport(DEBUG2,
- (errmsg_internal("bad key in cancel request for process %d",
- backendPID)));
+ (errmsg_internal("bad key in cancel request for process %d",
+ backendPID)));
return;
}
}
* Don't start too many children.
*
* We allow more connections than we can have backends here because some
- * might still be authenticating; they might fail auth, or some
- * existing backend might exit before the auth cycle is completed. The
- * exact MaxBackends limit is enforced when a new backend tries to
- * join the shared-inval backend array.
+ * might still be authenticating; they might fail auth, or some existing
+ * backend might exit before the auth cycle is completed. The exact
+ * MaxBackends limit is enforced when a new backend tries to join the
+ * shared-inval backend array.
*/
if (CountChildren() >= 2 * MaxBackends)
return CAC_TOOMANY;
else
{
/*
- * Precompute password salt values to use for this connection.
- * It's slightly annoying to do this long in advance of knowing
- * whether we'll need 'em or not, but we must do the random()
- * calls before we fork, not after. Else the postmaster's random
- * sequence won't get advanced, and all backends would end up
- * using the same salt...
+ * Precompute password salt values to use for this connection. It's
+ * slightly annoying to do this long in advance of knowing whether
+ * we'll need 'em or not, but we must do the random() calls before we
+ * fork, not after. Else the postmaster's random sequence won't get
+ * advanced, and all backends would end up using the same salt...
*/
RandomSalt(port->cryptSalt, port->md5Salt);
}
/*
* Create or re-create shared memory and semaphores.
*
- * Note: in each "cycle of life" we will normally assign the same IPC
- * keys (if using SysV shmem and/or semas), since the port number is
- * used to determine IPC keys. This helps ensure that we will clean
- * up dead IPC objects if the postmaster crashes and is restarted.
+ * Note: in each "cycle of life" we will normally assign the same IPC keys
+ * (if using SysV shmem and/or semas), since the port number is used to
+ * determine IPC keys. This helps ensure that we will clean up dead IPC
+ * objects if the postmaster crashes and is restarted.
*/
CreateSharedMemoryAndSemaphores(false, port);
}
if (Shutdown <= SmartShutdown)
{
ereport(LOG,
- (errmsg("received SIGHUP, reloading configuration files")));
+ (errmsg("received SIGHUP, reloading configuration files")));
ProcessConfigFile(PGC_SIGHUP);
SignalChildren(SIGHUP);
if (BgWriterPID != 0)
/*
* Fast Shutdown:
*
- * Abort all children with SIGTERM (rollback active transactions
- * and exit) and shut down when they are gone.
+ * Abort all children with SIGTERM (rollback active transactions and
+ * exit) and shut down when they are gone.
*/
if (Shutdown >= FastShutdown)
break;
/*
* No children left. Begin shutdown of data base system.
*
- * Note: if we previously got SIGTERM then we may send SIGUSR2 to
- * the bgwriter a second time here. This should be harmless.
+ * Note: if we previously got SIGTERM then we may send SIGUSR2 to the
+ * bgwriter a second time here. This should be harmless.
*/
if (StartupPID != 0 || FatalError)
break; /* let reaper() handle this */
#ifdef HAVE_WAITPID
int status; /* backend exit status */
-
#else
#ifndef WIN32
union wait status; /* backend exit status */
while ((pid = win32_waitpid(&exitstatus)) > 0)
{
/*
- * We need to do this here, and not in CleanupBackend, since this
- * is to be called on all children when we are done with them.
- * Could move to LogChildExit, but that seems like asking for
- * future trouble...
+ * We need to do this here, and not in CleanupBackend, since this is
+ * to be called on all children when we are done with them. Could move
+ * to LogChildExit, but that seems like asking for future trouble...
*/
win32_RemoveChild(pid);
#endif /* WIN32 */
LogChildExit(LOG, _("startup process"),
pid, exitstatus);
ereport(LOG,
- (errmsg("aborting startup due to startup process failure")));
+ (errmsg("aborting startup due to startup process failure")));
ExitPostmaster(1);
}
FatalError = false;
/*
- * Load the flat authorization file into postmaster's cache.
- * The startup process has recomputed this from the database
- * contents, so we wait till it finishes before loading it.
+ * Load the flat authorization file into postmaster's cache. The
+ * startup process has recomputed this from the database contents,
+ * so we wait till it finishes before loading it.
*/
load_role();
/*
* Go to shutdown mode if a shutdown request was pending.
- * Otherwise, try to start the archiver and stats collector
- * too. (We could, but don't, try to start autovacuum here.)
+ * Otherwise, try to start the archiver and stats collector too.
+ * (We could, but don't, try to start autovacuum here.)
*/
if (Shutdown > NoShutdown && BgWriterPID != 0)
kill(BgWriterPID, SIGUSR2);
!DLGetHead(BackendList) && AutoVacPID == 0)
{
/*
- * Normal postmaster exit is here: we've seen normal exit
- * of the bgwriter after it's been told to shut down. We
- * expect that it wrote a shutdown checkpoint. (If for
- * some reason it didn't, recovery will occur on next
- * postmaster start.)
+ * Normal postmaster exit is here: we've seen normal exit of
+ * the bgwriter after it's been told to shut down. We expect
+ * that it wrote a shutdown checkpoint. (If for some reason
+ * it didn't, recovery will occur on next postmaster start.)
*
- * Note: we do not wait around for exit of the archiver or
- * stats processes. They've been sent SIGQUIT by this
- * point, and in any case contain logic to commit
- * hara-kiri if they notice the postmaster is gone.
+ * Note: we do not wait around for exit of the archiver or stats
+ * processes. They've been sent SIGQUIT by this point, and in
+ * any case contain logic to commit hara-kiri if they notice
+ * the postmaster is gone.
*/
ExitPostmaster(0);
}
}
/*
- * Was it the autovacuum process? Normal exit can be ignored;
- * we'll start a new one at the next iteration of the postmaster's
- * main loop, if necessary.
+ * Was it the autovacuum process? Normal exit can be ignored; we'll
+ * start a new one at the next iteration of the postmaster's main
+ * loop, if necessary.
*
* An unexpected exit must crash the system.
*/
}
/*
- * Was it the archiver? If so, just try to start a new one; no
- * need to force reset of the rest of the system. (If fail, we'll
- * try again in future cycles of the main loop.)
+ * Was it the archiver? If so, just try to start a new one; no need
+ * to force reset of the rest of the system. (If fail, we'll try
+ * again in future cycles of the main loop.)
*/
if (PgArchPID != 0 && pid == PgArchPID)
{
}
/*
- * Was it the statistics collector? If so, just try to start a
- * new one; no need to force reset of the rest of the system. (If
- * fail, we'll try again in future cycles of the main loop.)
+ * Was it the statistics collector? If so, just try to start a new
+ * one; no need to force reset of the rest of the system. (If fail,
+ * we'll try again in future cycles of the main loop.)
*/
if (PgStatPID != 0 && pid == PgStatPID)
{
{
/*
* Wait for all important children to exit, then reset shmem and
- * StartupDataBase. (We can ignore the archiver and stats
- * processes here since they are not connected to shmem.)
+ * StartupDataBase. (We can ignore the archiver and stats processes
+ * here since they are not connected to shmem.)
*/
if (DLGetHead(BackendList) || StartupPID != 0 || BgWriterPID != 0 ||
AutoVacPID != 0)
goto reaper_done;
ereport(LOG,
- (errmsg("all server processes terminated; reinitializing")));
+ (errmsg("all server processes terminated; reinitializing")));
shmem_exit(0);
reset_shared(PostPortNumber);
LogChildExit(DEBUG2, _("server process"), pid, exitstatus);
/*
- * If a backend dies in an ugly way (i.e. exit status not 0) then we
- * must signal all other backends to quickdie. If exit status is zero
- * we assume everything is hunky dory and simply remove the backend
- * from the active backend list.
+ * If a backend dies in an ugly way (i.e. exit status not 0) then we must
+ * signal all other backends to quickdie. If exit status is zero we
+ * assume everything is hunky dory and simply remove the backend from the
+ * active backend list.
*/
if (exitstatus != 0)
{
Backend *bp;
/*
- * Make log entry unless there was a previous crash (if so, nonzero
- * exit status is to be expected in SIGQUIT response; don't clutter
- * log)
+ * Make log entry unless there was a previous crash (if so, nonzero exit
+ * status is to be expected in SIGQUIT response; don't clutter log)
*/
if (!FatalError)
{
LogChildExit(LOG, procname, pid, exitstatus);
ereport(LOG,
- (errmsg("terminating any other active server processes")));
+ (errmsg("terminating any other active server processes")));
}
/* Process regular backends */
else
{
/*
- * This backend is still alive. Unless we did so already,
- * tell it to commit hara-kiri.
+ * This backend is still alive. Unless we did so already, tell it
+ * to commit hara-kiri.
*
- * SIGQUIT is the special signal that says exit without proc_exit
- * and let the user know what's going on. But if SendStop is
- * set (-s on command line), then we send SIGSTOP instead, so
- * that we can get core dumps from all backends by hand.
+ * SIGQUIT is the special signal that says exit without proc_exit and
+ * let the user know what's going on. But if SendStop is set (-s
+ * on command line), then we send SIGSTOP instead, so that we can
+ * get core dumps from all backends by hand.
*/
if (!FatalError)
{
ereport(DEBUG2,
(errmsg_internal("sending %s to process %d",
- (SendStop ? "SIGSTOP" : "SIGQUIT"),
+ (SendStop ? "SIGSTOP" : "SIGQUIT"),
(int) bp->pid)));
kill(bp->pid, (SendStop ? SIGSTOP : SIGQUIT));
}
ereport(lev,
/*
- * translator: %s is a noun phrase describing a child process,
- * such as "server process"
+ * translator: %s is a noun phrase describing a child process, such as
+ * "server process"
*/
(errmsg("%s (PID %d) exited with exit code %d",
procname, pid, WEXITSTATUS(exitstatus))));
ereport(lev,
/*
- * translator: %s is a noun phrase describing a child process,
- * such as "server process"
+ * translator: %s is a noun phrase describing a child process, such as
+ * "server process"
*/
(errmsg("%s (PID %d) was terminated by signal %d",
procname, pid, WTERMSIG(exitstatus))));
ereport(lev,
/*
- * translator: %s is a noun phrase describing a child process,
- * such as "server process"
+ * translator: %s is a noun phrase describing a child process, such as
+ * "server process"
*/
(errmsg("%s (PID %d) exited with unexpected status %d",
procname, pid, exitstatus)));
MyCancelKey = PostmasterRandom();
/*
- * Make room for backend data structure. Better before the fork() so
- * we can handle failure cleanly.
+ * Make room for backend data structure. Better before the fork() so we
+ * can handle failure cleanly.
*/
bn = (Backend *) malloc(sizeof(Backend));
if (!bn)
free(bn);
errno = save_errno;
ereport(LOG,
- (errmsg("could not fork new process for connection: %m")));
+ (errmsg("could not fork new process for connection: %m")));
report_fork_failure_to_client(port, save_errno);
return STATUS_ERROR;
}
(int) pid, port->sock)));
/*
- * Everything's been successful, it's safe to add this backend to our
- * list of backends.
+ * Everything's been successful, it's safe to add this backend to our list
+ * of backends.
*/
bn->pid = pid;
bn->cancel_key = MyCancelKey;
/*
* PreAuthDelay is a debugging aid for investigating problems in the
- * authentication cycle: it can be set in postgresql.conf to allow
- * time to attach to the newly-forked backend with a debugger. (See
- * also the -W backend switch, which we allow clients to pass through
- * PGOPTIONS, but it is not honored until after authentication.)
+ * authentication cycle: it can be set in postgresql.conf to allow time to
+ * attach to the newly-forked backend with a debugger. (See also the -W
+ * backend switch, which we allow clients to pass through PGOPTIONS, but
+ * it is not honored until after authentication.)
*/
if (PreAuthDelay > 0)
pg_usleep(PreAuthDelay * 1000000L);
port->commandTag = "";
/*
- * Initialize libpq and enable reporting of ereport errors to the
- * client. Must do this now because authentication uses libpq to send
- * messages.
+ * Initialize libpq and enable reporting of ereport errors to the client.
+ * Must do this now because authentication uses libpq to send messages.
*/
pq_init(); /* initialize libpq to talk to client */
whereToSendOutput = Remote; /* now safe to ereport to client */
/*
- * We arrange for a simple exit(0) if we receive SIGTERM or SIGQUIT
- * during any client authentication related communication. Otherwise
- * the postmaster cannot shutdown the database FAST or IMMED cleanly
- * if a buggy client blocks a backend during authentication.
+ * We arrange for a simple exit(0) if we receive SIGTERM or SIGQUIT during
+ * any client authentication related communication. Otherwise the
+ * postmaster cannot shutdown the database FAST or IMMED cleanly if a
+ * buggy client blocks a backend during authentication.
*/
pqsignal(SIGTERM, authdie);
pqsignal(SIGQUIT, authdie);
if (getnameinfo_all(&port->raddr.addr, port->raddr.salen,
remote_host, sizeof(remote_host),
remote_port, sizeof(remote_port),
- (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV))
+ (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV))
{
int ret = getnameinfo_all(&port->raddr.addr, port->raddr.salen,
- remote_host, sizeof(remote_host),
- remote_port, sizeof(remote_port),
- NI_NUMERICHOST | NI_NUMERICSERV);
+ remote_host, sizeof(remote_host),
+ remote_port, sizeof(remote_port),
+ NI_NUMERICHOST | NI_NUMERICSERV);
if (ret)
ereport(WARNING,
/*
* In EXEC_BACKEND case, we didn't inherit the contents of pg_hba.conf
- * etcetera from the postmaster, and have to load them ourselves.
- * Build the PostmasterContext (which didn't exist before, in this
- * process) to contain the data.
+ * etcetera from the postmaster, and have to load them ourselves. Build
+ * the PostmasterContext (which didn't exist before, in this process) to
+ * contain the data.
*
* FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#endif
/*
- * Ready to begin client interaction. We will give up and exit(0)
- * after a time delay, so that a broken client can't hog a connection
+ * Ready to begin client interaction. We will give up and exit(0) after a
+ * time delay, so that a broken client can't hog a connection
* indefinitely. PreAuthDelay doesn't count against the time limit.
*/
if (!enable_sig_alarm(AuthenticationTimeout * 1000, false))
elog(FATAL, "could not set timer for authorization timeout");
/*
- * Receive the startup packet (which might turn out to be a cancel
- * request packet).
+ * Receive the startup packet (which might turn out to be a cancel request
+ * packet).
*/
status = ProcessStartupPacket(port, false);
/*
* Now that we have the user and database name, we can set the process
- * title for ps. It's good to do this as early as possible in
- * startup.
+ * title for ps. It's good to do this as early as possible in startup.
*/
init_ps_display(port->user_name, port->database_name, remote_ps_data);
set_ps_display("authentication");
ClientAuthentication(port); /* might not return, if failure */
/*
- * Done with authentication. Disable timeout, and prevent
- * SIGTERM/SIGQUIT again until backend startup is complete.
+ * Done with authentication. Disable timeout, and prevent SIGTERM/SIGQUIT
+ * again until backend startup is complete.
*/
if (!disable_sig_alarm(false))
elog(FATAL, "could not disable timer for authorization timeout");
/*
* Don't want backend to be able to see the postmaster random number
- * generator state. We have to clobber the static random_seed *and*
- * start a new random sequence in the random() library function.
+ * generator state. We have to clobber the static random_seed *and* start
+ * a new random sequence in the random() library function.
*/
random_seed = 0;
srandom((unsigned int) (MyProcPid ^ port->session_start.tv_usec));
av[ac++] = port->database_name;
/*
- * Pass the (insecure) option switches from the connection request.
- * (It's OK to mangle port->cmdline_options now.)
+ * Pass the (insecure) option switches from the connection request. (It's
+ * OK to mangle port->cmdline_options now.)
*/
if (port->cmdline_options)
split_opts(av, &ac, port->cmdline_options);
Assert(ac < maxac);
/*
- * Release postmaster's working memory context so that backend can
- * recycle the space. Note this does not trash *MyProcPort, because
- * ConnCreate() allocated that space with malloc() ... else we'd need
- * to copy the Port data here. Also, subsidiary data such as the
- * username isn't lost either; see ProcessStartupPacket().
+ * Release postmaster's working memory context so that backend can recycle
+ * the space. Note this does not trash *MyProcPort, because ConnCreate()
+ * allocated that space with malloc() ... else we'd need to copy the Port
+ * data here. Also, subsidiary data such as the username isn't lost
+ * either; see ProcessStartupPacket().
*/
MemoryContextSwitchTo(TopMemoryContext);
MemoryContextDelete(PostmasterContext);
*/
ereport(DEBUG3,
(errmsg_internal("%s child[%d]: starting with (",
- progname, (int)getpid())));
+ progname, (int) getpid())));
for (i = 0; i < ac; ++i)
ereport(DEBUG3,
(errmsg_internal("\t%s", av[i])));
ereport(DEBUG3,
(errmsg_internal(")")));
- ClientAuthInProgress = false; /* client_min_messages is active
- * now */
+ ClientAuthInProgress = false; /* client_min_messages is active now */
return (PostgresMain(ac, av, port->user_name));
}
pid_t pid;
char tmpfilename[MAXPGPATH];
BackendParameters param;
- FILE *fp;
+ FILE *fp;
if (!save_backend_variables(¶m, port))
return -1; /* log made by save_backend_variables */
}
}
- return pid; /* Parent returns pid, or -1 on fork
- * failure */
+ return pid; /* Parent returns pid, or -1 on fork failure */
}
-
-#else /* WIN32 */
+#else /* WIN32 */
/*
* internal_forkexec win32 implementation
*
* - starts backend using CreateProcess(), in suspended state
* - writes out backend variables to the parameter file
- * - during this, duplicates handles and sockets required for
- * inheritance into the new process
+ * - during this, duplicates handles and sockets required for
+ * inheritance into the new process
* - resumes execution of the new process once the backend parameter
- * file is complete.
+ * file is complete.
*/
static pid_t
internal_forkexec(int argc, char *argv[], Port *port)
char cmdLine[MAXPGPATH * 2];
HANDLE childHandleCopy;
HANDLE waiterThread;
- HANDLE paramHandle;
+ HANDLE paramHandle;
BackendParameters *param;
SECURITY_ATTRIBUTES sa;
- char paramHandleStr[32];
+ char paramHandleStr[32];
/* Make sure caller set up argv properly */
Assert(argc >= 3);
Assert(argv[2] == NULL);
/* Set up shared memory for parameter passing */
- ZeroMemory(&sa,sizeof(sa));
+ ZeroMemory(&sa, sizeof(sa));
sa.nLength = sizeof(sa);
sa.bInheritHandle = TRUE;
paramHandle = CreateFileMapping(INVALID_HANDLE_VALUE,
}
/* Insert temp file name after -fork argument */
- sprintf(paramHandleStr, "%lu", (DWORD)paramHandle);
+ sprintf(paramHandleStr, "%lu", (DWORD) paramHandle);
argv[2] = paramHandleStr;
/* Format the cmd line */
memset(&pi, 0, sizeof(pi));
memset(&si, 0, sizeof(si));
si.cb = sizeof(si);
+
/*
- * Create the subprocess in a suspended state. This will be resumed
- * later, once we have written out the parameter file.
+ * Create the subprocess in a suspended state. This will be resumed later,
+ * once we have written out the parameter file.
*/
if (!CreateProcess(NULL, cmdLine, NULL, NULL, TRUE, CREATE_SUSPENDED,
NULL, NULL, &si, &pi))
if (!save_backend_variables(param, port, pi.hProcess, pi.dwProcessId))
{
/*
- * log made by save_backend_variables, but we have to clean
- * up the mess with the half-started process
+ * log made by save_backend_variables, but we have to clean up the
+ * mess with the half-started process
*/
if (!TerminateProcess(pi.hProcess, 255))
ereport(ERROR,
(int) GetLastError());
/*
- * Now that the backend variables are written out, we start the
- * child thread so it can start initializing while we set up
- * the rest of the parent state.
+ * Now that the backend variables are written out, we start the child
+ * thread so it can start initializing while we set up the rest of the
+ * parent state.
*/
if (ResumeThread(pi.hThread) == -1)
{
FALSE,
DUPLICATE_SAME_ACCESS) == 0)
ereport(FATAL,
- (errmsg_internal("could not duplicate child handle: error code %d",
- (int) GetLastError())));
+ (errmsg_internal("could not duplicate child handle: error code %d",
+ (int) GetLastError())));
waiterThread = CreateThread(NULL, 64 * 1024, win32_sigchld_waiter,
(LPVOID) childHandleCopy, 0, NULL);
if (!waiterThread)
ereport(FATAL,
- (errmsg_internal("could not create sigchld waiter thread: error code %d",
- (int) GetLastError())));
+ (errmsg_internal("could not create sigchld waiter thread: error code %d",
+ (int) GetLastError())));
CloseHandle(waiterThread);
if (IsUnderPostmaster)
return pi.dwProcessId;
}
-
-#endif /* WIN32 */
+#endif /* WIN32 */
/*
elog(FATAL, "invalid subpostmaster invocation");
/*
- * If appropriate, physically re-attach to shared memory segment.
- * We want to do this before going any further to ensure that we
- * can attach at the same address the postmaster used.
+ * If appropriate, physically re-attach to shared memory segment. We want
+ * to do this before going any further to ensure that we can attach at the
+ * same address the postmaster used.
*/
if (strcmp(argv[1], "-forkbackend") == 0 ||
strcmp(argv[1], "-forkautovac") == 0 ||
PGSharedMemoryReAttach();
/*
- * Start our win32 signal implementation. This has to be done
- * after we read the backend variables, because we need to pick
- * up the signal pipe from the parent process.
+ * Start our win32 signal implementation. This has to be done after we
+ * read the backend variables, because we need to pick up the signal pipe
+ * from the parent process.
*/
#ifdef WIN32
pgwin32_signal_initialize();
CreateSharedMemoryAndSemaphores(false, 0);
#ifdef USE_SSL
+
/*
- * Need to reinitialize the SSL library in the backend,
- * since the context structures contain function pointers
- * and cannot be passed through the parameter file.
+ * Need to reinitialize the SSL library in the backend, since the
+ * context structures contain function pointers and cannot be passed
+ * through the parameter file.
*/
if (EnableSSL)
secure_initialize();
if (strcmp(argv[1], "-forkcol") == 0)
{
/*
- * Do NOT close postmaster sockets here, because we are forking
- * from pgstat buffer process, which already did it.
+ * Do NOT close postmaster sockets here, because we are forking from
+ * pgstat buffer process, which already did it.
*/
/* Do not want to attach to shared memory */
return 1; /* shouldn't get here */
}
-
#endif /* EXEC_BACKEND */
/* should cleanup shared memory and kill all backends */
/*
- * Not sure of the semantics here. When the Postmaster dies, should
- * the backends all be killed? probably not.
+ * Not sure of the semantics here. When the Postmaster dies, should the
+ * backends all be killed? probably not.
*
* MUST -- vadim 05-10-1999
*/
if (CheckPostmasterSignal(PMSIGNAL_WAKEN_CHILDREN))
{
/*
- * Send SIGUSR1 to all children (triggers
- * CatchupInterruptHandler). See storage/ipc/sinval[adt].c for the
- * use of this.
+ * Send SIGUSR1 to all children (triggers CatchupInterruptHandler).
+ * See storage/ipc/sinval[adt].c for the use of this.
*/
if (Shutdown <= SmartShutdown)
{
PgArchPID != 0 && Shutdown == NoShutdown)
{
/*
- * Send SIGUSR1 to archiver process, to wake it up and begin
- * archiving next transaction log file.
+ * Send SIGUSR1 to archiver process, to wake it up and begin archiving
+ * next transaction log file.
*/
kill(PgArchPID, SIGUSR1);
}
SysLoggerPID != 0)
{
/* Tell syslogger to rotate logfile */
- kill(SysLoggerPID, SIGUSR1);
+ kill(SysLoggerPID, SIGUSR1);
}
PG_SETMASK(&UnBlockSig);
* bytes, since only one of the two salts will be sent to the client.
* After that we need to compute more random bits.
*
- * We use % 255, sacrificing one possible byte value, so as to ensure
- * that all bits of the random() value participate in the result.
- * While at it, add one to avoid generating any null bytes.
+ * We use % 255, sacrificing one possible byte value, so as to ensure that
+ * all bits of the random() value participate in the result. While at it,
+ * add one to avoid generating any null bytes.
*/
md5Salt[0] = (rand % 255) + 1;
rand = PostmasterRandom();
/*
* StartChildProcess -- start a non-backend child process for the postmaster
*
- * xlop determines what kind of child will be started. All child types
+ * xlop determines what kind of child will be started. All child types
* initially go to BootstrapMain, which will handle common setup.
*
* Return value of StartChildProcess is subprocess' PID, or 0 if failed
if (pid == 0) /* child */
{
- IsUnderPostmaster = true; /* we are a postmaster subprocess
- * now */
+ IsUnderPostmaster = true; /* we are a postmaster subprocess now */
/* Close the postmaster's sockets */
ClosePostmasterPorts(false);
{
/* in parent, fork failed */
int save_errno = errno;
+
errno = save_errno;
switch (xlop)
{
break;
case BS_XLOG_BGWRITER:
ereport(LOG,
- (errmsg("could not fork background writer process: %m")));
+ (errmsg("could not fork background writer process: %m")));
break;
default:
ereport(LOG,
}
/*
- * fork failure is fatal during startup, but there's no need to
- * choke immediately if starting other child types fails.
+ * fork failure is fatal during startup, but there's no need to choke
+ * immediately if starting other child types fails.
*/
if (xlop == BS_XLOG_STARTUP)
ExitPostmaster(1);
extern LWLock *LWLockArray;
extern slock_t *ProcStructLock;
extern int pgStatSock;
-extern int pgStatPipe[2];
+extern int pgStatPipe[2];
#ifndef WIN32
#define write_inheritable_socket(dest, src, childpid) (*(dest) = (src))
#define read_inheritable_socket(dest, src) (*(dest) = *(src))
#else
-static void write_duplicated_handle(HANDLE *dest, HANDLE src, HANDLE child);
-static void write_inheritable_socket(InheritableSocket *dest, SOCKET src,
- pid_t childPid);
-static void read_inheritable_socket(SOCKET *dest, InheritableSocket *src);
+static void write_duplicated_handle(HANDLE * dest, HANDLE src, HANDLE child);
+static void write_inheritable_socket(InheritableSocket * dest, SOCKET src,
+ pid_t childPid);
+static void read_inheritable_socket(SOCKET * dest, InheritableSocket * src);
#endif
/* Save critical backend variables into the BackendParameters struct */
#ifndef WIN32
static bool
-save_backend_variables(BackendParameters *param, Port *port)
+save_backend_variables(BackendParameters * param, Port *port)
#else
static bool
-save_backend_variables(BackendParameters *param, Port *port,
+save_backend_variables(BackendParameters * param, Port *port,
HANDLE childProcess, pid_t childPid)
#endif
{
* process instance of the handle to the parameter file.
*/
static void
-write_duplicated_handle(HANDLE *dest, HANDLE src, HANDLE childProcess)
+write_duplicated_handle(HANDLE * dest, HANDLE src, HANDLE childProcess)
{
- HANDLE hChild = INVALID_HANDLE_VALUE;
+ HANDLE hChild = INVALID_HANDLE_VALUE;
if (!DuplicateHandle(GetCurrentProcess(),
src,
* straight socket inheritance.
*/
static void
-write_inheritable_socket(InheritableSocket *dest, SOCKET src, pid_t childpid)
+write_inheritable_socket(InheritableSocket * dest, SOCKET src, pid_t childpid)
{
dest->origsocket = src;
if (src != 0 && src != -1)
* Read a duplicate socket structure back, and get the socket descriptor.
*/
static void
-read_inheritable_socket(SOCKET *dest, InheritableSocket *src)
+read_inheritable_socket(SOCKET * dest, InheritableSocket * src)
{
- SOCKET s;
+ SOCKET s;
- if (src->origsocket == -1 || src->origsocket == 0)
+ if (src->origsocket == -1 || src->origsocket == 0)
{
/* Not a real socket! */
*dest = src->origsocket;
*dest = s;
/*
- * To make sure we don't get two references to the same socket,
- * close the original one. (This would happen when inheritance
- * actually works..
+ * To make sure we don't get two references to the same socket, close
+ * the original one. (This would happen when inheritance actually
+ * works..
*/
closesocket(src->origsocket);
}
#ifndef WIN32
/* Non-win32 implementation reads from file */
- FILE *fp;
+ FILE *fp;
/* Open file */
fp = AllocateFile(id, PG_BINARY_R);
}
#else
/* Win32 version uses mapped file */
- HANDLE paramHandle;
+ HANDLE paramHandle;
BackendParameters *paramp;
- paramHandle = (HANDLE)atol(id);
+ paramHandle = (HANDLE) atol(id);
paramp = MapViewOfFile(paramHandle, FILE_MAP_READ, 0, 0, 0);
if (!paramp)
{
/* Restore critical backend variables from the BackendParameters struct */
static void
-restore_backend_variables(BackendParameters *param, Port *port)
+restore_backend_variables(BackendParameters * param, Port *port)
{
memcpy(port, ¶m->port, sizeof(Port));
read_inheritable_socket(&port->sock, ¶m->portsocket);
(errmsg_internal("could not find backend entry with pid %d",
(int) pid)));
}
-
#endif /* EXEC_BACKEND */
case WAIT_FAILED:
ereport(LOG,
(errmsg_internal("failed to wait on %lu of %lu children: error code %d",
- num, win32_numChildren, (int) GetLastError())));
+ num, win32_numChildren, (int) GetLastError())));
return -1;
case WAIT_TIMEOUT:
default:
/*
- * Get the exit code, and return the PID of, the
- * respective process
+ * Get the exit code, and return the PID of, the respective
+ * process
*/
index = offset + ret - WAIT_OBJECT_0;
Assert(index >= 0 && index < win32_numChildren);
if (!GetExitCodeProcess(win32_childHNDArray[index], &exitCode))
{
/*
- * If we get this far, this should never happen, but,
- * then again... No choice other than to assume a
- * catastrophic failure.
+ * If we get this far, this should never happen, but, then
+ * again... No choice other than to assume a catastrophic
+ * failure.
*/
ereport(FATAL,
- (errmsg_internal("failed to get exit code for child %lu",
- (unsigned long) win32_childPIDArray[index])));
+ (errmsg_internal("failed to get exit code for child %lu",
+ (unsigned long) win32_childPIDArray[index])));
}
*exitstatus = (int) exitCode;
return win32_childPIDArray[index];
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.19 2005/08/12 03:23:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.20 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* These must be exported for EXEC_BACKEND case ... annoying */
#ifndef WIN32
int syslogPipe[2] = {-1, -1};
-
#else
HANDLE syslogPipe[2] = {0, 0};
#endif
set_ps_display("");
/*
- * If we restarted, our stderr is already redirected into our own
- * input pipe. This is of course pretty useless, not to mention that
- * it interferes with detecting pipe EOF. Point stderr to /dev/null.
- * This assumes that all interesting messages generated in the
- * syslogger will come through elog.c and will be sent to
- * write_syslogger_file.
+ * If we restarted, our stderr is already redirected into our own input
+ * pipe. This is of course pretty useless, not to mention that it
+ * interferes with detecting pipe EOF. Point stderr to /dev/null. This
+ * assumes that all interesting messages generated in the syslogger will
+ * come through elog.c and will be sent to write_syslogger_file.
*/
if (redirection_done)
{
int fd = open(NULL_DEV, O_WRONLY);
/*
- * The closes might look redundant, but they are not: we want to
- * be darn sure the pipe gets closed even if the open failed. We
- * can survive running with stderr pointing nowhere, but we can't
- * afford to have extra pipe input descriptors hanging around.
+ * The closes might look redundant, but they are not: we want to be
+ * darn sure the pipe gets closed even if the open failed. We can
+ * survive running with stderr pointing nowhere, but we can't afford
+ * to have extra pipe input descriptors hanging around.
*/
close(fileno(stdout));
close(fileno(stderr));
}
/*
- * Also close our copy of the write end of the pipe. This is needed
- * to ensure we can detect pipe EOF correctly. (But note that in the
- * restart case, the postmaster already did this.)
+ * Also close our copy of the write end of the pipe. This is needed to
+ * ensure we can detect pipe EOF correctly. (But note that in the restart
+ * case, the postmaster already did this.)
*/
#ifndef WIN32
if (syslogPipe[1] >= 0)
/*
* Properly accept or ignore signals the postmaster might send us
*
- * Note: we ignore all termination signals, and instead exit only when
- * all upstream processes are gone, to ensure we don't miss any dying
- * gasps of broken backends...
+ * Note: we ignore all termination signals, and instead exit only when all
+ * upstream processes are gone, to ensure we don't miss any dying gasps of
+ * broken backends...
*/
pqsignal(SIGHUP, sigHupHandler); /* set flag to read config file */
pqsignal(SIGQUIT, SIG_IGN);
pqsignal(SIGALRM, SIG_IGN);
pqsignal(SIGPIPE, SIG_IGN);
- pqsignal(SIGUSR1, sigUsr1Handler); /* request log rotation */
+ pqsignal(SIGUSR1, sigUsr1Handler); /* request log rotation */
pqsignal(SIGUSR2, SIG_IGN);
/*
ProcessConfigFile(PGC_SIGHUP);
/*
- * Check if the log directory or filename pattern changed in
- * postgresql.conf. If so, force rotation to make sure we're
+ * Check if the log directory or filename pattern changed in
+ * postgresql.conf. If so, force rotation to make sure we're
* writing the logfiles in the right place.
*/
if (strcmp(Log_directory, currentLogDir) != 0)
currentLogFilename = pstrdup(Log_filename);
rotation_requested = true;
}
+
/*
* If rotation time parameter changed, reset next rotation time,
* but don't immediately force a rotation.
if (errno != EINTR)
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("select() failed in logger process: %m")));
+ errmsg("select() failed in logger process: %m")));
}
else if (rc > 0 && FD_ISSET(syslogPipe[0], &rfds))
{
if (errno != EINTR)
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not read from logger pipe: %m")));
+ errmsg("could not read from logger pipe: %m")));
}
else if (bytesRead > 0)
{
else
{
/*
- * Zero bytes read when select() is saying read-ready
- * means EOF on the pipe: that is, there are no longer any
- * processes with the pipe write end open. Therefore, the
- * postmaster and all backends are shut down, and we are
- * done.
+ * Zero bytes read when select() is saying read-ready means
+ * EOF on the pipe: that is, there are no longer any processes
+ * with the pipe write end open. Therefore, the postmaster
+ * and all backends are shut down, and we are done.
*/
pipe_eof_seen = true;
}
#else /* WIN32 */
/*
- * On Windows we leave it to a separate thread to transfer data
- * and detect pipe EOF. The main thread just wakes up once a
- * second to check for SIGHUP and rotation conditions.
+ * On Windows we leave it to a separate thread to transfer data and
+ * detect pipe EOF. The main thread just wakes up once a second to
+ * check for SIGHUP and rotation conditions.
*/
pgwin32_backend_usleep(1000000);
#endif /* WIN32 */
/*
* Normal exit from the syslogger is here. Note that we
- * deliberately do not close syslogFile before exiting; this
- * is to allow for the possibility of elog messages being
- * generated inside proc_exit. Regular exit() will take care
- * of flushing and closing stdio channels.
+ * deliberately do not close syslogFile before exiting; this is to
+ * allow for the possibility of elog messages being generated
+ * inside proc_exit. Regular exit() will take care of flushing
+ * and closing stdio channels.
*/
proc_exit(0);
}
* If first time through, create the pipe which will receive stderr
* output.
*
- * If the syslogger crashes and needs to be restarted, we continue to use
- * the same pipe (indeed must do so, since extant backends will be
- * writing into that pipe).
+ * If the syslogger crashes and needs to be restarted, we continue to use the
+ * same pipe (indeed must do so, since extant backends will be writing
+ * into that pipe).
*
- * This means the postmaster must continue to hold the read end of the
- * pipe open, so we can pass it down to the reincarnated syslogger.
- * This is a bit klugy but we have little choice.
+ * This means the postmaster must continue to hold the read end of the pipe
+ * open, so we can pass it down to the reincarnated syslogger. This is a
+ * bit klugy but we have little choice.
*/
#ifndef WIN32
if (syslogPipe[0] < 0)
if (pgpipe(syslogPipe) < 0)
ereport(FATAL,
(errcode_for_socket_access(),
- (errmsg("could not create pipe for syslog: %m"))));
+ (errmsg("could not create pipe for syslog: %m"))));
}
#else
if (!syslogPipe[0])
if (!CreatePipe(&syslogPipe[0], &syslogPipe[1], &sa, 32768))
ereport(FATAL,
(errcode_for_file_access(),
- (errmsg("could not create pipe for syslog: %m"))));
+ (errmsg("could not create pipe for syslog: %m"))));
}
#endif
mkdir(Log_directory, 0700);
/*
- * The initial logfile is created right in the postmaster, to verify
- * that the Log_directory is writable.
+ * The initial logfile is created right in the postmaster, to verify that
+ * the Log_directory is writable.
*/
filename = logfile_getname(time(NULL));
rotation_requested = false;
/*
- * When doing a time-based rotation, invent the new logfile name based
- * on the planned rotation time, not current time, to avoid "slippage"
- * in the file name when we don't do the rotation immediately.
+ * When doing a time-based rotation, invent the new logfile name based on
+ * the planned rotation time, not current time, to avoid "slippage" in the
+ * file name when we don't do the rotation immediately.
*/
if (time_based_rotation)
filename = logfile_getname(next_rotation_time);
/*
* Decide whether to overwrite or append. We can overwrite if (a)
* Log_truncate_on_rotation is set, (b) the rotation was triggered by
- * elapsed time and not something else, and (c) the computed file name
- * is different from what we were previously logging into.
+ * elapsed time and not something else, and (c) the computed file name is
+ * different from what we were previously logging into.
*
* Note: during the first rotation after forking off from the postmaster,
* last_file_name will be NULL. (We don't bother to set it in the
- * postmaster because it ain't gonna work in the EXEC_BACKEND case.)
- * So we will always append in that situation, even though truncating
- * would usually be safe.
+ * postmaster because it ain't gonna work in the EXEC_BACKEND case.) So we
+ * will always append in that situation, even though truncating would
+ * usually be safe.
*/
if (Log_truncate_on_rotation && time_based_rotation &&
last_file_name != NULL && strcmp(filename, last_file_name) != 0)
filename)));
/*
- * ENFILE/EMFILE are not too surprising on a busy system; just
- * keep using the old file till we manage to get a new one.
- * Otherwise, assume something's wrong with Log_directory and stop
- * trying to create files.
+ * ENFILE/EMFILE are not too surprising on a busy system; just keep
+ * using the old file till we manage to get a new one. Otherwise,
+ * assume something's wrong with Log_directory and stop trying to
+ * create files.
*/
if (saveerrno != ENFILE && saveerrno != EMFILE)
{
ereport(LOG,
- (errmsg("disabling automatic rotation (use SIGHUP to reenable)")));
+ (errmsg("disabling automatic rotation (use SIGHUP to reenable)")));
Log_RotationAge = 0;
Log_RotationSize = 0;
}
tm = pg_localtime(×tamp, global_timezone);
pg_strftime(filename + len, MAXPGPATH - len, Log_filename, tm);
}
- else
+ else
{
/* no strftime escapes, so append timestamp to new filename */
snprintf(filename + len, MAXPGPATH - len, "%s.%lu",
/*
* The requirements here are to choose the next time > now that is a
* "multiple" of the log rotation interval. "Multiple" can be interpreted
- * fairly loosely. In this version we align to local time rather than
+ * fairly loosely. In this version we align to local time rather than
* GMT.
*/
- rotinterval = Log_RotationAge * SECS_PER_MINUTE; /* convert to seconds */
+ rotinterval = Log_RotationAge * SECS_PER_MINUTE; /* convert to seconds */
now = time(NULL);
tm = pg_localtime(&now, global_timezone);
now += tm->tm_gmtoff;
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_color.c,v 1.4 2003/11/29 19:51:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_color.c,v 1.5 2005/10/15 02:49:24 momjian Exp $
*
*
* Note that there are some incestuous relationships between this code and
if (t == fillt || t == cb)
{ /* must allocate a new block */
newt = (union tree *) MALLOC((bottom) ?
- sizeof(struct colors) : sizeof(struct ptrs));
+ sizeof(struct colors) : sizeof(struct ptrs));
if (newt == NULL)
{
CERR(REG_ESPACE);
}
else
new = (struct colordesc *) REALLOC(cm->cd,
- n * sizeof(struct colordesc));
+ n * sizeof(struct colordesc));
if (new == NULL)
{
CERR(REG_ESPACE);
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_cvec.c,v 1.4 2003/11/29 19:51:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_cvec.c,v 1.5 2005/10/15 02:49:24 momjian Exp $
*
*/
if (cv == NULL)
return NULL;
cv->chrspace = nchrs;
- cv->chrs = (chr *) &cv->mcces[nmcces]; /* chrs just after MCCE
- * ptrs */
+ cv->chrs = (chr *) &cv->mcces[nmcces]; /* chrs just after MCCE ptrs */
cv->mccespace = nmcces;
cv->ranges = cv->chrs + nchrs + nmcces * (MAXMCCE + 1);
cv->rangespace = nranges;
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_lex.c,v 1.4 2003/11/29 19:51:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_lex.c,v 1.5 2005/10/15 02:49:24 momjian Exp $
*
*/
* lexescape - parse an ARE backslash escape (backslash already eaten)
* Note slightly nonstandard use of the CCLASS type code.
*/
-static int /* not actually used, but convenient for
- * RETV */
+static int /* not actually used, but convenient for RETV */
lexescape(struct vars * v)
{
chr c;
break;
case CHR('x'):
NOTE(REG_UUNPORT);
- c = lexdigits(v, 16, 1, 255); /* REs >255 long outside
- * spec */
+ c = lexdigits(v, 16, 1, 255); /* REs >255 long outside spec */
if (ISERR())
FAILW(REG_EESCAPE);
RETV(PLAIN, c);
case CHR('9'):
save = v->now;
v->now--; /* put first digit back */
- c = lexdigits(v, 10, 1, 255); /* REs >255 long outside
- * spec */
+ c = lexdigits(v, 10, 1, 255); /* REs >255 long outside spec */
if (ISERR())
FAILW(REG_EESCAPE);
/* ugly heuristic (first test is "exactly 1 digit?") */
* permission to use and distribute the software in accordance with the
* terms specified in this license.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_locale.c,v 1.6 2004/05/07 00:24:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_locale.c,v 1.7 2005/10/15 02:49:24 momjian Exp $
*/
/* ASCII character-name table */
}
/*
- * When case-independent, it's hard to decide when cvec ranges are
- * usable, so for now at least, we won't try. We allocate enough
- * space for two case variants plus a little extra for the two title
- * case variants.
+ * When case-independent, it's hard to decide when cvec ranges are usable,
+ * so for now at least, we won't try. We allocate enough space for two
+ * case variants plus a little extra for the two title case variants.
*/
nchrs = (b - a + 1) * 2 + 4;
/*
* Now compute the character class contents.
*
- * For the moment, assume that only char codes < 256 can be in these
- * classes.
+ * For the moment, assume that only char codes < 256 can be in these classes.
*/
switch ((enum classes) index)
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_nfa.c,v 1.3 2003/11/29 19:51:55 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_nfa.c,v 1.4 2005/10/15 02:49:24 momjian Exp $
*
*
* One or two things that technically ought to be in here
nfa->states = s->next;
}
s->prev = NULL;
- s->next = nfa->free; /* don't delete it, put it on the free
- * list */
+ s->next = nfa->free; /* don't delete it, put it on the free list */
nfa->free = s;
}
a->from = from;
/*
- * Put the new arc on the beginning, not the end, of the chains. Not
- * only is this easier, it has the very useful side effect that
- * deleting the most-recently-added arc is the cheapest case rather
- * than the most expensive one.
+ * Put the new arc on the beginning, not the end, of the chains. Not only
+ * is this easier, it has the very useful side effect that deleting the
+ * most-recently-added arc is the cheapest case rather than the most
+ * expensive one.
*/
a->inchain = to->ins;
to->ins = a;
static void
markreachable(struct nfa * nfa,
struct state * s,
- struct state * okay, /* consider only states with this
- * mark */
+ struct state * okay, /* consider only states with this mark */
struct state * mark) /* the value to mark with */
{
struct arc *a;
static void
markcanreach(struct nfa * nfa,
struct state * s,
- struct state * okay, /* consider only states with this
- * mark */
+ struct state * okay, /* consider only states with this mark */
struct state * mark) /* the value to mark with */
{
struct arc *a;
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regcomp.c,v 1.43 2005/05/25 21:40:40 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regcomp.c,v 1.44 2005/10/15 02:49:24 momjian Exp $
*
*/
regex_t *re;
chr *now; /* scan pointer into string */
chr *stop; /* end of string */
- chr *savenow; /* saved now and stop for "subroutine
- * call" */
+ chr *savenow; /* saved now and stop for "subroutine call" */
chr *savestop;
int err; /* error code (0 if none) */
int cflags; /* copy of compile flags */
#define NOERR() {if (ISERR()) return;} /* if error seen, return */
#define NOERRN() {if (ISERR()) return NULL;} /* NOERR with retval */
#define NOERRZ() {if (ISERR()) return 0;} /* NOERR with retval */
-#define INSIST(c, e) ((c) ? 0 : ERR(e)) /* if condition false,
- * error */
+#define INSIST(c, e) ((c) ? 0 : ERR(e)) /* if condition false, error */
#define NOTE(b) (v->re->re_info |= (b)) /* note visible condition */
#define EMPTYARC(x, y) newarc(v->nfa, EMPTY, 0, x, y)
#ifdef REG_DEBUG
FILE *debug = (flags & REG_PROGRESS) ? stdout : (FILE *) NULL;
-
#else
FILE *debug = (FILE *) NULL;
#endif
/*
* Now here's the subtle part. Because many REs have no lookback
* constraints, often knowing when you were in the pre state tells you
- * little; it's the next state(s) that are informative. But some of
- * them may have other inarcs, i.e. it may be possible to make actual
- * progress and then return to one of them. We must de-optimize such
- * cases, splitting each such state into progress and no-progress
- * states.
+ * little; it's the next state(s) that are informative. But some of them
+ * may have other inarcs, i.e. it may be possible to make actual progress
+ * and then return to one of them. We must de-optimize such cases,
+ * splitting each such state into progress and no-progress states.
*/
/* first, make a list of the states */
{ /* must be split */
if (s->tmp == NULL)
{ /* if not already in the list */
- /* (fixes bugs 505048, 230589, */
- /* 840258, 504785) */
+ /* (fixes bugs 505048, 230589, */
+ /* 840258, 504785) */
s->tmp = slist;
slist = s;
}
}
/*
- * hard part: something messy That is, capturing parens, back
- * reference, short/long clash, or an atom with substructure
- * containing one of those.
+ * hard part: something messy That is, capturing parens, back reference,
+ * short/long clash, or an atom with substructure containing one of those.
*/
/* now we'll need a subre for the contents even if they're boring */
endc = startc;
/*
- * Ranges are unportable. Actually, standard C does guarantee that
- * digits are contiguous, but making that an exception is just too
- * complicated.
+ * Ranges are unportable. Actually, standard C does guarantee that digits
+ * are contiguous, but making that an exception is just too complicated.
*/
if (startc != endc)
NOTE(REG_UUNPORT);
assert(s != v->mccepend);
}
p++;
- assert(*p != 0 && *(p + 1) == 0); /* only 2-char MCCEs for
- * now */
+ assert(*p != 0 && *(p + 1) == 0); /* only 2-char MCCEs for now */
newarc(v->nfa, PLAIN, subcolor(v->cm, *p), s, v->mccepend);
okcolors(v->nfa, v->cm);
}
else
{
v->lacons = (struct subre *) REALLOC(v->lacons,
- (v->nlacons + 1) * sizeof(struct subre));
+ (v->nlacons + 1) * sizeof(struct subre));
n = v->nlacons++;
}
if (v->lacons == NULL)
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/rege_dfa.c,v 1.5 2005/09/24 22:54:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/rege_dfa.c,v 1.6 2005/10/15 02:49:24 momjian Exp $
*
*/
chr *start, /* where the match should start */
chr *min, /* match must end at or after here */
chr *max, /* match must end at or before here */
- chr **coldp, /* store coldstart pointer here, if
- * nonNULL */
+ chr **coldp, /* store coldstart pointer here, if nonNULL */
int *hitstopp) /* record whether hit v->stop, if non-NULL */
{
chr *cp;
if (ss == NULL)
return NULL;
- if (coldp != NULL) /* report last no-progress state set, if
- * any */
+ if (coldp != NULL) /* report last no-progress state set, if any */
*coldp = lastcold(v, d);
if ((ss->flags & POSTSTATE) && cp > min)
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regexec.c,v 1.26 2005/09/24 22:54:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regexec.c,v 1.27 2005/10/15 02:49:24 momjian Exp $
*
*/
struct cnfa *cnfa;
struct colormap *cm;
chr *lastpost; /* location of last cache-flushed success */
- chr *lastnopr; /* location of last cache-flushed
- * NOPROGRESS */
+ chr *lastnopr; /* location of last cache-flushed NOPROGRESS */
struct sset *search; /* replacement-search-pointer memory */
int cptsmalloced; /* were the areas individually malloced? */
char *mallocarea; /* self, or master malloced area, or NULL */
#define ISERR() VISERR(v)
#define VERR(vv,e) (((vv)->err) ? (vv)->err : ((vv)->err = (e)))
#define ERR(e) VERR(v, e) /* record an error */
-#define NOERR() {if (ISERR()) return v->err;} /* if error seen, return
- * it */
+#define NOERR() {if (ISERR()) return v->err;} /* if error seen, return it */
#define OFF(p) ((p) - v->start)
#define LOFF(p) ((long)OFF(p))
chr *begin;
chr *end = NULL;
chr *cold;
- chr *open; /* open and close of range of possible
- * starts */
+ chr *open; /* open and close of range of possible starts */
chr *close;
int hitend;
int shorter = (v->g->tree->flags & SHORTER) ? 1 : 0;
chr *begin;
chr *end;
chr *cold;
- chr *open; /* open and close of range of possible
- * starts */
+ chr *open; /* open and close of range of possible starts */
chr *close;
chr *estart;
chr *estop;
#define UNTRIED 0 /* not yet tried at all */
#define TRYING 1 /* top matched, trying submatches */
-#define TRIED 2 /* top didn't match or submatches
- * exhausted */
+#define TRIED 2 /* top didn't match or submatches exhausted */
if (t == NULL)
return REG_NOMATCH;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.105 2005/06/28 05:08:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.106 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!replace)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("rule \"%s\" for relation \"%s\" already exists",
- rulname, get_rel_name(eventrel_oid))));
+ errmsg("rule \"%s\" for relation \"%s\" already exists",
+ rulname, get_rel_name(eventrel_oid))));
/*
* When replacing, we don't need to replace every attribute
/*
* Install dependency on rule's relation to ensure it will go away on
* relation deletion. If the rule is ON SELECT, make the dependency
- * implicit --- this prevents deleting a view's SELECT rule. Other
- * kinds of rules can be AUTO.
+ * implicit --- this prevents deleting a view's SELECT rule. Other kinds
+ * of rules can be AUTO.
*/
myself.classId = RewriteRelationId;
myself.objectId = rewriteObjectId;
referenced.objectSubId = 0;
recordDependencyOn(&myself, &referenced,
- (evtype == CMD_SELECT) ? DEPENDENCY_INTERNAL : DEPENDENCY_AUTO);
+ (evtype == CMD_SELECT) ? DEPENDENCY_INTERNAL : DEPENDENCY_AUTO);
/*
* Also install dependencies on objects referenced in action and qual.
/*
* If we are installing an ON SELECT rule, we had better grab
- * AccessExclusiveLock to ensure no SELECTs are currently running on
- * the event relation. For other types of rules, it might be
- * sufficient to grab ShareLock to lock out insert/update/delete
- * actions. But for now, let's just grab AccessExclusiveLock all the
- * time.
+ * AccessExclusiveLock to ensure no SELECTs are currently running on the
+ * event relation. For other types of rules, it might be sufficient to
+ * grab ShareLock to lock out insert/update/delete actions. But for now,
+ * let's just grab AccessExclusiveLock all the time.
*/
event_relation = heap_openrv(event_obj, AccessExclusiveLock);
ev_relid = RelationGetRelid(event_relation);
if (list_length(action) == 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("INSTEAD NOTHING rules on SELECT are not implemented"),
+ errmsg("INSTEAD NOTHING rules on SELECT are not implemented"),
errhint("Use views instead.")));
/*
if (!is_instead || query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("rules on SELECT must have action INSTEAD SELECT")));
+ errmsg("rules on SELECT must have action INSTEAD SELECT")));
/*
* ... there can be no rule qual, ...
if (i > event_relation->rd_att->natts)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("SELECT rule's target list has too many entries")));
+ errmsg("SELECT rule's target list has too many entries")));
attr = event_relation->rd_att->attrs[i - 1];
attname = NameStr(attr->attname);
/*
- * Disallow dropped columns in the relation. This won't
- * happen in the cases we actually care about (namely creating
- * a view via CREATE TABLE then CREATE RULE). Trying to cope
- * with it is much more trouble than it's worth, because we'd
- * have to modify the rule to insert dummy NULLs at the right
- * positions.
+ * Disallow dropped columns in the relation. This won't happen in
+ * the cases we actually care about (namely creating a view via
+ * CREATE TABLE then CREATE RULE). Trying to cope with it is much
+ * more trouble than it's worth, because we'd have to modify the
+ * rule to insert dummy NULLs at the right positions.
*/
if (attr->attisdropped)
ereport(ERROR,
errmsg("SELECT rule's target entry %d has different type from column \"%s\"", i, attname)));
/*
- * Allow typmods to be different only if one of them is -1,
- * ie, "unspecified". This is necessary for cases like
- * "numeric", where the table will have a filled-in default
- * length but the select rule's expression will probably have
- * typmod = -1.
+ * Allow typmods to be different only if one of them is -1, ie,
+ * "unspecified". This is necessary for cases like "numeric",
+ * where the table will have a filled-in default length but the
+ * select rule's expression will probably have typmod = -1.
*/
tletypmod = exprTypmod((Node *) tle->expr);
if (attr->atttypmod != tletypmod &&
if (i != event_relation->rd_att->natts)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("SELECT rule's target list has too few entries")));
+ errmsg("SELECT rule's target list has too few entries")));
/*
* ... there must not be another ON SELECT rule already ...
rule = event_relation->rd_rules->rules[i];
if (rule->event == CMD_SELECT)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("\"%s\" is already a view",
- RelationGetRelationName(event_relation))));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("\"%s\" is already a view",
+ RelationGetRelationName(event_relation))));
}
}
if (strcmp(stmt->rulename, ViewSelectRuleName) != 0)
{
/*
- * In versions before 7.3, the expected name was _RETviewname.
- * For backwards compatibility with old pg_dump output, accept
- * that and silently change it to _RETURN. Since this is just
- * a quick backwards-compatibility hack, limit the number of
- * characters checked to a few less than NAMEDATALEN; this
- * saves having to worry about where a multibyte character
- * might have gotten truncated.
+ * In versions before 7.3, the expected name was _RETviewname. For
+ * backwards compatibility with old pg_dump output, accept that
+ * and silently change it to _RETURN. Since this is just a quick
+ * backwards-compatibility hack, limit the number of characters
+ * checked to a few less than NAMEDATALEN; this saves having to
+ * worry about where a multibyte character might have gotten
+ * truncated.
*/
if (strncmp(stmt->rulename, "_RET", 4) != 0 ||
strncmp(stmt->rulename + 4, event_obj->relname,
NAMEDATALEN - 4 - 4) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("view rule for \"%s\" must be named \"%s\"",
- event_obj->relname, ViewSelectRuleName)));
+ errmsg("view rule for \"%s\" must be named \"%s\"",
+ event_obj->relname, ViewSelectRuleName)));
stmt->rulename = pstrdup(ViewSelectRuleName);
}
/*
* Are we converting a relation to a view?
*
- * If so, check that the relation is empty because the storage for
- * the relation is going to be deleted. Also insist that the rel
- * not have any triggers, indexes, or child tables.
+ * If so, check that the relation is empty because the storage for the
+ * relation is going to be deleted. Also insist that the rel not have
+ * any triggers, indexes, or child tables.
*/
if (event_relation->rd_rel->relkind != RELKIND_VIEW)
{
scanDesc = heap_beginscan(event_relation, SnapshotNow, 0, NULL);
if (heap_getnext(scanDesc, ForwardScanDirection) != NULL)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it is not empty",
- event_obj->relname)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it is not empty",
+ event_obj->relname)));
heap_endscan(scanDesc);
if (event_relation->rd_rel->reltriggers != 0)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it has triggers",
- event_obj->relname),
- errhint("In particular, the table may not be involved in any foreign key relationships.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it has triggers",
+ event_obj->relname),
+ errhint("In particular, the table may not be involved in any foreign key relationships.")));
if (event_relation->rd_rel->relhasindex)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it has indexes",
- event_obj->relname)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it has indexes",
+ event_obj->relname)));
if (event_relation->rd_rel->relhassubclass)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it has child tables",
- event_obj->relname)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it has child tables",
+ event_obj->relname)));
RelisBecomingView = true;
}
event_attype = InvalidOid;
/*
- * We want the rule's table references to be checked as though by the
- * rule owner, not the user referencing the rule. Therefore, scan
- * through the rule's rtables and set the checkAsUser field on all
- * rtable entries. We have to look at event_qual as well, in case it
- * contains sublinks.
+ * We want the rule's table references to be checked as though by the rule
+ * owner, not the user referencing the rule. Therefore, scan through the
+ * rule's rtables and set the checkAsUser field on all rtable entries. We
+ * have to look at event_qual as well, in case it contains sublinks.
*/
foreach(l, action)
{
* appropriate, also modify the 'relkind' field to show that the
* relation is now a view.
*
- * Important side effect: an SI notice is broadcast to force all
- * backends (including me!) to update relcache entries with the
- * new rule.
+ * Important side effect: an SI notice is broadcast to force all backends
+ * (including me!) to update relcache entries with the new rule.
*/
SetRelationRuleStatus(ev_relid, true, RelisBecomingView);
}
/*
- * IF the relation is becoming a view, delete the storage files
- * associated with it. NB: we had better have AccessExclusiveLock to
- * do this ...
+ * IF the relation is becoming a view, delete the storage files associated
+ * with it. NB: we had better have AccessExclusiveLock to do this ...
*
* XXX what about getting rid of its TOAST table? For now, we don't.
*/
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.157 2005/08/01 20:31:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.158 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
const char *attrName);
static Node *get_assignment_input(Node *node);
static void markQueryForLocking(Query *qry, bool forUpdate, bool noWait,
- bool skipOldNew);
+ bool skipOldNew);
static List *matchLocks(CmdType event, RuleLock *rulelocks,
int varno, Query *parsetree);
static Query *fireRIRrules(Query *parsetree, List *activeRIRs);
switch (rte->rtekind)
{
case RTE_RELATION:
+
/*
- * Grab the appropriate lock type for the relation, and
- * do not release it until end of transaction. This protects
- * the rewriter and planner against schema changes mid-query.
+ * Grab the appropriate lock type for the relation, and do not
+ * release it until end of transaction. This protects the
+ * rewriter and planner against schema changes mid-query.
*
- * If the relation is the query's result relation, then we
- * need RowExclusiveLock. Otherwise, check to see if the
- * relation is accessed FOR UPDATE/SHARE or not. We can't
- * just grab AccessShareLock because then the executor
- * would be trying to upgrade the lock, leading to possible
- * deadlocks.
+ * If the relation is the query's result relation, then we need
+ * RowExclusiveLock. Otherwise, check to see if the relation
+ * is accessed FOR UPDATE/SHARE or not. We can't just grab
+ * AccessShareLock because then the executor would be trying
+ * to upgrade the lock, leading to possible deadlocks.
*/
if (rt_index == parsetree->resultRelation)
lockmode = RowExclusiveLock;
break;
case RTE_JOIN:
+
/*
- * Scan the join's alias var list to see if any columns
- * have been dropped, and if so replace those Vars with
- * NULL Consts.
+ * Scan the join's alias var list to see if any columns have
+ * been dropped, and if so replace those Vars with NULL
+ * Consts.
*
- * Since a join has only two inputs, we can expect to
- * see multiple references to the same input RTE; optimize
- * away multiple fetches.
+ * Since a join has only two inputs, we can expect to see
+ * multiple references to the same input RTE; optimize away
+ * multiple fetches.
*/
newaliasvars = NIL;
curinputvarno = 0;
* If the list item isn't a simple Var, then it must
* represent a merged column, ie a USING column, and so it
* couldn't possibly be dropped, since it's referenced in
- * the join clause. (Conceivably it could also be a
- * NULL constant already? But that's OK too.)
+ * the join clause. (Conceivably it could also be a NULL
+ * constant already? But that's OK too.)
*/
if (IsA(aliasvar, Var))
{
/*
* The elements of an alias list have to refer to
- * earlier RTEs of the same rtable, because that's
- * the order the planner builds things in. So we
- * already processed the referenced RTE, and so it's
- * safe to use get_rte_attribute_is_dropped on it.
- * (This might not hold after rewriting or planning,
- * but it's OK to assume here.)
+ * earlier RTEs of the same rtable, because that's the
+ * order the planner builds things in. So we already
+ * processed the referenced RTE, and so it's safe to
+ * use get_rte_attribute_is_dropped on it. (This might
+ * not hold after rewriting or planning, but it's OK
+ * to assume here.)
*/
Assert(aliasvar->varlevelsup == 0);
if (aliasvar->varno != curinputvarno)
break;
case RTE_SUBQUERY:
+
/*
* The subquery RTE itself is all right, but we have to
* recurse to process the represented subquery.
}
/*
- * Recurse into sublink subqueries, too. But we already did the ones
- * in the rtable.
+ * Recurse into sublink subqueries, too. But we already did the ones in
+ * the rtable.
*/
if (parsetree->hasSubLinks)
query_tree_walker(parsetree, acquireLocksOnSubLinks, NULL,
Query **sub_action_ptr;
/*
- * Make modifiable copies of rule action and qual (what we're passed
- * are the stored versions in the relcache; don't touch 'em!).
+ * Make modifiable copies of rule action and qual (what we're passed are
+ * the stored versions in the relcache; don't touch 'em!).
*/
rule_action = (Query *) copyObject(rule_action);
rule_qual = (Node *) copyObject(rule_qual);
new_varno = PRS2_NEW_VARNO + rt_length;
/*
- * Adjust rule action and qual to offset its varnos, so that we can
- * merge its rtable with the main parsetree's rtable.
+ * Adjust rule action and qual to offset its varnos, so that we can merge
+ * its rtable with the main parsetree's rtable.
*
- * If the rule action is an INSERT...SELECT, the OLD/NEW rtable entries
- * will be in the SELECT part, and we have to modify that rather than
- * the top-level INSERT (kluge!).
+ * If the rule action is an INSERT...SELECT, the OLD/NEW rtable entries will
+ * be in the SELECT part, and we have to modify that rather than the
+ * top-level INSERT (kluge!).
*/
sub_action = getInsertSelectQuery(rule_action, &sub_action_ptr);
/*
* Generate expanded rtable consisting of main parsetree's rtable plus
* rule action's rtable; this becomes the complete rtable for the rule
- * action. Some of the entries may be unused after we finish
- * rewriting, but we leave them all in place for two reasons:
+ * action. Some of the entries may be unused after we finish rewriting,
+ * but we leave them all in place for two reasons:
*
- * We'd have a much harder job to adjust the query's varnos if we
- * selectively removed RT entries.
+ * We'd have a much harder job to adjust the query's varnos if we selectively
+ * removed RT entries.
*
- * If the rule is INSTEAD, then the original query won't be executed at
- * all, and so its rtable must be preserved so that the executor will
- * do the correct permissions checks on it.
+ * If the rule is INSTEAD, then the original query won't be executed at all,
+ * and so its rtable must be preserved so that the executor will do the
+ * correct permissions checks on it.
*
* RT entries that are not referenced in the completed jointree will be
- * ignored by the planner, so they do not affect query semantics. But
- * any permissions checks specified in them will be applied during
- * executor startup (see ExecCheckRTEPerms()). This allows us to
- * check that the caller has, say, insert-permission on a view, when
- * the view is not semantically referenced at all in the resulting
- * query.
+ * ignored by the planner, so they do not affect query semantics. But any
+ * permissions checks specified in them will be applied during executor
+ * startup (see ExecCheckRTEPerms()). This allows us to check that the
+ * caller has, say, insert-permission on a view, when the view is not
+ * semantically referenced at all in the resulting query.
*
- * When a rule is not INSTEAD, the permissions checks done on its copied
- * RT entries will be redundant with those done during execution of
- * the original query, but we don't bother to treat that case
- * differently.
+ * When a rule is not INSTEAD, the permissions checks done on its copied RT
+ * entries will be redundant with those done during execution of the
+ * original query, but we don't bother to treat that case differently.
*
- * NOTE: because planner will destructively alter rtable, we must ensure
- * that rule action's rtable is separate and shares no substructure
- * with the main rtable. Hence do a deep copy here.
+ * NOTE: because planner will destructively alter rtable, we must ensure that
+ * rule action's rtable is separate and shares no substructure with the
+ * main rtable. Hence do a deep copy here.
*/
sub_action->rtable = list_concat((List *) copyObject(parsetree->rtable),
sub_action->rtable);
/*
* Each rule action's jointree should be the main parsetree's jointree
- * plus that rule's jointree, but usually *without* the original
- * rtindex that we're replacing (if present, which it won't be for
- * INSERT). Note that if the rule action refers to OLD, its jointree
- * will add a reference to rt_index. If the rule action doesn't refer
- * to OLD, but either the rule_qual or the user query quals do, then
- * we need to keep the original rtindex in the jointree to provide
- * data for the quals. We don't want the original rtindex to be
- * joined twice, however, so avoid keeping it if the rule action
- * mentions it.
+ * plus that rule's jointree, but usually *without* the original rtindex
+ * that we're replacing (if present, which it won't be for INSERT). Note
+ * that if the rule action refers to OLD, its jointree will add a
+ * reference to rt_index. If the rule action doesn't refer to OLD, but
+ * either the rule_qual or the user query quals do, then we need to keep
+ * the original rtindex in the jointree to provide data for the quals. We
+ * don't want the original rtindex to be joined twice, however, so avoid
+ * keeping it if the rule action mentions it.
*
- * As above, the action's jointree must not share substructure with the
- * main parsetree's.
+ * As above, the action's jointree must not share substructure with the main
+ * parsetree's.
*/
if (sub_action->commandType != CMD_UTILITY)
{
keeporig = (!rangeTableEntry_used((Node *) sub_action->jointree,
rt_index, 0)) &&
(rangeTableEntry_used(rule_qual, rt_index, 0) ||
- rangeTableEntry_used(parsetree->jointree->quals, rt_index, 0));
+ rangeTableEntry_used(parsetree->jointree->quals, rt_index, 0));
newjointree = adjustJoinTreeList(parsetree, !keeporig, rt_index);
if (newjointree != NIL)
{
/*
- * If sub_action is a setop, manipulating its jointree will do
- * no good at all, because the jointree is dummy. (Perhaps
- * someday we could push the joining and quals down to the
- * member statements of the setop?)
+ * If sub_action is a setop, manipulating its jointree will do no
+ * good at all, because the jointree is dummy. (Perhaps someday
+ * we could push the joining and quals down to the member
+ * statements of the setop?)
*/
if (sub_action->setOperations != NULL)
ereport(ERROR,
}
/*
- * Event Qualification forces copying of parsetree and splitting into
- * two queries one w/rule_qual, one w/NOT rule_qual. Also add user
- * query qual onto rule action
+ * Event Qualification forces copying of parsetree and splitting into two
+ * queries one w/rule_qual, one w/NOT rule_qual. Also add user query qual
+ * onto rule action
*/
AddQual(sub_action, rule_qual);
* Rewrite new.attribute w/ right hand side of target-list entry for
* appropriate field name in insert/update.
*
- * KLUGE ALERT: since ResolveNew returns a mutated copy, we can't just
- * apply it to sub_action; we have to remember to update the sublink
- * inside rule_action, too.
+ * KLUGE ALERT: since ResolveNew returns a mutated copy, we can't just apply
+ * it to sub_action; we have to remember to update the sublink inside
+ * rule_action, too.
*/
if ((event == CMD_INSERT || event == CMD_UPDATE) &&
sub_action->commandType != CMD_UTILITY)
newjointree = list_delete_ptr(newjointree, rtr);
/*
- * foreach is safe because we exit loop after
- * list_delete...
+ * foreach is safe because we exit loop after list_delete...
*/
break;
}
ListCell *temp;
/*
- * We process the normal (non-junk) attributes by scanning the input
- * tlist once and transferring TLEs into an array, then scanning the
- * array to build an output tlist. This avoids O(N^2) behavior for
- * large numbers of attributes.
+ * We process the normal (non-junk) attributes by scanning the input tlist
+ * once and transferring TLEs into an array, then scanning the array to
+ * build an output tlist. This avoids O(N^2) behavior for large numbers
+ * of attributes.
*
- * Junk attributes are tossed into a separate list during the same
- * tlist scan, then appended to the reconstructed tlist.
+ * Junk attributes are tossed into a separate list during the same tlist
+ * scan, then appended to the reconstructed tlist.
*/
numattrs = RelationGetNumberOfAttributes(target_relation);
new_tles = (TargetEntry **) palloc0(numattrs * sizeof(TargetEntry *));
else
{
/*
- * Copy all resjunk tlist entries to junk_tlist, and
- * assign them resnos above the last real resno.
+ * Copy all resjunk tlist entries to junk_tlist, and assign them
+ * resnos above the last real resno.
*
- * Typical junk entries include ORDER BY or GROUP BY expressions
- * (are these actually possible in an INSERT or UPDATE?), system
+ * Typical junk entries include ORDER BY or GROUP BY expressions (are
+ * these actually possible in an INSERT or UPDATE?), system
* attribute references, etc.
*/
continue;
/*
- * Handle the two cases where we need to insert a default
- * expression: it's an INSERT and there's no tlist entry for the
- * column, or the tlist entry is a DEFAULT placeholder node.
+ * Handle the two cases where we need to insert a default expression:
+ * it's an INSERT and there's no tlist entry for the column, or the
+ * tlist entry is a DEFAULT placeholder node.
*/
if ((new_tle == NULL && commandType == CMD_INSERT) ||
(new_tle && new_tle->expr && IsA(new_tle->expr, SetToDefault)))
new_expr = build_column_default(target_relation, attrno);
/*
- * If there is no default (ie, default is effectively NULL),
- * we can omit the tlist entry in the INSERT case, since the
- * planner can insert a NULL for itself, and there's no point
- * in spending any more rewriter cycles on the entry. But in
- * the UPDATE case we've got to explicitly set the column to
- * NULL.
+ * If there is no default (ie, default is effectively NULL), we
+ * can omit the tlist entry in the INSERT case, since the planner
+ * can insert a NULL for itself, and there's no point in spending
+ * any more rewriter cycles on the entry. But in the UPDATE case
+ * we've got to explicitly set the column to NULL.
*/
if (!new_expr)
{
if (prior_tle == NULL)
{
/*
- * Normal case where this is the first assignment to the
- * attribute.
+ * Normal case where this is the first assignment to the attribute.
*/
return src_tle;
}
attrName)));
/*
- * Prior TLE could be a nest of assignments if we do this more than
- * once.
+ * Prior TLE could be a nest of assignments if we do this more than once.
*/
priorbottom = prior_input;
for (;;)
memcpy(fstore, prior_expr, sizeof(FieldStore));
fstore->newvals =
list_concat(list_copy(((FieldStore *) prior_expr)->newvals),
- list_copy(((FieldStore *) src_expr)->newvals));
+ list_copy(((FieldStore *) src_expr)->newvals));
fstore->fieldnums =
list_concat(list_copy(((FieldStore *) prior_expr)->fieldnums),
- list_copy(((FieldStore *) src_expr)->fieldnums));
+ list_copy(((FieldStore *) src_expr)->fieldnums));
}
else
{
if (expr == NULL)
{
/*
- * No per-column default, so look for a default for the type
- * itself.
+ * No per-column default, so look for a default for the type itself.
*/
expr = get_typdefault(atttype);
}
/*
* Make sure the value is coerced to the target column type; this will
* generally be true already, but there seem to be some corner cases
- * involving domain defaults where it might not be true. This should
- * match the parser's processing of non-defaulted expressions --- see
+ * involving domain defaults where it might not be true. This should match
+ * the parser's processing of non-defaulted expressions --- see
* updateTargetListEntry().
*/
exprtype = exprType(expr);
NameStr(att_tup->attname),
format_type_be(atttype),
format_type_be(exprtype)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
return expr;
}
elog(ERROR, "cannot handle per-attribute ON SELECT rule");
/*
- * Make a modifiable copy of the view query, and acquire needed locks
- * on the relations it mentions.
+ * Make a modifiable copy of the view query, and acquire needed locks on
+ * the relations it mentions.
*/
rule_action = copyObject(linitial(rule->actions));
rule_action = fireRIRrules(rule_action, activeRIRs);
/*
- * VIEWs are really easy --- just plug the view query in as a
- * subselect, replacing the relation's original RTE.
+ * VIEWs are really easy --- just plug the view query in as a subselect,
+ * replacing the relation's original RTE.
*/
rte = rt_fetch(rt_index, parsetree->rtable);
rte->inh = false; /* must not be set for a subquery */
/*
- * We move the view's permission check data down to its rangetable.
- * The checks will actually be done against the *OLD* entry therein.
+ * We move the view's permission check data down to its rangetable. The
+ * checks will actually be done against the *OLD* entry therein.
*/
subrte = rt_fetch(PRS2_OLD_VARNO, rule_action->rtable);
Assert(subrte->relid == relation->rd_id);
if (list_member_int(parsetree->rowMarks, rt_index))
{
/*
- * Remove the view from the list of rels that will actually be
- * marked FOR UPDATE/SHARE by the executor. It will still be access-
- * checked for write access, though.
+ * Remove the view from the list of rels that will actually be marked
+ * FOR UPDATE/SHARE by the executor. It will still be access- checked
+ * for write access, though.
*/
parsetree->rowMarks = list_delete_int(parsetree->rowMarks, rt_index);
if (forUpdate != qry->forUpdate)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use both FOR UPDATE and FOR SHARE in one query")));
+ errmsg("cannot use both FOR UPDATE and FOR SHARE in one query")));
if (noWait != qry->rowNoWait)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
}
/*
- * Do NOT recurse into Query nodes, because fireRIRrules already
- * processed subselects of subselects for us.
+ * Do NOT recurse into Query nodes, because fireRIRrules already processed
+ * subselects of subselects for us.
*/
return expression_tree_walker(node, fireRIRonSubLink,
(void *) activeRIRs);
int rt_index;
/*
- * don't try to convert this into a foreach loop, because rtable list
- * can get changed each time through...
+ * don't try to convert this into a foreach loop, because rtable list can
+ * get changed each time through...
*/
rt_index = 0;
while (rt_index < list_length(parsetree->rtable))
rte = rt_fetch(rt_index, parsetree->rtable);
/*
- * A subquery RTE can't have associated rules, so there's nothing
- * to do to this level of the query, but we must recurse into the
+ * A subquery RTE can't have associated rules, so there's nothing to
+ * do to this level of the query, but we must recurse into the
* subquery to expand any rule references in it.
*/
if (rte->rtekind == RTE_SUBQUERY)
* If the table is not referenced in the query, then we ignore it.
* This prevents infinite expansion loop due to new rtable entries
* inserted by expansion of a rule. A table is referenced if it is
- * part of the join set (a source table), or is referenced by any
- * Var nodes, or is the result table.
+ * part of the join set (a source table), or is referenced by any Var
+ * nodes, or is the result table.
*/
if (rt_index != parsetree->resultRelation &&
!rangeTableEntry_used((Node *) parsetree, rt_index, 0))
}
/*
- * Recurse into sublink subqueries, too. But we already did the ones
- * in the rtable.
+ * Recurse into sublink subqueries, too. But we already did the ones in
+ * the rtable.
*/
if (parsetree->hasSubLinks)
query_tree_walker(parsetree, fireRIRonSubLink, (void *) activeRIRs,
/*
* In case there are subqueries in the qual, acquire necessary locks and
* fix any deleted JOIN RTE entries. (This is somewhat redundant with
- * rewriteRuleAction, but not entirely ... consider restructuring so
- * that we only need to process the qual this way once.)
+ * rewriteRuleAction, but not entirely ... consider restructuring so that
+ * we only need to process the qual this way once.)
*/
(void) acquireLocksOnSubLinks(new_qual, NULL);
if (qsrc == QSRC_QUAL_INSTEAD_RULE)
{
/*
- * If there are INSTEAD rules with qualifications, the
- * original query is still performed. But all the negated rule
- * qualifications of the INSTEAD rules are added so it does
- * its actions only in cases where the rule quals of all
- * INSTEAD rules are false. Think of it as the default action
- * in a case. We save this in *qual_product so RewriteQuery()
- * can add it to the query list after we mangled it up enough.
+ * If there are INSTEAD rules with qualifications, the original
+ * query is still performed. But all the negated rule
+ * qualifications of the INSTEAD rules are added so it does its
+ * actions only in cases where the rule quals of all INSTEAD rules
+ * are false. Think of it as the default action in a case. We save
+ * this in *qual_product so RewriteQuery() can add it to the query
+ * list after we mangled it up enough.
*
* If we have already found an unqualified INSTEAD rule, then
* *qual_product won't be used, so don't bother building it.
/*
* If the statement is an update, insert or delete - fire rules on it.
*
- * SELECT rules are handled later when we have all the queries that
- * should get executed. Also, utilities aren't rewritten at all (do
- * we still need that check?)
+ * SELECT rules are handled later when we have all the queries that should
+ * get executed. Also, utilities aren't rewritten at all (do we still
+ * need that check?)
*/
if (event != CMD_SELECT && event != CMD_UTILITY)
{
rt_entry_relation = heap_open(rt_entry->relid, NoLock);
/*
- * If it's an INSERT or UPDATE, rewrite the targetlist into
- * standard form. This will be needed by the planner anyway, and
- * doing it now ensures that any references to NEW.field will
- * behave sanely.
+ * If it's an INSERT or UPDATE, rewrite the targetlist into standard
+ * form. This will be needed by the planner anyway, and doing it now
+ * ensures that any references to NEW.field will behave sanely.
*/
if (event == CMD_INSERT || event == CMD_UPDATE)
rewriteTargetList(parsetree, rt_entry_relation);
&qual_product);
/*
- * If we got any product queries, recursively rewrite them ---
- * but first check for recursion!
+ * If we got any product queries, recursively rewrite them --- but
+ * first check for recursion!
*/
if (product_queries != NIL)
{
if (rev->relation == RelationGetRelid(rt_entry_relation) &&
rev->event == event)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("infinite recursion detected in rules for relation \"%s\"",
- RelationGetRelationName(rt_entry_relation))));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("infinite recursion detected in rules for relation \"%s\"",
+ RelationGetRelationName(rt_entry_relation))));
}
rev = (rewrite_event *) palloc(sizeof(rewrite_event));
}
/*
- * For INSERTs, the original query is done first; for UPDATE/DELETE,
- * it is done last. This is needed because update and delete rule
- * actions might not do anything if they are invoked after the update
- * or delete is performed. The command counter increment between the
- * query executions makes the deleted (and maybe the updated) tuples
- * disappear so the scans for them in the rule actions cannot find
- * them.
+ * For INSERTs, the original query is done first; for UPDATE/DELETE, it is
+ * done last. This is needed because update and delete rule actions might
+ * not do anything if they are invoked after the update or delete is
+ * performed. The command counter increment between the query executions
+ * makes the deleted (and maybe the updated) tuples disappear so the scans
+ * for them in the rule actions cannot find them.
*
* If we found any unqualified INSTEAD, the original query is not done at
* all, in any form. Otherwise, we add the modified form if qualified
/*
* Step 3
*
- * Determine which, if any, of the resulting queries is supposed to set
- * the command-result tag; and update the canSetTag fields
- * accordingly.
+ * Determine which, if any, of the resulting queries is supposed to set the
+ * command-result tag; and update the canSetTag fields accordingly.
*
* If the original query is still in the list, it sets the command tag.
- * Otherwise, the last INSTEAD query of the same kind as the original
- * is allowed to set the tag. (Note these rules can leave us with no
- * query setting the tag. The tcop code has to cope with this by
- * setting up a default tag based on the original un-rewritten query.)
+ * Otherwise, the last INSTEAD query of the same kind as the original is
+ * allowed to set the tag. (Note these rules can leave us with no query
+ * setting the tag. The tcop code has to cope with this by setting up a
+ * default tag based on the original un-rewritten query.)
*
* The Asserts verify that at most one query in the result list is marked
- * canSetTag. If we aren't checking asserts, we can fall out of the
- * loop as soon as we find the original query.
+ * canSetTag. If we aren't checking asserts, we can fall out of the loop
+ * as soon as we find the original query.
*/
origCmdType = parsetree->commandType;
foundOriginalQuery = false;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.91 2005/06/04 19:19:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.92 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
context.sublevels_up = 0;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
checkExprHasAggs_walker,
if (IsA(node, Aggref))
{
if (((Aggref *) node)->agglevelsup == context->sublevels_up)
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
/* else fall through to examine argument */
}
if (IsA(node, Query))
if (node == NULL)
return false;
if (IsA(node, SubLink))
- return true; /* abort the tree traversal and return
- * true */
+ return true; /* abort the tree traversal and return true */
return expression_tree_walker(node, checkExprHasSubLink_walker, context);
}
context.sublevels_up = sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, go straight to query_tree_walker to make sure that
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, go straight to query_tree_walker to make sure that
* sublevels_up doesn't get incremented prematurely.
*/
if (node && IsA(node, Query))
Query *qry = (Query *) node;
/*
- * If we are starting at a Query, and sublevels_up is zero, then
- * we must also fix rangetable indexes in the Query itself ---
- * namely resultRelation and rowMarks entries. sublevels_up
- * cannot be zero when recursing into a subquery, so there's no
- * need to have the same logic inside OffsetVarNodes_walker.
+ * If we are starting at a Query, and sublevels_up is zero, then we
+ * must also fix rangetable indexes in the Query itself --- namely
+ * resultRelation and rowMarks entries. sublevels_up cannot be zero
+ * when recursing into a subquery, so there's no need to have the same
+ * logic inside OffsetVarNodes_walker.
*/
if (sublevels_up == 0)
{
context.sublevels_up = sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, go straight to query_tree_walker to make sure that
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, go straight to query_tree_walker to make sure that
* sublevels_up doesn't get incremented prematurely.
*/
if (node && IsA(node, Query))
Query *qry = (Query *) node;
/*
- * If we are starting at a Query, and sublevels_up is zero, then
- * we must also fix rangetable indexes in the Query itself ---
- * namely resultRelation and rowMarks entries. sublevels_up
- * cannot be zero when recursing into a subquery, so there's no
- * need to have the same logic inside ChangeVarNodes_walker.
+ * If we are starting at a Query, and sublevels_up is zero, then we
+ * must also fix rangetable indexes in the Query itself --- namely
+ * resultRelation and rowMarks entries. sublevels_up cannot be zero
+ * when recursing into a subquery, so there's no need to have the same
+ * logic inside ChangeVarNodes_walker.
*/
if (sublevels_up == 0)
{
context.min_sublevels_up = min_sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
query_or_expression_tree_walker(node,
IncrementVarSublevelsUp_walker,
context.sublevels_up = sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
rangeTableEntry_used_walker,
context.sublevels_up = sublevels_up;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_walker(node,
attribute_used_walker,
* they've been pushed down to the SELECT.
*/
if (list_length(parsetree->rtable) >= 2 &&
- strcmp(rt_fetch(PRS2_OLD_VARNO, parsetree->rtable)->eref->aliasname,
- "*OLD*") == 0 &&
- strcmp(rt_fetch(PRS2_NEW_VARNO, parsetree->rtable)->eref->aliasname,
- "*NEW*") == 0)
+ strcmp(rt_fetch(PRS2_OLD_VARNO, parsetree->rtable)->eref->aliasname,
+ "*OLD*") == 0 &&
+ strcmp(rt_fetch(PRS2_NEW_VARNO, parsetree->rtable)->eref->aliasname,
+ "*NEW*") == 0)
return parsetree;
Assert(parsetree->jointree && IsA(parsetree->jointree, FromExpr));
if (list_length(parsetree->jointree->fromlist) != 1)
selectquery->commandType == CMD_SELECT))
elog(ERROR, "expected to find SELECT subquery");
if (list_length(selectquery->rtable) >= 2 &&
- strcmp(rt_fetch(PRS2_OLD_VARNO, selectquery->rtable)->eref->aliasname,
- "*OLD*") == 0 &&
- strcmp(rt_fetch(PRS2_NEW_VARNO, selectquery->rtable)->eref->aliasname,
- "*NEW*") == 0)
+ strcmp(rt_fetch(PRS2_OLD_VARNO, selectquery->rtable)->eref->aliasname,
+ "*OLD*") == 0 &&
+ strcmp(rt_fetch(PRS2_NEW_VARNO, selectquery->rtable)->eref->aliasname,
+ "*NEW*") == 0)
{
if (subquery_ptr)
*subquery_ptr = &(selectrte->subquery);
/*
* There's noplace to put the qual on a utility statement.
*
- * If it's a NOTIFY, silently ignore the qual; this means that the
- * NOTIFY will execute, whether or not there are any qualifying
- * rows. While clearly wrong, this is much more useful than
- * refusing to execute the rule at all, and extra NOTIFY events
- * are harmless for typical uses of NOTIFY.
+ * If it's a NOTIFY, silently ignore the qual; this means that the NOTIFY
+ * will execute, whether or not there are any qualifying rows. While
+ * clearly wrong, this is much more useful than refusing to execute
+ * the rule at all, and extra NOTIFY events are harmless for typical
+ * uses of NOTIFY.
*
* If it isn't a NOTIFY, error out, since unconditional execution of
- * other utility stmts is unlikely to be wanted. (This case is
- * not currently allowed anyway, but keep the test for safety.)
+ * other utility stmts is unlikely to be wanted. (This case is not
+ * currently allowed anyway, but keep the test for safety.)
*/
if (parsetree->utilityStmt && IsA(parsetree->utilityStmt, NotifyStmt))
return;
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("conditional utility statements are not implemented")));
+ errmsg("conditional utility statements are not implemented")));
}
if (parsetree->setOperations != NULL)
{
/*
- * There's noplace to put the qual on a setop statement, either.
- * (This could be fixed, but right now the planner simply ignores
- * any qual condition on a setop query.)
+ * There's noplace to put the qual on a setop statement, either. (This
+ * could be fixed, but right now the planner simply ignores any qual
+ * condition on a setop query.)
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
Assert(!checkExprHasAggs(copy));
/*
- * Make sure query is marked correctly if added qual has sublinks.
- * Need not search qual when query is already marked.
+ * Make sure query is marked correctly if added qual has sublinks. Need
+ * not search qual when query is already marked.
*/
if (!parsetree->hasSubLinks)
parsetree->hasSubLinks = checkExprHasSubLink(copy);
/*
* If generating an expansion for a var of a named rowtype
- * (ie, this is a plain relation RTE), then we must
- * include dummy items for dropped columns. If the var is
- * RECORD (ie, this is a JOIN), then omit dropped columns.
+ * (ie, this is a plain relation RTE), then we must include
+ * dummy items for dropped columns. If the var is RECORD (ie,
+ * this is a JOIN), then omit dropped columns.
*/
expandRTE(context->target_rte,
this_varno, this_varlevelsup,
context.inserted_sublink = false;
/*
- * Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, we don't want to increment sublevels_up.
+ * Must be prepared to start with a Query or a bare expression tree; if
+ * it's a Query, we don't want to increment sublevels_up.
*/
return query_or_expression_tree_mutator(node,
ResolveNew_mutator,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteRemove.c,v 1.62 2005/04/14 20:03:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteRemove.c,v 1.63 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
elog(ERROR, "could not find tuple for rule %u", ruleOid);
/*
- * We had better grab AccessExclusiveLock so that we know no other
- * rule additions/deletions are going on for this relation. Else we
- * cannot set relhasrules correctly. Besides, we don't want to be
- * changing the ruleset while queries are executing on the rel.
+ * We had better grab AccessExclusiveLock so that we know no other rule
+ * additions/deletions are going on for this relation. Else we cannot set
+ * relhasrules correctly. Besides, we don't want to be changing the
+ * ruleset while queries are executing on the rel.
*/
eventRelationOid = ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class;
event_relation = heap_open(eventRelationOid, AccessExclusiveLock);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteSupport.c,v 1.61 2005/04/14 20:03:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteSupport.c,v 1.62 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Form_pg_class classForm;
/*
- * Find the tuple to update in pg_class, using syscache for the
- * lookup.
+ * Find the tuple to update in pg_class, using syscache for the lookup.
*/
relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_init.c,v 1.76 2005/08/20 23:26:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_init.c,v 1.77 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
buf->buf_id = i;
/*
- * Initially link all the buffers together as unused.
- * Subsequent management of this list is done by freelist.c.
+ * Initially link all the buffers together as unused. Subsequent
+ * management of this list is done by freelist.c.
*/
buf->freeNext = i + 1;
* buf_table.c
* routines for mapping BufferTags to buffer indexes.
*
- * Note: the routines in this file do no locking of their own. The caller
+ * Note: the routines in this file do no locking of their own. The caller
* must hold a suitable lock on the BufMappingLock, as specified in the
* comments.
*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.42 2005/08/20 23:26:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.43 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Insert a hashtable entry for given tag and buffer ID,
* unless an entry already exists for that tag
*
- * Returns -1 on successful insertion. If a conflicting entry exists
+ * Returns -1 on successful insertion. If a conflicting entry exists
* already, returns the buffer ID in that entry.
*
* Caller must hold write lock on BufMappingLock
bool found;
Assert(buf_id >= 0); /* -1 is reserved for not-in-table */
- Assert(tagPtr->blockNum != P_NEW); /* invalid tag */
+ Assert(tagPtr->blockNum != P_NEW); /* invalid tag */
result = (BufferLookupEnt *)
hash_search(SharedBufHash, (void *) tagPtr, HASH_ENTER, &found);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.196 2005/10/12 16:45:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.197 2005/10/15 02:49:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define BufferGetLSN(bufHdr) (*((XLogRecPtr*) BufHdrGetBlock(bufHdr)))
/* Note: this macro only works on local buffers, not shared ones! */
-#define LocalBufHdrGetBlock(bufHdr) \
+#define LocalBufHdrGetBlock(bufHdr) \
LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
int bgwriter_all_maxpages = 5;
-long NDirectFileRead; /* some I/O's are direct file access.
- * bypass bufmgr */
+long NDirectFileRead; /* some I/O's are direct file access. bypass
+ * bufmgr */
long NDirectFileWrite; /* e.g., I/O in psort and hashjoin. */
/* local state for StartBufferIO and related functions */
static volatile BufferDesc *InProgressBuf = NULL;
static bool IsForInput;
+
/* local state for LockBufferForCleanup */
static volatile BufferDesc *PinCountWaitBuf = NULL;
static void WaitIO(volatile BufferDesc *buf);
static bool StartBufferIO(volatile BufferDesc *buf, bool forInput);
static void TerminateBufferIO(volatile BufferDesc *buf, bool clear_dirty,
- int set_flag_bits);
+ int set_flag_bits);
static void buffer_write_error_callback(void *arg);
static volatile BufferDesc *BufferAlloc(Relation reln, BlockNumber blockNum,
bool *foundPtr);
ReadBufferCount++;
/*
- * lookup the buffer. IO_IN_PROGRESS is set if the requested
- * block is not currently in memory.
+ * lookup the buffer. IO_IN_PROGRESS is set if the requested block is
+ * not currently in memory.
*/
bufHdr = BufferAlloc(reln, blockNum, &found);
if (found)
/*
* if we have gotten to this point, we have allocated a buffer for the
- * page but its contents are not yet valid. IO_IN_PROGRESS is set for
- * it, if it's a shared buffer.
+ * page but its contents are not yet valid. IO_IN_PROGRESS is set for it,
+ * if it's a shared buffer.
*
- * Note: if smgrextend fails, we will end up with a buffer that is
- * allocated but not marked BM_VALID. P_NEW will still select the
- * same block number (because the relation didn't get any longer on
- * disk) and so future attempts to extend the relation will find the
- * same buffer (if it's not been recycled) but come right back here to
- * try smgrextend again.
+ * Note: if smgrextend fails, we will end up with a buffer that is allocated
+ * but not marked BM_VALID. P_NEW will still select the same block number
+ * (because the relation didn't get any longer on disk) and so future
+ * attempts to extend the relation will find the same buffer (if it's not
+ * been recycled) but come right back here to try smgrextend again.
*/
- Assert(!(bufHdr->flags & BM_VALID)); /* spinlock not needed */
+ Assert(!(bufHdr->flags & BM_VALID)); /* spinlock not needed */
bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
if (!PageHeaderIsValid((PageHeader) bufBlock))
{
/*
- * During WAL recovery, the first access to any data page
- * should overwrite the whole page from the WAL; so a
- * clobbered page header is not reason to fail. Hence, when
- * InRecovery we may always act as though zero_damaged_pages
- * is ON.
+ * During WAL recovery, the first access to any data page should
+ * overwrite the whole page from the WAL; so a clobbered page
+ * header is not reason to fail. Hence, when InRecovery we may
+ * always act as though zero_damaged_pages is ON.
*/
if (zero_damaged_pages || InRecovery)
{
ereport(WARNING,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page header in block %u of relation \"%s\"; zeroing out page",
- blockNum, RelationGetRelationName(reln))));
+ blockNum, RelationGetRelationName(reln))));
MemSet((char *) bufBlock, 0, BLCKSZ);
}
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("invalid page header in block %u of relation \"%s\"",
- blockNum, RelationGetRelationName(reln))));
+ errmsg("invalid page header in block %u of relation \"%s\"",
+ blockNum, RelationGetRelationName(reln))));
}
}
{
/*
* Found it. Now, pin the buffer so no one can steal it from the
- * buffer pool, and check to see if the correct data has been
- * loaded into the buffer.
+ * buffer pool, and check to see if the correct data has been loaded
+ * into the buffer.
*/
buf = &BufferDescriptors[buf_id];
if (!valid)
{
/*
- * We can only get here if (a) someone else is still reading
- * in the page, or (b) a previous read attempt failed. We
- * have to wait for any active read attempt to finish, and
- * then set up our own read attempt if the page is still not
- * BM_VALID. StartBufferIO does it all.
+ * We can only get here if (a) someone else is still reading in
+ * the page, or (b) a previous read attempt failed. We have to
+ * wait for any active read attempt to finish, and then set up our
+ * own read attempt if the page is still not BM_VALID.
+ * StartBufferIO does it all.
*/
if (StartBufferIO(buf, true))
{
/*
- * If we get here, previous attempts to read the buffer
- * must have failed ... but we shall bravely try again.
+ * If we get here, previous attempts to read the buffer must
+ * have failed ... but we shall bravely try again.
*/
*foundPtr = FALSE;
}
/*
* Didn't find it in the buffer pool. We'll have to initialize a new
- * buffer. Remember to unlock BufMappingLock while doing the work.
+ * buffer. Remember to unlock BufMappingLock while doing the work.
*/
LWLockRelease(BufMappingLock);
for (;;)
{
/*
- * Select a victim buffer. The buffer is returned with its
- * header spinlock still held! Also the BufFreelistLock is
- * still held, since it would be bad to hold the spinlock
- * while possibly waking up other processes.
+ * Select a victim buffer. The buffer is returned with its header
+ * spinlock still held! Also the BufFreelistLock is still held, since
+ * it would be bad to hold the spinlock while possibly waking up other
+ * processes.
*/
buf = StrategyGetBuffer();
/*
* If the buffer was dirty, try to write it out. There is a race
- * condition here, in that someone might dirty it after we released
- * it above, or even while we are writing it out (since our share-lock
+ * condition here, in that someone might dirty it after we released it
+ * above, or even while we are writing it out (since our share-lock
* won't prevent hint-bit updates). We will recheck the dirty bit
* after re-locking the buffer header.
*/
{
/*
* We need a share-lock on the buffer contents to write it out
- * (else we might write invalid data, eg because someone else
- * is compacting the page contents while we write). We must use
- * a conditional lock acquisition here to avoid deadlock. Even
+ * (else we might write invalid data, eg because someone else is
+ * compacting the page contents while we write). We must use a
+ * conditional lock acquisition here to avoid deadlock. Even
* though the buffer was not pinned (and therefore surely not
* locked) when StrategyGetBuffer returned it, someone else could
- * have pinned and exclusive-locked it by the time we get here.
- * If we try to get the lock unconditionally, we'd block waiting
- * for them; if they later block waiting for us, deadlock ensues.
+ * have pinned and exclusive-locked it by the time we get here. If
+ * we try to get the lock unconditionally, we'd block waiting for
+ * them; if they later block waiting for us, deadlock ensues.
* (This has been observed to happen when two backends are both
* trying to split btree index pages, and the second one just
* happens to be trying to split the page the first one got from
else
{
/*
- * Someone else has pinned the buffer, so give it up and
- * loop back to get another one.
+ * Someone else has pinned the buffer, so give it up and loop
+ * back to get another one.
*/
UnpinBuffer(buf, true, false /* evidently recently used */ );
continue;
}
/*
- * Acquire exclusive mapping lock in preparation for changing
- * the buffer's association.
+ * Acquire exclusive mapping lock in preparation for changing the
+ * buffer's association.
*/
LWLockAcquire(BufMappingLock, LW_EXCLUSIVE);
* Try to make a hashtable entry for the buffer under its new tag.
* This could fail because while we were writing someone else
* allocated another buffer for the same block we want to read in.
- * Note that we have not yet removed the hashtable entry for the
- * old tag.
+ * Note that we have not yet removed the hashtable entry for the old
+ * tag.
*/
buf_id = BufTableInsert(&newTag, buf->buf_id);
if (buf_id >= 0)
{
/*
- * Got a collision. Someone has already done what we were about
- * to do. We'll just handle this as if it were found in
- * the buffer pool in the first place. First, give up the
- * buffer we were planning to use. Don't allow it to be
- * thrown in the free list (we don't want to hold both
- * global locks at once).
+ * Got a collision. Someone has already done what we were about to
+ * do. We'll just handle this as if it were found in the buffer
+ * pool in the first place. First, give up the buffer we were
+ * planning to use. Don't allow it to be thrown in the free list
+ * (we don't want to hold both global locks at once).
*/
UnpinBuffer(buf, true, false);
{
/*
* We can only get here if (a) someone else is still reading
- * in the page, or (b) a previous read attempt failed. We
+ * in the page, or (b) a previous read attempt failed. We
* have to wait for any active read attempt to finish, and
* then set up our own read attempt if the page is still not
* BM_VALID. StartBufferIO does it all.
/*
* Somebody could have pinned or re-dirtied the buffer while we were
- * doing the I/O and making the new hashtable entry. If so, we
- * can't recycle this buffer; we must undo everything we've done and
- * start over with a new victim buffer.
+ * doing the I/O and making the new hashtable entry. If so, we can't
+ * recycle this buffer; we must undo everything we've done and start
+ * over with a new victim buffer.
*/
if (buf->refcount == 1 && !(buf->flags & BM_DIRTY))
break;
/*
* Okay, it's finally safe to rename the buffer.
*
- * Clearing BM_VALID here is necessary, clearing the dirtybits
- * is just paranoia. We also clear the usage_count since any
- * recency of use of the old content is no longer relevant.
+ * Clearing BM_VALID here is necessary, clearing the dirtybits is just
+ * paranoia. We also clear the usage_count since any recency of use of
+ * the old content is no longer relevant.
*/
oldTag = buf->tag;
oldFlags = buf->flags;
/*
* Buffer contents are currently invalid. Try to get the io_in_progress
- * lock. If StartBufferIO returns false, then someone else managed
- * to read it before we did, so there's nothing left for BufferAlloc()
- * to do.
+ * lock. If StartBufferIO returns false, then someone else managed to
+ * read it before we did, so there's nothing left for BufferAlloc() to do.
*/
if (StartBufferIO(buf, true))
*foundPtr = FALSE;
* This is used only in contexts such as dropping a relation. We assume
* that no other backend could possibly be interested in using the page,
* so the only reason the buffer might be pinned is if someone else is
- * trying to write it out. We have to let them finish before we can
+ * trying to write it out. We have to let them finish before we can
* reclaim the buffer.
*
* The buffer could get reclaimed by someone else while we are waiting
UnlockBufHdr(buf);
retry:
+
/*
- * Acquire exclusive mapping lock in preparation for changing
- * the buffer's association.
+ * Acquire exclusive mapping lock in preparation for changing the buffer's
+ * association.
*/
LWLockAcquire(BufMappingLock, LW_EXCLUSIVE);
}
/*
- * We assume the only reason for it to be pinned is that someone else
- * is flushing the page out. Wait for them to finish. (This could be
- * an infinite loop if the refcount is messed up... it would be nice
- * to time out after awhile, but there seems no way to be sure how
- * many loops may be needed. Note that if the other guy has pinned
- * the buffer but not yet done StartBufferIO, WaitIO will fall through
- * and we'll effectively be busy-looping here.)
+ * We assume the only reason for it to be pinned is that someone else is
+ * flushing the page out. Wait for them to finish. (This could be an
+ * infinite loop if the refcount is messed up... it would be nice to time
+ * out after awhile, but there seems no way to be sure how many loops may
+ * be needed. Note that if the other guy has pinned the buffer but not
+ * yet done StartBufferIO, WaitIO will fall through and we'll effectively
+ * be busy-looping here.)
*/
if (buf->refcount != 0)
{
}
/*
- * Clear out the buffer's tag and flags. We must do this to ensure
- * that linear scans of the buffer array don't think the buffer is valid.
+ * Clear out the buffer's tag and flags. We must do this to ensure that
+ * linear scans of the buffer array don't think the buffer is valid.
*/
oldFlags = buf->flags;
CLEAR_BUFFERTAG(buf->tag);
*
* Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
* compared to calling the two routines separately. Now it's mainly just
- * a convenience function. However, if the passed buffer is valid and
+ * a convenience function. However, if the passed buffer is valid and
* already contains the desired block, we just return it as-is; and that
* does save considerable work compared to a full release and reacquire.
*
*
* Note that ResourceOwnerEnlargeBuffers must have been done already.
*
- * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
+ * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
* some callers to avoid an extra spinlock cycle.
*/
static bool
{
/*
* Use NoHoldoff here because we don't want the unlock to be a
- * potential place to honor a QueryCancel request.
- * (The caller should be holding off interrupts anyway.)
+ * potential place to honor a QueryCancel request. (The caller should
+ * be holding off interrupts anyway.)
*/
LockBufHdr_NoHoldoff(buf);
buf->refcount++;
PrivateRefCount[b]--;
if (PrivateRefCount[b] == 0)
{
- bool trash_buffer = false;
+ bool trash_buffer = false;
/* I'd better not still hold any locks on the buffer */
Assert(!LWLockHeldByMe(buf->content_lock));
if (buf->usage_count < BM_MAX_USAGE_COUNT)
buf->usage_count++;
}
- else if (trashOK &&
+ else if (trashOK &&
buf->refcount == 0 &&
buf->usage_count == 0)
trash_buffer = true;
buf->refcount == 1)
{
/* we just released the last pin other than the waiter's */
- int wait_backend_pid = buf->wait_backend_pid;
+ int wait_backend_pid = buf->wait_backend_pid;
buf->flags &= ~BM_PIN_COUNT_WAITER;
UnlockBufHdr_NoHoldoff(buf);
UnlockBufHdr_NoHoldoff(buf);
/*
- * If VACUUM is releasing an otherwise-unused buffer, send it to
- * the freelist for near-term reuse. We put it at the tail so that
- * it won't be used before any invalid buffers that may exist.
+ * If VACUUM is releasing an otherwise-unused buffer, send it to the
+ * freelist for near-term reuse. We put it at the tail so that it
+ * won't be used before any invalid buffers that may exist.
*/
if (trash_buffer)
StrategyFreeBuffer(buf, false);
* To minimize work at checkpoint time, we want to try to keep all the
* buffers clean; this motivates a scan that proceeds sequentially through
* all buffers. But we are also charged with ensuring that buffers that
- * will be recycled soon are clean when needed; these buffers are the
- * ones just ahead of the StrategySyncStart point. We make a separate
- * scan through those.
+ * will be recycled soon are clean when needed; these buffers are the ones
+ * just ahead of the StrategySyncStart point. We make a separate scan
+ * through those.
*/
/*
- * This loop runs over all buffers, including pinned ones. The
- * starting point advances through the buffer pool on successive calls.
+ * This loop runs over all buffers, including pinned ones. The starting
+ * point advances through the buffer pool on successive calls.
*
- * Note that we advance the static counter *before* trying to write.
- * This ensures that, if we have a persistent write failure on a dirty
- * buffer, we'll still be able to make progress writing other buffers.
- * (The bgwriter will catch the error and just call us again later.)
+ * Note that we advance the static counter *before* trying to write. This
+ * ensures that, if we have a persistent write failure on a dirty buffer,
+ * we'll still be able to make progress writing other buffers. (The
+ * bgwriter will catch the error and just call us again later.)
*/
if (bgwriter_all_percent > 0.0 && bgwriter_all_maxpages > 0)
{
* If skip_pinned is true, we don't write currently-pinned buffers, nor
* buffers marked recently used, as these are not replacement candidates.
*
- * Returns true if buffer was written, else false. (This could be in error
+ * Returns true if buffer was written, else false. (This could be in error
* if FlushBuffers finds the buffer clean after locking it, but we don't
* care all that much.)
*
/*
* Check whether buffer needs writing.
*
- * We can make this check without taking the buffer content lock
- * so long as we mark pages dirty in access methods *before* logging
- * changes with XLogInsert(): if someone marks the buffer dirty
- * just after our check we don't worry because our checkpoint.redo
- * points before log record for upcoming changes and so we are not
- * required to write such dirty buffer.
+ * We can make this check without taking the buffer content lock so long as
+ * we mark pages dirty in access methods *before* logging changes with
+ * XLogInsert(): if someone marks the buffer dirty just after our check we
+ * don't worry because our checkpoint.redo points before log record for
+ * upcoming changes and so we are not required to write such dirty buffer.
*/
LockBufHdr(bufHdr);
if (!(bufHdr->flags & BM_VALID) || !(bufHdr->flags & BM_DIRTY))
}
/*
- * Pin it, share-lock it, write it. (FlushBuffer will do nothing
- * if the buffer is clean by the time we've locked it.)
+ * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
+ * buffer is clean by the time we've locked it.)
*/
PinBuffer_Locked(bufHdr);
LWLockAcquire(bufHdr->content_lock, LW_SHARED);
localhitrate = (float) LocalBufferHitCount *100.0 / ReadLocalBufferCount;
appendStringInfo(&str,
- "!\tShared blocks: %10ld read, %10ld written, buffer hit rate = %.2f%%\n",
- ReadBufferCount - BufferHitCount, BufferFlushCount, hitrate);
+ "!\tShared blocks: %10ld read, %10ld written, buffer hit rate = %.2f%%\n",
+ ReadBufferCount - BufferHitCount, BufferFlushCount, hitrate);
appendStringInfo(&str,
- "!\tLocal blocks: %10ld read, %10ld written, buffer hit rate = %.2f%%\n",
+ "!\tLocal blocks: %10ld read, %10ld written, buffer hit rate = %.2f%%\n",
ReadLocalBufferCount - LocalBufferHitCount, LocalBufferFlushCount, localhitrate);
appendStringInfo(&str,
"!\tDirect blocks: %10ld read, %10ld written\n",
/*
* Acquire the buffer's io_in_progress lock. If StartBufferIO returns
- * false, then someone else flushed the buffer before we could, so
- * we need not do anything.
+ * false, then someone else flushed the buffer before we could, so we need
+ * not do anything.
*/
if (!StartBufferIO(buf, false))
return;
/*
* Force XLOG flush up to buffer's LSN. This implements the basic WAL
- * rule that log updates must hit disk before any of the data-file
- * changes they describe do.
+ * rule that log updates must hit disk before any of the data-file changes
+ * they describe do.
*/
recptr = BufferGetLSN(buf);
XLogFlush(recptr);
/*
* Now it's safe to write buffer to disk. Note that no one else should
- * have been able to write it while we were busy with log flushing
- * because we have the io_in_progress lock.
+ * have been able to write it while we were busy with log flushing because
+ * we have the io_in_progress lock.
*/
/* To check if block content changes while flushing. - vadim 01/17/97 */
BufferFlushCount++;
/*
- * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set)
- * and end the io_in_progress state.
+ * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
+ * end the io_in_progress state.
*/
TerminateBufferIO(buf, true, 0);
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* Currently, this is called only from smgr.c when the underlying file
* be deleted momentarily anyway, and there is no point in writing it.
* It is the responsibility of higher-level code to ensure that the
* deletion or truncation does not lose any data that could be needed
- * later. It is also the responsibility of higher-level code to ensure
+ * later. It is also the responsibility of higher-level code to ensure
* that no other process could be trying to load more pages of the
* relation into buffers.
*
LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
bufHdr->tag.blockNum >= firstDelBlock)
- InvalidateBuffer(bufHdr); /* releases spinlock */
+ InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr);
}
bufHdr = &BufferDescriptors[i];
LockBufHdr(bufHdr);
if (bufHdr->tag.rnode.dbNode == dbid)
- InvalidateBuffer(bufHdr); /* releases spinlock */
+ InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr);
}
LockBufHdr_NoHoldoff(buf);
/*
- * Don't complain if flag bit not set; it could have been
- * reset but we got a cancel/die interrupt before getting the
- * signal.
+ * Don't complain if flag bit not set; it could have been reset but we
+ * got a cancel/die interrupt before getting the signal.
*/
if ((buf->flags & BM_PIN_COUNT_WAITER) != 0 &&
buf->wait_backend_pid == MyProcPid)
LWLockAcquire(buf->content_lock, LW_EXCLUSIVE);
/*
- * This is not the best place to mark buffer dirty (eg indices do
- * not always change buffer they lock in excl mode). But please
- * remember that it's critical to set dirty bit *before* logging
- * changes with XLogInsert() - see comments in SyncOneBuffer().
+ * This is not the best place to mark buffer dirty (eg indices do not
+ * always change buffer they lock in excl mode). But please remember
+ * that it's critical to set dirty bit *before* logging changes with
+ * XLogInsert() - see comments in SyncOneBuffer().
*/
LockBufHdr_NoHoldoff(buf);
buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
if (LWLockConditionalAcquire(buf->content_lock, LW_EXCLUSIVE))
{
/*
- * This is not the best place to mark buffer dirty (eg indices do
- * not always change buffer they lock in excl mode). But please
- * remember that it's critical to set dirty bit *before* logging
- * changes with XLogInsert() - see comments in SyncOneBuffer().
+ * This is not the best place to mark buffer dirty (eg indices do not
+ * always change buffer they lock in excl mode). But please remember
+ * that it's critical to set dirty bit *before* logging changes with
+ * XLogInsert() - see comments in SyncOneBuffer().
*/
LockBufHdr_NoHoldoff(buf);
buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
/*
* Changed to wait until there's no IO - Inoue 01/13/2000
*
- * Note this is *necessary* because an error abort in the process doing
- * I/O could release the io_in_progress_lock prematurely. See
- * AbortBufferIO.
+ * Note this is *necessary* because an error abort in the process doing I/O
+ * could release the io_in_progress_lock prematurely. See AbortBufferIO.
*/
for (;;)
{
BufFlags sv_flags;
/*
- * It may not be necessary to acquire the spinlock to check the
- * flag here, but since this test is essential for correctness,
- * we'd better play it safe.
+ * It may not be necessary to acquire the spinlock to check the flag
+ * here, but since this test is essential for correctness, we'd better
+ * play it safe.
*/
LockBufHdr(buf);
sv_flags = buf->flags;
if (buf)
{
/*
- * Since LWLockReleaseAll has already been called, we're not
- * holding the buffer's io_in_progress_lock. We have to re-acquire
- * it so that we can use TerminateBufferIO. Anyone who's executing
- * WaitIO on the buffer will be in a busy spin until we succeed in
- * doing this.
+ * Since LWLockReleaseAll has already been called, we're not holding
+ * the buffer's io_in_progress_lock. We have to re-acquire it so that
+ * we can use TerminateBufferIO. Anyone who's executing WaitIO on the
+ * buffer will be in a busy spin until we succeed in doing this.
*/
LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.53 2005/10/12 16:45:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.54 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int nextVictimBuffer;
int firstFreeBuffer; /* Head of list of unused buffers */
- int lastFreeBuffer; /* Tail of list of unused buffers */
+ int lastFreeBuffer; /* Tail of list of unused buffers */
/*
- * NOTE: lastFreeBuffer is undefined when firstFreeBuffer is -1
- * (that is, when the list is empty)
+ * NOTE: lastFreeBuffer is undefined when firstFreeBuffer is -1 (that is,
+ * when the list is empty)
*/
} BufferStrategyControl;
buf->freeNext = FREENEXT_NOT_IN_LIST;
/*
- * If the buffer is pinned or has a nonzero usage_count,
- * we cannot use it; discard it and retry. (This can only happen
- * if VACUUM put a valid buffer in the freelist and then someone
- * else used it before we got to it.)
+ * If the buffer is pinned or has a nonzero usage_count, we cannot use
+ * it; discard it and retry. (This can only happen if VACUUM put a
+ * valid buffer in the freelist and then someone else used it before
+ * we got to it.)
*/
LockBufHdr(buf);
if (buf->refcount == 0 && buf->usage_count == 0)
StrategyControl->nextVictimBuffer = 0;
/*
- * If the buffer is pinned or has a nonzero usage_count,
- * we cannot use it; decrement the usage_count and keep scanning.
+ * If the buffer is pinned or has a nonzero usage_count, we cannot use
+ * it; decrement the usage_count and keep scanning.
*/
LockBufHdr(buf);
if (buf->refcount == 0 && buf->usage_count == 0)
else if (--trycounter == 0)
{
/*
- * We've scanned all the buffers without making any state
- * changes, so all the buffers are pinned (or were when we
- * looked at them). We could hope that someone will free
- * one eventually, but it's probably better to fail than to
- * risk getting stuck in an infinite loop.
+ * We've scanned all the buffers without making any state changes,
+ * so all the buffers are pinned (or were when we looked at them).
+ * We could hope that someone will free one eventually, but it's
+ * probably better to fail than to risk getting stuck in an
+ * infinite loop.
*/
UnlockBufHdr(buf);
elog(ERROR, "no unpinned buffers available");
LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
/*
- * It is possible that we are told to put something in the freelist
- * that is already in it; don't screw up the list if so.
+ * It is possible that we are told to put something in the freelist that
+ * is already in it; don't screw up the list if so.
*/
if (buf->freeNext == FREENEXT_NOT_IN_LIST)
{
int result;
/*
- * We could probably dispense with the locking here, but just to be
- * safe ...
+ * We could probably dispense with the locking here, but just to be safe
+ * ...
*/
LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
result = StrategyControl->nextVictimBuffer;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.69 2005/08/20 23:26:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.70 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} LocalBufferLookupEnt;
/* Note: this macro only works on local buffers, not shared ones! */
-#define LocalBufHdrGetBlock(bufHdr) \
+#define LocalBufHdrGetBlock(bufHdr) \
LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
int NLocBuffer = 0; /* until buffers are initialized */
#endif
/*
- * Need to get a new buffer. We use a clock sweep algorithm
- * (essentially the same as what freelist.c does now...)
+ * Need to get a new buffer. We use a clock sweep algorithm (essentially
+ * the same as what freelist.c does now...)
*/
trycounter = NLocBuffer;
for (;;)
}
/*
- * this buffer is not referenced but it might still be dirty. if
- * that's the case, write it out before reusing it!
+ * this buffer is not referenced but it might still be dirty. if that's
+ * the case, write it out before reusing it!
*/
if (bufHdr->flags & BM_DIRTY)
{
hresult = (LocalBufferLookupEnt *)
hash_search(LocalBufHash, (void *) &bufHdr->tag,
HASH_REMOVE, NULL);
- if (!hresult) /* shouldn't happen */
+ if (!hresult) /* shouldn't happen */
elog(ERROR, "local buffer hash table corrupted");
/* mark buffer invalid just in case hash insert fails */
CLEAR_BUFFERTAG(bufHdr->tag);
hresult = (LocalBufferLookupEnt *)
hash_search(LocalBufHash, (void *) &newTag, HASH_ENTER, &found);
- if (found) /* shouldn't happen */
+ if (found) /* shouldn't happen */
elog(ERROR, "local buffer hash table corrupted");
hresult->id = b;
BufferDesc *buf = &LocalBufferDescriptors[i];
/*
- * negative to indicate local buffer. This is tricky: shared
- * buffers start with 0. We have to start with -2. (Note that the
- * routine BufferDescriptorGetBuffer adds 1 to buf_id so our first
- * buffer id is -1.)
+ * negative to indicate local buffer. This is tricky: shared buffers
+ * start with 0. We have to start with -2. (Note that the routine
+ * BufferDescriptorGetBuffer adds 1 to buf_id so our first buffer id
+ * is -1.)
*/
buf->buf_id = -i - 2;
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/buffile.c,v 1.21 2004/12/31 22:00:51 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/buffile.c,v 1.22 2005/10/15 02:49:25 momjian Exp $
*
* NOTES:
*
long *offsets; /* palloc'd array with numFiles entries */
/*
- * offsets[i] is the current seek position of files[i]. We use this
- * to avoid making redundant FileSeek calls.
+ * offsets[i] is the current seek position of files[i]. We use this to
+ * avoid making redundant FileSeek calls.
*/
bool isTemp; /* can only add files if this is TRUE */
bool dirty; /* does buffer need to be written? */
/*
- * "current pos" is position of start of buffer within the logical
- * file. Position as seen by user of BufFile is (curFile, curOffset +
- * pos).
+ * "current pos" is position of start of buffer within the logical file.
+ * Position as seen by user of BufFile is (curFile, curOffset + pos).
*/
int curFile; /* file index (0..n) part of current pos */
int curOffset; /* offset part of current pos */
file->files = (File *) repalloc(file->files,
(file->numFiles + 1) * sizeof(File));
file->offsets = (long *) repalloc(file->offsets,
- (file->numFiles + 1) * sizeof(long));
+ (file->numFiles + 1) * sizeof(long));
file->files[file->numFiles] = pfile;
file->offsets[file->numFiles] = 0L;
file->numFiles++;
}
/*
- * Enforce per-file size limit only for temp files, else just try
- * to write as much as asked...
+ * Enforce per-file size limit only for temp files, else just try to
+ * write as much as asked...
*/
bytestowrite = file->nbytes - wpos;
if (file->isTemp)
file->dirty = false;
/*
- * At this point, curOffset has been advanced to the end of the
- * buffer, ie, its original value + nbytes. We need to make it point
- * to the logical file position, ie, original value + pos, in case
- * that is less (as could happen due to a small backwards seek in a
- * dirty buffer!)
+ * At this point, curOffset has been advanced to the end of the buffer,
+ * ie, its original value + nbytes. We need to make it point to the
+ * logical file position, ie, original value + pos, in case that is less
+ * (as could happen due to a small backwards seek in a dirty buffer!)
*/
file->curOffset -= (file->nbytes - file->pos);
if (file->curOffset < 0) /* handle possible segment crossing */
}
/*
- * Now we can set the buffer empty without changing the logical
- * position
+ * Now we can set the buffer empty without changing the logical position
*/
file->pos = 0;
file->nbytes = 0;
/*
* Relative seek considers only the signed offset, ignoring
- * fileno. Note that large offsets (> 1 gig) risk overflow in
- * this add...
+ * fileno. Note that large offsets (> 1 gig) risk overflow in this
+ * add...
*/
newFile = file->curFile;
newOffset = (file->curOffset + file->pos) + offset;
/*
* At this point and no sooner, check for seek past last segment. The
- * above flush could have created a new segment, so checking sooner
- * would not work (at least not with this code).
+ * above flush could have created a new segment, so checking sooner would
+ * not work (at least not with this code).
*/
if (file->isTemp)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.120 2005/08/08 03:11:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.121 2005/10/15 02:49:25 momjian Exp $
*
* NOTES:
*
{
signed short fd; /* current FD, or VFD_CLOSED if none */
unsigned short fdstate; /* bitflags for VFD's state */
- SubTransactionId create_subid; /* for TEMPORARY fds, creating subxact */
+ SubTransactionId create_subid; /* for TEMPORARY fds, creating subxact */
File nextFree; /* link to next free VFD, if in freelist */
File lruMoreRecently; /* doubly linked recency-of-use list */
File lruLessRecently;
#ifdef WIN32
return _commit(fd);
#elif defined(__darwin__)
- return (fcntl(fd, F_FULLFSYNC, 0) == -1) ? -1 : 0;
+ return (fcntl(fd, F_FULLFSYNC, 0) == -1) ? -1 : 0;
#else
return -1;
#endif
void
InitFileAccess(void)
{
- Assert(SizeVfdCache == 0); /* call me only once */
+ Assert(SizeVfdCache == 0); /* call me only once */
/* initialize cache header entry */
VfdCache = (Vfd *) malloc(sizeof(Vfd));
* We stop counting if usable_fds reaches max_to_probe. Note: a small
* value of max_to_probe might result in an underestimate of already_open;
* we must fill in any "gaps" in the set of used FDs before the calculation
- * of already_open will give the right answer. In practice, max_to_probe
+ * of already_open will give the right answer. In practice, max_to_probe
* of a couple of dozen should be enough to ensure good results.
*
* We assume stdin (FD 0) is available for dup'ing
pfree(fd);
/*
- * Return results. usable_fds is just the number of successful dups.
- * We assume that the system limit is highestfd+1 (remember 0 is a
- * legal FD number) and so already_open is highestfd+1 - usable_fds.
+ * Return results. usable_fds is just the number of successful dups. We
+ * assume that the system limit is highestfd+1 (remember 0 is a legal FD
+ * number) and so already_open is highestfd+1 - usable_fds.
*/
*usable_fds = used;
*already_open = highestfd + 1 - used;
ereport(LOG,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
- errmsg("out of file descriptors: %m; release and retry")));
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto tryAgain;
}
/*
- * The open could still fail for lack of file descriptors, eg due
- * to overall system file table being full. So, be prepared to
- * release another FD if necessary...
+ * The open could still fail for lack of file descriptors, eg due to
+ * overall system file table being full. So, be prepared to release
+ * another FD if necessary...
*/
vfdP->fd = BasicOpenFile(vfdP->fileName, vfdP->fileFlags,
vfdP->fileMode);
if (nfile > 0)
{
/*
- * There are opened files and so there should be at least one used
- * vfd in the ring.
+ * There are opened files and so there should be at least one used vfd
+ * in the ring.
*/
Assert(VfdCache[0].lruMoreRecently != 0);
LruDelete(VfdCache[0].lruMoreRecently);
DO_DB(elog(LOG, "AllocateVfd. Size %d", SizeVfdCache));
- Assert(SizeVfdCache > 0); /* InitFileAccess not called? */
+ Assert(SizeVfdCache > 0); /* InitFileAccess not called? */
if (VfdCache[0].nextFree == 0)
{
/*
- * The free list is empty so it is time to increase the size of
- * the array. We choose to double it each time this happens.
- * However, there's not much point in starting *real* small.
+ * The free list is empty so it is time to increase the size of the
+ * array. We choose to double it each time this happens. However,
+ * there's not much point in starting *real* small.
*/
Size newCacheSize = SizeVfdCache * 2;
Vfd *newVfdCache;
file, VfdCache[file].fileName));
/*
- * Is the file open? If not, open it and put it at the head of the
- * LRU ring (possibly closing the least recently used file to get an
- * FD).
+ * Is the file open? If not, open it and put it at the head of the LRU
+ * ring (possibly closing the least recently used file to get an FD).
*/
if (FileIsNotOpen(file))
else if (VfdCache[0].lruLessRecently != file)
{
/*
- * We now know that the file is open and that it is not the last
- * one accessed, so we need to move it to the head of the Lru
- * ring.
+ * We now know that the file is open and that it is not the last one
+ * accessed, so we need to move it to the head of the Lru ring.
*/
Delete(file);
MyProcPid, tempFileCounter++);
/*
- * Open the file. Note: we don't use O_EXCL, in case there is an
- * orphaned temp file that can be reused.
+ * Open the file. Note: we don't use O_EXCL, in case there is an orphaned
+ * temp file that can be reused.
*/
file = FileNameOpenFile(tempfilepath,
O_RDWR | O_CREAT | O_TRUNC | PG_BINARY,
char *dirpath;
/*
- * We might need to create the pg_tempfiles subdirectory, if no
- * one has yet done so.
+ * We might need to create the pg_tempfiles subdirectory, if no one
+ * has yet done so.
*
- * Don't check for error from mkdir; it could fail if someone else
- * just did the same thing. If it doesn't work then we'll bomb
- * out on the second create attempt, instead.
+ * Don't check for error from mkdir; it could fail if someone else just
+ * did the same thing. If it doesn't work then we'll bomb out on the
+ * second create attempt, instead.
*/
dirpath = make_database_relative(PG_TEMP_FILES_DIR);
mkdir(dirpath, S_IRWXU);
/*
* The test against MAX_ALLOCATED_DESCS prevents us from overflowing
- * allocatedFiles[]; the test against max_safe_fds prevents
- * AllocateFile from hogging every one of the available FDs, which'd
- * lead to infinite looping.
+ * allocatedFiles[]; the test against max_safe_fds prevents AllocateFile
+ * from hogging every one of the available FDs, which'd lead to infinite
+ * looping.
*/
if (numAllocatedDescs >= MAX_ALLOCATED_DESCS ||
numAllocatedDescs >= max_safe_fds - 1)
ereport(LOG,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
- errmsg("out of file descriptors: %m; release and retry")));
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto TryAgain;
/*
* The test against MAX_ALLOCATED_DESCS prevents us from overflowing
- * allocatedDescs[]; the test against max_safe_fds prevents
- * AllocateDir from hogging every one of the available FDs, which'd
- * lead to infinite looping.
+ * allocatedDescs[]; the test against max_safe_fds prevents AllocateDir
+ * from hogging every one of the available FDs, which'd lead to infinite
+ * looping.
*/
if (numAllocatedDescs >= MAX_ALLOCATED_DESCS ||
numAllocatedDescs >= max_safe_fds - 1)
ereport(LOG,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
- errmsg("out of file descriptors: %m; release and retry")));
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto TryAgain;
* Read a directory opened with AllocateDir, ereport'ing any error.
*
* This is easier to use than raw readdir() since it takes care of some
- * otherwise rather tedious and error-prone manipulation of errno. Also,
+ * otherwise rather tedious and error-prone manipulation of errno. Also,
* if you are happy with a generic error message for AllocateDir failure,
* you can just do
*
return dent;
#ifdef WIN32
+
/*
- * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- * not in released version
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but not in
+ * released version
*/
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
if ((fdstate & FD_TEMPORARY) && VfdCache[i].fileName != NULL)
{
/*
- * If we're in the process of exiting a backend process,
- * close all temporary files. Otherwise, only close
- * temporary files local to the current transaction.
+ * If we're in the process of exiting a backend process, close
+ * all temporary files. Otherwise, only close temporary files
+ * local to the current transaction.
*/
if (isProcExit || (fdstate & FD_XACT_TEMPORARY))
FileClose(i);
FreeDir(db_dir);
/*
- * In EXEC_BACKEND case there is a pgsql_tmp directory at the top
- * level of DataDir as well.
+ * In EXEC_BACKEND case there is a pgsql_tmp directory at the top level of
+ * DataDir as well.
*/
#ifdef EXEC_BACKEND
RemovePgTempFilesInDir(PG_TEMP_FILES_DIR);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.48 2005/08/20 23:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.49 2005/10/15 02:49:25 momjian Exp $
*
*
* NOTES:
static void CheckFreeSpaceMapStatistics(int elevel, int numRels,
- double needed);
+ double needed);
static FSMRelation *lookup_fsm_rel(RelFileNode *rel);
static FSMRelation *create_fsm_rel(RelFileNode *rel);
static void delete_fsm_rel(FSMRelation *fsmrel);
if (!FreeSpaceMapRelHash)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("insufficient shared memory for free space map")));
+ errmsg("insufficient shared memory for free space map")));
if (found)
return;
if (nchunks <= MaxFSMRelations)
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
- CHUNKPAGES)));
+ errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
+ CHUNKPAGES)));
FreeSpaceMap->arena = (char *) ShmemAlloc((Size) nchunks * CHUNKBYTES);
if (FreeSpaceMap->arena == NULL)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("insufficient shared memory for free space map")));
+ errmsg("insufficient shared memory for free space map")));
FreeSpaceMap->totalChunks = nchunks;
FreeSpaceMap->usedChunks = 0;
fsmrel = create_fsm_rel(rel);
/*
- * Update the moving average of space requests. This code implements
- * an exponential moving average with an equivalent period of about 63
- * requests. Ignore silly requests, however, to ensure that the
- * average stays sane.
+ * Update the moving average of space requests. This code implements an
+ * exponential moving average with an equivalent period of about 63
+ * requests. Ignore silly requests, however, to ensure that the average
+ * stays sane.
*/
if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
{
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
/*
- * Note we don't record info about a relation unless there's already
- * an FSM entry for it, implying someone has done GetPageWithFreeSpace
- * for it. Inactive rels thus will not clutter the map simply by
- * being vacuumed.
+ * Note we don't record info about a relation unless there's already an
+ * FSM entry for it, implying someone has done GetPageWithFreeSpace for
+ * it. Inactive rels thus will not clutter the map simply by being
+ * vacuumed.
*/
fsmrel = lookup_fsm_rel(rel);
if (fsmrel)
curAllocPages = curAlloc * CHUNKPAGES;
/*
- * If the data fits in our current allocation, just copy it;
- * otherwise must compress.
+ * If the data fits in our current allocation, just copy it; otherwise
+ * must compress.
*/
newLocation = (FSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
/*
- * Note we don't record info about a relation unless there's already
- * an FSM entry for it, implying someone has done GetFreeIndexPage for
- * it. Inactive rels thus will not clutter the map simply by being
- * vacuumed.
+ * Note we don't record info about a relation unless there's already an
+ * FSM entry for it, implying someone has done GetFreeIndexPage for it.
+ * Inactive rels thus will not clutter the map simply by being vacuumed.
*/
fsmrel = lookup_fsm_rel(rel);
if (fsmrel)
curAllocPages = curAlloc * INDEXCHUNKPAGES;
/*
- * If the data fits in our current allocation, just copy it;
- * otherwise must compress. But compression is easy: we merely
- * forget extra pages.
+ * If the data fits in our current allocation, just copy it; otherwise
+ * must compress. But compression is easy: we merely forget extra
+ * pages.
*/
newLocation = (IndexFSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
ereport(elevel,
(errmsg("free space map contains %d pages in %d relations",
storedPages, numRels),
- errdetail("A total of %.0f page slots are in use (including overhead).\n"
- "%.0f page slots are required to track all free space.\n"
- "Current limits are: %d page slots, %d relations, using %.0f KB.",
- Min(needed, MaxFSMPages),
- needed, MaxFSMPages, MaxFSMRelations,
- (double) FreeSpaceShmemSize() / 1024.0)));
+ errdetail("A total of %.0f page slots are in use (including overhead).\n"
+ "%.0f page slots are required to track all free space.\n"
+ "Current limits are: %d page slots, %d relations, using %.0f KB.",
+ Min(needed, MaxFSMPages),
+ needed, MaxFSMPages, MaxFSMRelations,
+ (double) FreeSpaceShmemSize() / 1024.0)));
CheckFreeSpaceMapStatistics(NOTICE, numRels, needed);
/* Print to server logs too because is deals with a config variable. */
CheckFreeSpaceMapStatistics(LOG, numRels, needed);
}
-
+
static void
CheckFreeSpaceMapStatistics(int elevel, int numRels, double needed)
{
- if (numRels == MaxFSMRelations)
+ if (numRels == MaxFSMRelations)
ereport(elevel,
- (errmsg("max_fsm_relations(%d) equals the number of relations checked",
- MaxFSMRelations),
- errhint("You have >= %d relations.\n"
- "Consider increasing the configuration parameter \"max_fsm_relations\".",
- numRels)));
+ (errmsg("max_fsm_relations(%d) equals the number of relations checked",
+ MaxFSMRelations),
+ errhint("You have >= %d relations.\n"
+ "Consider increasing the configuration parameter \"max_fsm_relations\".",
+ numRels)));
else if (needed > MaxFSMPages)
ereport(elevel,
- (errmsg("the number of page slots needed (%.0f) exceeds max_fsm_pages (%d)",
- needed, MaxFSMPages),
- errhint("Consider increasing the configuration parameter \"max_fsm_pages\"\n"
- "to a value over %.0f.", needed)));
+ (errmsg("the number of page slots needed (%.0f) exceeds max_fsm_pages (%d)",
+ needed, MaxFSMPages),
+ errhint("Consider increasing the configuration parameter \"max_fsm_pages\"\n"
+ "to a value over %.0f.", needed)));
}
/*
FSMRelation *fsmrel;
/* Try to create file */
- unlink(FSM_CACHE_FILENAME); /* in case it exists w/wrong permissions */
+ unlink(FSM_CACHE_FILENAME); /* in case it exists w/wrong permissions */
fp = AllocateFile(FSM_CACHE_FILENAME, PG_BINARY_W);
if (fp == NULL)
}
/*
- * Okay, create the FSM entry and insert data into it. Since the
- * rels were stored in reverse usage order, at the end of the loop
- * they will be correctly usage-ordered in memory; and if
- * MaxFSMRelations is less than it used to be, we will correctly
- * drop the least recently used ones.
+ * Okay, create the FSM entry and insert data into it. Since the rels
+ * were stored in reverse usage order, at the end of the loop they
+ * will be correctly usage-ordered in memory; and if MaxFSMRelations
+ * is less than it used to be, we will correctly drop the least
+ * recently used ones.
*/
fsmrel = create_fsm_rel(&relheader.key);
fsmrel->avgRequest = relheader.avgRequest;
/*
* If the data fits in our current allocation, just copy it;
- * otherwise must compress. But compression is easy: we
- * merely forget extra pages.
+ * otherwise must compress. But compression is easy: we merely
+ * forget extra pages.
*/
newLocation = (IndexFSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
myAlloc = fsm_calc_target_allocation(myRequest);
/*
- * Need to reallocate space if (a) my target allocation is more than
- * my current allocation, AND (b) my actual immediate need
- * (myRequest+1 chunks) is more than my current allocation. Otherwise
- * just store the new data in-place.
+ * Need to reallocate space if (a) my target allocation is more than my
+ * current allocation, AND (b) my actual immediate need (myRequest+1
+ * chunks) is more than my current allocation. Otherwise just store the
+ * new data in-place.
*/
curAlloc = fsm_current_allocation(fsmrel);
if (myAlloc > curAlloc && (myRequest + 1) > curAlloc && nPages > 0)
if (spaceAvail >= spaceNeeded)
{
/*
- * Found what we want --- adjust the entry, and update
- * nextPage.
+ * Found what we want --- adjust the entry, and update nextPage.
*/
FSMPageSetSpace(page, spaceAvail - spaceNeeded);
fsmrel->nextPage = pageIndex + 1;
BlockNumber result;
/*
- * If isIndex isn't set, it could be that RecordIndexFreeSpace() has
- * never yet been called on this relation, and we're still looking at
- * the default setting from create_fsm_rel(). If so, just act as
- * though there's no space.
+ * If isIndex isn't set, it could be that RecordIndexFreeSpace() has never
+ * yet been called on this relation, and we're still looking at the
+ * default setting from create_fsm_rel(). If so, just act as though
+ * there's no space.
*/
if (!fsmrel->isIndex)
{
}
/*
- * For indexes, there's no need for the nextPage state variable; we
- * just remove and return the first available page. (We could save
- * cycles here by returning the last page, but it seems better to
- * encourage re-use of lower-numbered pages.)
+ * For indexes, there's no need for the nextPage state variable; we just
+ * remove and return the first available page. (We could save cycles here
+ * by returning the last page, but it seems better to encourage re-use of
+ * lower-numbered pages.)
*/
if (fsmrel->storedPages <= 0)
return InvalidBlockNumber; /* no pages available */
else
{
/*
- * No existing entry; ignore the call. We used to add the page to
- * the FSM --- but in practice, if the page hasn't got enough
- * space to satisfy the caller who's kicking it back to us, then
- * it's probably uninteresting to everyone else as well.
+ * No existing entry; ignore the call. We used to add the page to the
+ * FSM --- but in practice, if the page hasn't got enough space to
+ * satisfy the caller who's kicking it back to us, then it's probably
+ * uninteresting to everyone else as well.
*/
}
}
/*
* It's possible that we have to move data down, not up, if the
- * allocations of previous rels expanded. This normally means
- * that our allocation expanded too (or at least got no worse),
- * and ditto for later rels. So there should be room to move all
- * our data down without dropping any --- but we might have to
- * push down following rels to acquire the room. We don't want to
- * do the push more than once, so pack everything against the end
- * of the arena if so.
+ * allocations of previous rels expanded. This normally means that
+ * our allocation expanded too (or at least got no worse), and ditto
+ * for later rels. So there should be room to move all our data down
+ * without dropping any --- but we might have to push down following
+ * rels to acquire the room. We don't want to do the push more than
+ * once, so pack everything against the end of the arena if so.
*
* In corner cases where we are on the short end of a roundoff choice
* that we were formerly on the long end of, it's possible that we
- * have to move down and compress our data too. In fact, even
- * after pushing down the following rels, there might not be as
- * much space as we computed for this rel above --- that would
- * imply that some following rel(s) are also on the losing end of
- * roundoff choices. We could handle this fairly by doing the
- * per-rel compactions out-of-order, but that seems like way too
- * much complexity to deal with a very infrequent corner case.
- * Instead, we simply drop pages from the end of the current rel's
- * data until it fits.
+ * have to move down and compress our data too. In fact, even after
+ * pushing down the following rels, there might not be as much space
+ * as we computed for this rel above --- that would imply that some
+ * following rel(s) are also on the losing end of roundoff choices. We
+ * could handle this fairly by doing the per-rel compactions
+ * out-of-order, but that seems like way too much complexity to deal
+ * with a very infrequent corner case. Instead, we simply drop pages
+ * from the end of the current rel's data until it fits.
*/
if (newChunkIndex > oldChunkIndex)
{
newAlloc = limitChunkIndex - newChunkIndex;
/*
- * If newAlloc < 0 at this point, we are moving the
- * rel's firstChunk into territory currently assigned
- * to a later rel. This is okay so long as we do not
- * copy any data. The rels will be back in
- * nondecreasing firstChunk order at completion of the
- * compaction pass.
+ * If newAlloc < 0 at this point, we are moving the rel's
+ * firstChunk into territory currently assigned to a later
+ * rel. This is okay so long as we do not copy any data.
+ * The rels will be back in nondecreasing firstChunk order
+ * at completion of the compaction pass.
*/
if (newAlloc < 0)
newAlloc = 0;
else if (newAllocPages < fsmrel->storedPages)
{
/*
- * Need to compress the page data. For an index,
- * "compression" just means dropping excess pages; otherwise
- * we try to keep the ones with the most space.
+ * Need to compress the page data. For an index, "compression"
+ * just means dropping excess pages; otherwise we try to keep the
+ * ones with the most space.
*/
if (fsmrel->isIndex)
{
relNum++;
fprintf(stderr, "Map %d: rel %u/%u/%u isIndex %d avgRequest %u lastPageCount %d nextPage %d\nMap= ",
relNum,
- fsmrel->key.spcNode, fsmrel->key.dbNode, fsmrel->key.relNode,
+ fsmrel->key.spcNode, fsmrel->key.dbNode, fsmrel->key.relNode,
(int) fsmrel->isIndex, fsmrel->avgRequest,
fsmrel->lastPageCount, fsmrel->nextPage);
if (fsmrel->isIndex)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.90 2004/12/31 22:00:56 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.91 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
proc_exit(int code)
{
/*
- * Once we set this flag, we are committed to exit. Any ereport()
- * will NOT send control back to the main loop, but right back here.
+ * Once we set this flag, we are committed to exit. Any ereport() will
+ * NOT send control back to the main loop, but right back here.
*/
proc_exit_inprogress = true;
/*
* call all the callbacks registered before calling exit().
*
- * Note that since we decrement on_proc_exit_index each time, if a
- * callback calls ereport(ERROR) or ereport(FATAL) then it won't be
- * invoked again when control comes back here (nor will the
- * previously-completed callbacks). So, an infinite loop should not
- * be possible.
+ * Note that since we decrement on_proc_exit_index each time, if a callback
+ * calls ereport(ERROR) or ereport(FATAL) then it won't be invoked again
+ * when control comes back here (nor will the previously-completed
+ * callbacks). So, an infinite loop should not be possible.
*/
while (--on_proc_exit_index >= 0)
(*on_proc_exit_list[on_proc_exit_index].function) (code,
- on_proc_exit_list[on_proc_exit_index].arg);
+ on_proc_exit_list[on_proc_exit_index].arg);
elog(DEBUG3, "exit(%d)", code);
exit(code);
/*
* call all the registered callbacks.
*
- * As with proc_exit(), we remove each callback from the list before
- * calling it, to avoid infinite loop in case of error.
+ * As with proc_exit(), we remove each callback from the list before calling
+ * it, to avoid infinite loop in case of error.
*/
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
- on_shmem_exit_list[on_shmem_exit_index].arg);
+ on_shmem_exit_list[on_shmem_exit_index].arg);
on_shmem_exit_index = 0;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.78 2005/08/20 23:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.79 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Size of the Postgres shared-memory block is estimated via
- * moderately-accurate estimates for the big hogs, plus 100K for
- * the stuff that's too small to bother with estimating.
+ * moderately-accurate estimates for the big hogs, plus 100K for the
+ * stuff that's too small to bother with estimating.
*
- * We take some care during this phase to ensure that the total
- * size request doesn't overflow size_t. If this gets through,
- * we don't need to be so careful during the actual allocation
- * phase.
+ * We take some care during this phase to ensure that the total size
+ * request doesn't overflow size_t. If this gets through, we don't
+ * need to be so careful during the actual allocation phase.
*/
size = 100000;
size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
else
{
/*
- * We are reattaching to an existing shared memory segment.
- * This should only be reached in the EXEC_BACKEND case, and
- * even then only with makePrivate == false.
+ * We are reattaching to an existing shared memory segment. This
+ * should only be reached in the EXEC_BACKEND case, and even then only
+ * with makePrivate == false.
*/
#ifdef EXEC_BACKEND
Assert(!makePrivate);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.19 2005/08/20 23:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.20 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/*
* Use kill() to see if the postmaster is still alive. This can
- * sometimes give a false positive result, since the postmaster's
- * PID may get recycled, but it is good enough for existing uses
- * by indirect children.
+ * sometimes give a false positive result, since the postmaster's PID
+ * may get recycled, but it is good enough for existing uses by
+ * indirect children.
*/
return (kill(PostmasterPid, 0) == 0);
}
* prepared transactions. The xid and subxids fields of these are valid,
* as is the procLocks list. They can be distinguished from regular backend
* PGPROCs at need by checking for pid == 0.
- *
+ *
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.6 2005/08/20 23:26:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.7 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int maxProcs; /* allocated size of procs array */
/*
- * We declare procs[] as 1 entry because C wants a fixed-size array,
- * but actually it is maxProcs entries long.
+ * We declare procs[] as 1 entry because C wants a fixed-size array, but
+ * actually it is maxProcs entries long.
*/
PGPROC *procs[1]; /* VARIABLE LENGTH ARRAY */
} ProcArrayStruct;
#define xc_slow_answer_inc() (xc_slow_answer++)
static void DisplayXidCache(void);
-
#else /* !XIDCACHE_DEBUG */
#define xc_by_recent_xmin_inc() ((void) 0)
#define xc_by_main_xid_inc() ((void) 0)
#define xc_by_child_xid_inc() ((void) 0)
#define xc_slow_answer_inc() ((void) 0)
-
#endif /* XIDCACHE_DEBUG */
size = offsetof(ProcArrayStruct, procs);
size = add_size(size, mul_size(sizeof(PGPROC *),
- add_size(MaxBackends, max_prepared_xacts)));
+ add_size(MaxBackends, max_prepared_xacts)));
return size;
}
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
- * Ooops, no room. (This really shouldn't happen, since there is
- * a fixed supply of PGPROC structs too, and so we should have
- * failed earlier.)
+ * Ooops, no room. (This really shouldn't happen, since there is a
+ * fixed supply of PGPROC structs too, and so we should have failed
+ * earlier.)
*/
LWLockRelease(ProcArrayLock);
ereport(FATAL,
bool locked;
/*
- * Don't bother checking a transaction older than RecentXmin; it
- * could not possibly still be running.
+ * Don't bother checking a transaction older than RecentXmin; it could not
+ * possibly still be running.
*/
if (TransactionIdPrecedes(xid, RecentXmin))
{
}
/*
- * We can ignore main Xids that are younger than the target
- * Xid, since the target could not possibly be their child.
+ * We can ignore main Xids that are younger than the target Xid, since
+ * the target could not possibly be their child.
*/
if (TransactionIdPrecedes(xid, pxid))
continue;
}
/*
- * Save the main Xid for step 3. We only need to remember
- * main Xids that have uncached children. (Note: there is no
- * race condition here because the overflowed flag cannot be
- * cleared, only set, while we hold ProcArrayLock. So we can't
- * miss an Xid that we need to worry about.)
+ * Save the main Xid for step 3. We only need to remember main Xids
+ * that have uncached children. (Note: there is no race condition
+ * here because the overflowed flag cannot be cleared, only set, while
+ * we hold ProcArrayLock. So we can't miss an Xid that we need to
+ * worry about.)
*/
if (proc->subxids.overflowed)
xids[nxids++] = pxid;
/*
* Step 3: have to check pg_subtrans.
*
- * At this point, we know it's either a subtransaction of one of the Xids
- * in xids[], or it's not running. If it's an already-failed
- * subtransaction, we want to say "not running" even though its parent
- * may still be running. So first, check pg_clog to see if it's been
- * aborted.
+ * At this point, we know it's either a subtransaction of one of the Xids in
+ * xids[], or it's not running. If it's an already-failed subtransaction,
+ * we want to say "not running" even though its parent may still be
+ * running. So first, check pg_clog to see if it's been aborted.
*/
xc_slow_answer_inc();
goto result_known;
/*
- * It isn't aborted, so check whether the transaction tree it belongs
- * to is still running (or, more precisely, whether it was running
- * when this routine started -- note that we already released
- * ProcArrayLock).
+ * It isn't aborted, so check whether the transaction tree it belongs to
+ * is still running (or, more precisely, whether it was running when this
+ * routine started -- note that we already released ProcArrayLock).
*/
topxid = SubTransGetTopmostTransaction(xid);
Assert(TransactionIdIsValid(topxid));
int i;
/*
- * Don't bother checking a transaction older than RecentXmin; it
- * could not possibly still be running.
+ * Don't bother checking a transaction older than RecentXmin; it could not
+ * possibly still be running.
*/
if (TransactionIdPrecedes(xid, RecentXmin))
return false;
/*
* Normally we start the min() calculation with our own XID. But if
* called by checkpointer, we will not be inside a transaction, so use
- * next XID as starting point for min() calculation. (Note that if
- * there are no xacts running at all, that will be the subtrans
- * truncation point!)
+ * next XID as starting point for min() calculation. (Note that if there
+ * are no xacts running at all, that will be the subtrans truncation
+ * point!)
*/
if (IsTransactionState())
result = GetTopTransactionId();
* This ensures that the set of transactions seen as "running" by the
* current xact will not change after it takes the snapshot.
*
- * Note that only top-level XIDs are included in the snapshot. We can
+ * Note that only top-level XIDs are included in the snapshot. We can
* still apply the xmin and xmax limits to subtransaction XIDs, but we
* need to work a bit harder to see if XIDs in [xmin..xmax) are running.
*
* RecentXmin: the xmin computed for the most recent snapshot. XIDs
* older than this are known not running any more.
* RecentGlobalXmin: the global xmin (oldest TransactionXmin across all
- * running transactions). This is the same computation done by
+ * running transactions). This is the same computation done by
* GetOldestXmin(TRUE).
*----------
*/
TransactionIdIsValid(MyProc->xmin));
/*
- * Allocating space for maxProcs xids is usually overkill;
- * numProcs would be sufficient. But it seems better to do the
- * malloc while not holding the lock, so we can't look at numProcs.
+ * Allocating space for maxProcs xids is usually overkill; numProcs would
+ * be sufficient. But it seems better to do the malloc while not holding
+ * the lock, so we can't look at numProcs.
*
* This does open a possibility for avoiding repeated malloc/free: since
- * maxProcs does not change at runtime, we can simply reuse the
- * previous xip array if any. (This relies on the fact that all
- * callers pass static SnapshotData structs.)
+ * maxProcs does not change at runtime, we can simply reuse the previous
+ * xip array if any. (This relies on the fact that all callers pass
+ * static SnapshotData structs.)
*/
if (snapshot->xip == NULL)
{
TransactionId xid = proc->xid;
/*
- * Ignore my own proc (dealt with my xid above), procs not
- * running a transaction, and xacts started since we read the
- * next transaction ID. There's no need to store XIDs above
- * what we got from ReadNewTransactionId, since we'll treat
- * them as running anyway. We also assume that such xacts
- * can't compute an xmin older than ours, so they needn't be
- * considered in computing globalxmin.
+ * Ignore my own proc (dealt with my xid above), procs not running a
+ * transaction, and xacts started since we read the next transaction
+ * ID. There's no need to store XIDs above what we got from
+ * ReadNewTransactionId, since we'll treat them as running anyway. We
+ * also assume that such xacts can't compute an xmin older than ours,
+ * so they needn't be considered in computing globalxmin.
*/
if (proc == MyProc ||
!TransactionIdIsNormal(xid) ||
LWLockRelease(ProcArrayLock);
/*
- * Update globalxmin to include actual process xids. This is a
- * slightly different way of computing it than GetOldestXmin uses, but
- * should give the same result.
+ * Update globalxmin to include actual process xids. This is a slightly
+ * different way of computing it than GetOldestXmin uses, but should give
+ * the same result.
*/
if (TransactionIdPrecedes(xmin, globalxmin))
globalxmin = xmin;
* Returns 0 if not found or it's a prepared transaction. Note that
* it is up to the caller to be sure that the question remains
* meaningful for long enough for the answer to be used ...
- *
+ *
* Only main transaction Ids are considered. This function is mainly
* useful for determining what backend owns a lock.
*/
int
BackendXidGetPid(TransactionId xid)
{
- int result = 0;
+ int result = 0;
ProcArrayStruct *arrayP = procArray;
int index;
/*
* Note: for speed, we don't acquire ProcArrayLock. This is a little bit
- * bogus, but since we are only testing fields for zero or nonzero,
- * it should be OK. The result is only used for heuristic purposes
- * anyway...
+ * bogus, but since we are only testing fields for zero or nonzero, it
+ * should be OK. The result is only used for heuristic purposes anyway...
*/
for (index = 0; index < arrayP->numProcs; index++)
{
/*
* We must hold ProcArrayLock exclusively in order to remove transactions
- * from the PGPROC array. (See notes in GetSnapshotData.) It's
- * possible this could be relaxed since we know this routine is only
- * used to abort subtransactions, but pending closer analysis we'd
- * best be conservative.
+ * from the PGPROC array. (See notes in GetSnapshotData.) It's possible
+ * this could be relaxed since we know this routine is only used to abort
+ * subtransactions, but pending closer analysis we'd best be conservative.
*/
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
/*
- * Under normal circumstances xid and xids[] will be in increasing
- * order, as will be the entries in subxids. Scan backwards to avoid
- * O(N^2) behavior when removing a lot of xids.
+ * Under normal circumstances xid and xids[] will be in increasing order,
+ * as will be the entries in subxids. Scan backwards to avoid O(N^2)
+ * behavior when removing a lot of xids.
*/
for (i = nxids - 1; i >= 0; i--)
{
break;
}
}
+
/*
- * Ordinarily we should have found it, unless the cache has overflowed.
- * However it's also possible for this routine to be invoked multiple
- * times for the same subtransaction, in case of an error during
- * AbortSubTransaction. So instead of Assert, emit a debug warning.
+ * Ordinarily we should have found it, unless the cache has
+ * overflowed. However it's also possible for this routine to be
+ * invoked multiple times for the same subtransaction, in case of an
+ * error during AbortSubTransaction. So instead of Assert, emit a
+ * debug warning.
*/
if (j < 0 && !MyProc->subxids.overflowed)
elog(WARNING, "did not find subXID %u in MyProc", anxid);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.86 2005/10/07 21:42:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.87 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static SHMEM_OFFSET ShmemEnd; /* end+1 address of shared memory */
-slock_t *ShmemLock; /* spinlock for shared memory and LWLock allocation */
+slock_t *ShmemLock; /* spinlock for shared memory and LWLock
+ * allocation */
NON_EXEC_STATIC slock_t *ShmemIndexLock; /* spinlock for ShmemIndex */
-NON_EXEC_STATIC void *ShmemIndexAlloc = NULL; /* Memory actually
- * allocated for
- * ShmemIndex */
+NON_EXEC_STATIC void *ShmemIndexAlloc = NULL; /* Memory actually allocated
+ * for ShmemIndex */
static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */
bool found;
/*
- * Since ShmemInitHash calls ShmemInitStruct, which expects the
- * ShmemIndex hashtable to exist already, we have a bit of a
- * circularity problem in initializing the ShmemIndex itself. The
- * special "ShmemIndex" hash table name will tell ShmemInitStruct
- * to fake it.
+ * Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
+ * hashtable to exist already, we have a bit of a circularity problem in
+ * initializing the ShmemIndex itself. The special "ShmemIndex" hash
+ * table name will tell ShmemInitStruct to fake it.
*/
/* create the shared memory shmem index */
void *location;
/*
- * Hash tables allocated in shared memory have a fixed directory; it
- * can't grow or other backends wouldn't be able to find it. So, make
- * sure we make it big enough to start with.
+ * Hash tables allocated in shared memory have a fixed directory; it can't
+ * grow or other backends wouldn't be able to find it. So, make sure we
+ * make it big enough to start with.
*
* The shared memory allocator must be specified too.
*/
/* look it up in the shmem index */
location = ShmemInitStruct(name,
- sizeof(HASHHDR) + infoP->dsize * sizeof(HASHSEGMENT),
+ sizeof(HASHHDR) + infoP->dsize * sizeof(HASHSEGMENT),
&found);
/*
- * shmem index is corrupted. Let someone else give the error
- * message since they have more information
+ * shmem index is corrupted. Let someone else give the error message
+ * since they have more information
*/
if (location == NULL)
return NULL;
/*
- * if it already exists, attach to it rather than allocate and
- * initialize new space
+ * if it already exists, attach to it rather than allocate and initialize
+ * new space
*/
if (found)
hash_flags |= HASH_ATTACH;
else
{
/*
- * If the shmem index doesn't exist, we are bootstrapping: we
- * must be trying to init the shmem index itself.
+ * If the shmem index doesn't exist, we are bootstrapping: we must
+ * be trying to init the shmem index itself.
*
- * Notice that the ShmemIndexLock is held until the shmem index
- * has been completely initialized.
+ * Notice that the ShmemIndexLock is held until the shmem index has
+ * been completely initialized.
*/
*foundPtr = FALSE;
ShmemIndexAlloc = ShmemAlloc(size);
if (*foundPtr)
{
/*
- * Structure is in the shmem index so someone else has allocated
- * it already. The size better be the same as the size we are
- * trying to initialize to or there is a name conflict (or worse).
+ * Structure is in the shmem index so someone else has allocated it
+ * already. The size better be the same as the size we are trying to
+ * initialize to or there is a name conflict (or worse).
*/
if (result->size != size)
{
ereport(WARNING,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("could not allocate shared memory segment \"%s\"", name)));
+ errmsg("could not allocate shared memory segment \"%s\"", name)));
*foundPtr = FALSE;
return NULL;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.77 2005/08/20 23:26:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.78 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
void
ReceiveSharedInvalidMessages(
- void (*invalFunction) (SharedInvalidationMessage *msg),
+ void (*invalFunction) (SharedInvalidationMessage *msg),
void (*resetFunction) (void))
{
SharedInvalidationMessage data;
for (;;)
{
/*
- * We can discard any pending catchup event, since we will not
- * exit this loop until we're fully caught up.
+ * We can discard any pending catchup event, since we will not exit
+ * this loop until we're fully caught up.
*/
catchupInterruptOccurred = 0;
/*
- * We can run SIGetDataEntry in parallel with other backends
- * running SIGetDataEntry for themselves, since each instance will
- * modify only fields of its own backend's ProcState, and no
- * instance will look at fields of other backends' ProcStates. We
- * express this by grabbing SInvalLock in shared mode. Note that
- * this is not exactly the normal (read-only) interpretation of a
- * shared lock! Look closely at the interactions before allowing
- * SInvalLock to be grabbed in shared mode for any other reason!
+ * We can run SIGetDataEntry in parallel with other backends running
+ * SIGetDataEntry for themselves, since each instance will modify only
+ * fields of its own backend's ProcState, and no instance will look at
+ * fields of other backends' ProcStates. We express this by grabbing
+ * SInvalLock in shared mode. Note that this is not exactly the
+ * normal (read-only) interpretation of a shared lock! Look closely at
+ * the interactions before allowing SInvalLock to be grabbed in shared
+ * mode for any other reason!
*/
LWLockAcquire(SInvalLock, LW_SHARED);
getResult = SIGetDataEntry(shmInvalBuffer, MyBackendId, &data);
bool save_ImmediateInterruptOK = ImmediateInterruptOK;
/*
- * We may be called while ImmediateInterruptOK is true; turn it
- * off while messing with the catchup state. (We would have to
- * save and restore it anyway, because PGSemaphore operations
- * inside ProcessCatchupEvent() might reset it.)
+ * We may be called while ImmediateInterruptOK is true; turn it off
+ * while messing with the catchup state. (We would have to save and
+ * restore it anyway, because PGSemaphore operations inside
+ * ProcessCatchupEvent() might reset it.)
*/
ImmediateInterruptOK = false;
/*
* I'm not sure whether some flavors of Unix might allow another
- * SIGUSR1 occurrence to recursively interrupt this routine. To
- * cope with the possibility, we do the same sort of dance that
- * EnableCatchupInterrupt must do --- see that routine for
- * comments.
+ * SIGUSR1 occurrence to recursively interrupt this routine. To cope
+ * with the possibility, we do the same sort of dance that
+ * EnableCatchupInterrupt must do --- see that routine for comments.
*/
catchupInterruptEnabled = 0; /* disable any recursive signal */
catchupInterruptOccurred = 1; /* do at least one iteration */
}
/*
- * Restore ImmediateInterruptOK, and check for interrupts if
- * needed.
+ * Restore ImmediateInterruptOK, and check for interrupts if needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
else
{
/*
- * In this path it is NOT SAFE to do much of anything, except
- * this:
+ * In this path it is NOT SAFE to do much of anything, except this:
*/
catchupInterruptOccurred = 1;
}
EnableCatchupInterrupt(void)
{
/*
- * This code is tricky because we are communicating with a signal
- * handler that could interrupt us at any point. If we just checked
- * catchupInterruptOccurred and then set catchupInterruptEnabled, we
- * could fail to respond promptly to a signal that happens in between
- * those two steps. (A very small time window, perhaps, but Murphy's
- * Law says you can hit it...) Instead, we first set the enable flag,
- * then test the occurred flag. If we see an unserviced interrupt has
- * occurred, we re-clear the enable flag before going off to do the
- * service work. (That prevents re-entrant invocation of
- * ProcessCatchupEvent() if another interrupt occurs.) If an interrupt
- * comes in between the setting and clearing of
- * catchupInterruptEnabled, then it will have done the service work
- * and left catchupInterruptOccurred zero, so we have to check again
- * after clearing enable. The whole thing has to be in a loop in case
- * another interrupt occurs while we're servicing the first. Once we
- * get out of the loop, enable is set and we know there is no
+ * This code is tricky because we are communicating with a signal handler
+ * that could interrupt us at any point. If we just checked
+ * catchupInterruptOccurred and then set catchupInterruptEnabled, we could
+ * fail to respond promptly to a signal that happens in between those two
+ * steps. (A very small time window, perhaps, but Murphy's Law says you
+ * can hit it...) Instead, we first set the enable flag, then test the
+ * occurred flag. If we see an unserviced interrupt has occurred, we
+ * re-clear the enable flag before going off to do the service work.
+ * (That prevents re-entrant invocation of ProcessCatchupEvent() if
+ * another interrupt occurs.) If an interrupt comes in between the setting
+ * and clearing of catchupInterruptEnabled, then it will have done the
+ * service work and left catchupInterruptOccurred zero, so we have to
+ * check again after clearing enable. The whole thing has to be in a loop
+ * in case another interrupt occurs while we're servicing the first. Once
+ * we get out of the loop, enable is set and we know there is no
* unserviced interrupt.
*
- * NB: an overenthusiastic optimizing compiler could easily break this
- * code. Hopefully, they all understand what "volatile" means these
- * days.
+ * NB: an overenthusiastic optimizing compiler could easily break this code.
+ * Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{
notify_enabled = DisableNotifyInterrupt();
/*
- * What we need to do here is cause ReceiveSharedInvalidMessages() to
- * run, which will do the necessary work and also reset the
- * catchupInterruptOccurred flag. If we are inside a transaction we
- * can just call AcceptInvalidationMessages() to do this. If we
- * aren't, we start and immediately end a transaction; the call to
+ * What we need to do here is cause ReceiveSharedInvalidMessages() to run,
+ * which will do the necessary work and also reset the
+ * catchupInterruptOccurred flag. If we are inside a transaction we can
+ * just call AcceptInvalidationMessages() to do this. If we aren't, we
+ * start and immediately end a transaction; the call to
* AcceptInvalidationMessages() happens down inside transaction start.
*
- * It is awfully tempting to just call AcceptInvalidationMessages()
- * without the rest of the xact start/stop overhead, and I think that
- * would actually work in the normal case; but I am not sure that
- * things would clean up nicely if we got an error partway through.
+ * It is awfully tempting to just call AcceptInvalidationMessages() without
+ * the rest of the xact start/stop overhead, and I think that would
+ * actually work in the normal case; but I am not sure that things would
+ * clean up nicely if we got an error partway through.
*/
if (IsTransactionOrTransactionBlock())
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.60 2005/08/20 23:26:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.61 2005/10/15 02:49:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/*
* Don't panic just yet: slowest backend might have consumed some
- * messages but not yet have done SIDelExpiredDataEntries() to
- * advance minMsgNum. So, make sure minMsgNum is up-to-date.
+ * messages but not yet have done SIDelExpiredDataEntries() to advance
+ * minMsgNum. So, make sure minMsgNum is up-to-date.
*/
SIDelExpiredDataEntries(segP);
numMsgs = segP->maxMsgNum - segP->minMsgNum;
/*
* Try to prevent table overflow. When the table is 70% full send a
- * WAKEN_CHILDREN request to the postmaster. The postmaster will send
- * a SIGUSR1 signal to all the backends, which will cause sinval.c to
- * read any pending SI entries.
+ * WAKEN_CHILDREN request to the postmaster. The postmaster will send a
+ * SIGUSR1 signal to all the backends, which will cause sinval.c to read
+ * any pending SI entries.
*
* This should never happen if all the backends are actively executing
* queries, but if a backend is sitting idle then it won't be starting
stateP->nextMsgNum++;
/*
- * There may be other backends that haven't read the message, so we
- * cannot delete it here. SIDelExpiredDataEntries() should be called
- * to remove dead messages.
+ * There may be other backends that haven't read the message, so we cannot
+ * delete it here. SIDelExpiredDataEntries() should be called to remove
+ * dead messages.
*/
return 1; /* got a message */
}
segP->minMsgNum = min;
/*
- * When minMsgNum gets really large, decrement all message counters so
- * as to forestall overflow of the counters.
+ * When minMsgNum gets really large, decrement all message counters so as
+ * to forestall overflow of the counters.
*/
if (min >= MSGNUMWRAPAROUND)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.112 2005/08/12 01:35:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.113 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (lo_heap_r || lo_index_r)
{
/*
- * Only bother to close if committing; else abort cleanup will
- * handle it
+ * Only bother to close if committing; else abort cleanup will handle
+ * it
*/
if (isCommit)
{
inv_create(Oid lobjId)
{
/*
- * Allocate an OID to be the LO's identifier, unless we were told
- * what to use. We can use the index on pg_largeobject for checking
- * OID uniqueness, even though it has additional columns besides OID.
+ * Allocate an OID to be the LO's identifier, unless we were told what to
+ * use. We can use the index on pg_largeobject for checking OID
+ * uniqueness, even though it has additional columns besides OID.
*/
if (!OidIsValid(lobjId))
{
}
/*
- * Create the LO by writing an empty first page for it in
- * pg_largeobject (will fail if duplicate)
+ * Create the LO by writing an empty first page for it in pg_largeobject
+ * (will fail if duplicate)
*/
LargeObjectCreate(lobjId);
/*
* Because the pg_largeobject index is on both loid and pageno, but we
* constrain only loid, a backwards scan should visit all pages of the
- * large object in reverse pageno order. So, it's sufficient to
- * examine the first valid tuple (== last valid page).
+ * large object in reverse pageno order. So, it's sufficient to examine
+ * the first valid tuple (== last valid page).
*/
while ((tuple = index_getnext(sd, BackwardScanDirection)) != NULL)
{
/*
* We assume the indexscan will deliver pages in order. However,
- * there may be missing pages if the LO contains unwritten
- * "holes". We want missing sections to read out as zeroes.
+ * there may be missing pages if the LO contains unwritten "holes". We
+ * want missing sections to read out as zeroes.
*/
pageoff = ((uint32) data->pageno) * LOBLKSIZE;
if (pageoff > obj_desc->offset)
while (nwritten < nbytes)
{
/*
- * If possible, get next pre-existing page of the LO. We assume
- * the indexscan will deliver these in order --- but there may be
- * holes.
+ * If possible, get next pre-existing page of the LO. We assume the
+ * indexscan will deliver these in order --- but there may be holes.
*/
if (neednextpage)
{
}
/*
- * If we have a pre-existing page, see if it is the page we want
- * to write, or a later one.
+ * If we have a pre-existing page, see if it is the page we want to
+ * write, or a later one.
*/
if (olddata != NULL && olddata->pageno == pageno)
{
CatalogCloseIndexes(indstate);
/*
- * Advance command counter so that my tuple updates will be seen by
- * later large-object operations in this transaction.
+ * Advance command counter so that my tuple updates will be seen by later
+ * large-object operations in this transaction.
*/
CommandCounterIncrement();
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.34 2005/04/29 22:28:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.35 2005/10/15 02:49:26 momjian Exp $
*
* Interface:
*
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
/*
- * FindLockCycle needs at most MaxBackends entries in visitedProcs[]
- * and deadlockDetails[].
+ * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
+ * deadlockDetails[].
*/
visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
/*
- * TopoSort needs to consider at most MaxBackends wait-queue entries,
- * and it needn't run concurrently with FindLockCycle.
+ * TopoSort needs to consider at most MaxBackends wait-queue entries, and
+ * it needn't run concurrently with FindLockCycle.
*/
topoProcs = visitedProcs; /* re-use this space */
beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
/*
* We need to consider rearranging at most MaxBackends/2 wait queues
- * (since it takes at least two waiters in a queue to create a soft
- * edge), and the expanded form of the wait queues can't involve more
- * than MaxBackends total waiters.
+ * (since it takes at least two waiters in a queue to create a soft edge),
+ * and the expanded form of the wait queues can't involve more than
+ * MaxBackends total waiters.
*/
waitOrders = (WAIT_ORDER *)
palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
/*
- * Allow at most MaxBackends distinct constraints in a configuration.
- * (Is this enough? In practice it seems it should be, but I don't
- * quite see how to prove it. If we run out, we might fail to find a
- * workable wait queue rearrangement even though one exists.) NOTE
- * that this number limits the maximum recursion depth of
- * DeadLockCheckRecurse. Making it really big might potentially allow
- * a stack-overflow problem.
+ * Allow at most MaxBackends distinct constraints in a configuration. (Is
+ * this enough? In practice it seems it should be, but I don't quite see
+ * how to prove it. If we run out, we might fail to find a workable wait
+ * queue rearrangement even though one exists.) NOTE that this number
+ * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
+ * really big might potentially allow a stack-overflow problem.
*/
maxCurConstraints = MaxBackends;
curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
/*
* Allow up to 3*MaxBackends constraints to be saved without having to
- * re-run TestConfiguration. (This is probably more than enough, but
- * we can survive if we run low on space by doing excess runs of
- * TestConfiguration to re-compute constraint lists each time needed.)
- * The last MaxBackends entries in possibleConstraints[] are reserved
- * as output workspace for FindLockCycle.
+ * re-run TestConfiguration. (This is probably more than enough, but we
+ * can survive if we run low on space by doing excess runs of
+ * TestConfiguration to re-compute constraint lists each time needed.) The
+ * last MaxBackends entries in possibleConstraints[] are reserved as
+ * output workspace for FindLockCycle.
*/
maxPossibleConstraints = MaxBackends * 4;
possibleConstraints =
return -1;
/*
- * Check for cycles involving startProc or any of the procs mentioned
- * in constraints. We check startProc last because if it has a soft
- * cycle still to be dealt with, we want to deal with that first.
+ * Check for cycles involving startProc or any of the procs mentioned in
+ * constraints. We check startProc last because if it has a soft cycle
+ * still to be dealt with, we want to deal with that first.
*/
for (i = 0; i < nCurConstraints; i++)
{
if (i == 0)
{
/*
- * record total length of cycle --- outer levels will now
- * fill deadlockDetails[]
+ * record total length of cycle --- outer levels will now fill
+ * deadlockDetails[]
*/
Assert(depth <= MaxBackends);
nDeadlockDetails = depth;
}
/*
- * Otherwise, we have a cycle but it does not include the
- * start point, so say "no deadlock".
+ * Otherwise, we have a cycle but it does not include the start
+ * point, so say "no deadlock".
*/
return false;
}
conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
/*
- * Scan for procs that already hold conflicting locks. These are
- * "hard" edges in the waits-for graph.
+ * Scan for procs that already hold conflicting locks. These are "hard"
+ * edges in the waits-for graph.
*/
procLocks = &(lock->procLocks);
}
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
- offsetof(PROCLOCK, lockLink));
+ offsetof(PROCLOCK, lockLink));
}
/*
* Scan for procs that are ahead of this one in the lock's wait queue.
- * Those that have conflicting requests soft-block this one. This
- * must be done after the hard-block search, since if another proc
- * both hard- and soft-blocks this one, we want to call it a hard
- * edge.
+ * Those that have conflicting requests soft-block this one. This must be
+ * done after the hard-block search, since if another proc both hard- and
+ * soft-blocks this one, we want to call it a hard edge.
*
* If there is a proposed re-ordering of the lock's wait order, use that
* rather than the current wait order.
info->pid = checkProc->pid;
/*
- * Add this edge to the list of soft edges in the
- * cycle
+ * Add this edge to the list of soft edges in the cycle
*/
Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProc;
info->pid = checkProc->pid;
/*
- * Add this edge to the list of soft edges in the
- * cycle
+ * Add this edge to the list of soft edges in the cycle
*/
Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProc;
/*
* Scan constraint list backwards. This is because the last-added
- * constraint is the only one that could fail, and so we want to test
- * it for inconsistency first.
+ * constraint is the only one that could fail, and so we want to test it
+ * for inconsistency first.
*/
for (i = nConstraints; --i >= 0;)
{
Assert(nWaitOrderProcs <= MaxBackends);
/*
- * Do the topo sort. TopoSort need not examine constraints after
- * this one, since they must be for different locks.
+ * Do the topo sort. TopoSort need not examine constraints after this
+ * one, since they must be for different locks.
*/
if (!TopoSort(lock, constraints, i + 1,
waitOrders[nWaitOrders].procs))
}
/*
- * Scan the constraints, and for each proc in the array, generate a
- * count of the number of constraints that say it must be before
- * something else, plus a list of the constraints that say it must be
- * after something else. The count for the j'th proc is stored in
- * beforeConstraints[j], and the head of its list in
- * afterConstraints[j]. Each constraint stores its list link in
- * constraints[i].link (note any constraint will be in just one list).
- * The array index for the before-proc of the i'th constraint is
- * remembered in constraints[i].pred.
+ * Scan the constraints, and for each proc in the array, generate a count
+ * of the number of constraints that say it must be before something else,
+ * plus a list of the constraints that say it must be after something
+ * else. The count for the j'th proc is stored in beforeConstraints[j],
+ * and the head of its list in afterConstraints[j]. Each constraint
+ * stores its list link in constraints[i].link (note any constraint will
+ * be in just one list). The array index for the before-proc of the i'th
+ * constraint is remembered in constraints[i].pred.
*/
MemSet(beforeConstraints, 0, queue_size * sizeof(int));
MemSet(afterConstraints, 0, queue_size * sizeof(int));
DescribeLockTag(&buf2, &info->locktag);
appendStringInfo(&buf,
- _("Process %d waits for %s on %s; blocked by process %d."),
+ _("Process %d waits for %s on %s; blocked by process %d."),
info->pid,
GetLockmodeName(info->lockmode),
buf2.data,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.78 2005/08/01 20:31:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.79 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
lockmode, false, false);
/*
- * Check to see if the relcache entry has been invalidated while we
- * were waiting to lock it. If so, rebuild it, or ereport() trying.
- * Increment the refcount to ensure that RelationFlushRelation will
- * rebuild it and not just delete it. We can skip this if the lock
- * was already held, however.
+ * Check to see if the relcache entry has been invalidated while we were
+ * waiting to lock it. If so, rebuild it, or ereport() trying. Increment
+ * the refcount to ensure that RelationFlushRelation will rebuild it and
+ * not just delete it. We can skip this if the lock was already held,
+ * however.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
{
return false;
/*
- * Check to see if the relcache entry has been invalidated while we
- * were waiting to lock it. If so, rebuild it, or ereport() trying.
- * Increment the refcount to ensure that RelationFlushRelation will
- * rebuild it and not just delete it. We can skip this if the lock
- * was already held, however.
+ * Check to see if the relcache entry has been invalidated while we were
+ * waiting to lock it. If so, rebuild it, or ereport() trying. Increment
+ * the refcount to ensure that RelationFlushRelation will rebuild it and
+ * not just delete it. We can skip this if the lock was already held,
+ * however.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
{
*
* Delete the lock showing that the given transaction ID is running.
* (This is never used for main transaction IDs; those locks are only
- * released implicitly at transaction end. But we do use it for subtrans
+ * released implicitly at transaction end. But we do use it for subtrans
* IDs.)
*/
void
* subtransaction, we will exit as soon as it aborts or its top parent commits.
* It takes some extra work to ensure this, because to save on shared memory
* the XID lock of a subtransaction is released when it ends, whether
- * successfully or unsuccessfully. So we have to check if it's "still running"
+ * successfully or unsuccessfully. So we have to check if it's "still running"
* and if so wait for its parent.
*/
void
}
/*
- * Transaction was committed/aborted/crashed - we have to update
- * pg_clog if transaction is still marked as running.
+ * Transaction was committed/aborted/crashed - we have to update pg_clog
+ * if transaction is still marked as running.
*/
if (!TransactionIdDidCommit(xid) && !TransactionIdDidAbort(xid))
TransactionIdAbort(xid);
}
/*
- * Transaction was committed/aborted/crashed - we have to update
- * pg_clog if transaction is still marked as running.
+ * Transaction was committed/aborted/crashed - we have to update pg_clog
+ * if transaction is still marked as running.
*/
if (!TransactionIdDidCommit(xid) && !TransactionIdDidAbort(xid))
TransactionIdAbort(xid);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.157 2005/08/20 23:26:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.158 2005/10/15 02:49:26 momjian Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
/* This configuration variable is used to set the lock table size */
int max_locks_per_xact; /* set by guc.c */
-#define NLOCKENTS() \
+#define NLOCKENTS() \
mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
{
if (LOCK_DEBUG_ENABLED((LOCK *) MAKE_PTR(proclockP->tag.lock)))
elog(LOG,
- "%s: proclock(%lx) lock(%lx) method(%u) proc(%lx) hold(%x)",
+ "%s: proclock(%lx) lock(%lx) method(%u) proc(%lx) hold(%x)",
where, MAKE_OFFSET(proclockP), proclockP->tag.lock,
PROCLOCK_LOCKMETHOD(*(proclockP)),
proclockP->tag.proc, (int) proclockP->holdMask);
}
-
#else /* not LOCK_DEBUG */
#define LOCK_PRINT(where, lock, type)
static void RemoveLocalLock(LOCALLOCK *locallock);
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
static void WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
- ResourceOwner owner);
+ ResourceOwner owner);
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
- PROCLOCK *proclock, LockMethod lockMethodTable);
+ PROCLOCK *proclock, LockMethod lockMethodTable);
static void CleanUpLock(LOCKMETHODID lockmethodid, LOCK *lock,
- PROCLOCK *proclock, bool wakeupNeeded);
+ PROCLOCK *proclock, bool wakeupNeeded);
/*
elog(FATAL, "could not initialize lock table \"%s\"", tabName);
/*
- * allocate a non-shared hash table for LOCALLOCK structs. This is
- * used to store lock counts and resource owner information.
+ * allocate a non-shared hash table for LOCALLOCK structs. This is used
+ * to store lock counts and resource owner information.
*
- * The non-shared table could already exist in this process (this occurs
- * when the postmaster is recreating shared memory after a backend
- * crash). If so, delete and recreate it. (We could simply leave it,
- * since it ought to be empty in the postmaster, but for safety let's
- * zap it.)
+ * The non-shared table could already exist in this process (this occurs when
+ * the postmaster is recreating shared memory after a backend crash). If
+ * so, delete and recreate it. (We could simply leave it, since it ought
+ * to be empty in the postmaster, but for safety let's zap it.)
*/
if (LockMethodLocalHash[lockmethodid])
hash_destroy(LockMethodLocalHash[lockmethodid]);
locallock->lockOwners = NULL;
locallock->lockOwners = (LOCALLOCKOWNER *)
MemoryContextAlloc(TopMemoryContext,
- locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
+ locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
}
else
{
}
/*
- * If we already hold the lock, we can just increase the count
- * locally.
+ * If we already hold the lock, we can just increase the count locally.
*/
if (locallock->nLocks > 0)
{
/*
* Find or create a lock with this tag.
*
- * Note: if the locallock object already existed, it might have a pointer
- * to the lock already ... but we probably should not assume that that
+ * Note: if the locallock object already existed, it might have a pointer to
+ * the lock already ... but we probably should not assume that that
* pointer is valid, since a lock object with no locks can go away
* anytime.
*/
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
locallock->lock = lock;
/*
* Create the hash key for the proclock table.
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(MyProc);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
locallock->proclock = proclock;
#ifdef CHECK_DEADLOCK_RISK
/*
- * Issue warning if we already hold a lower-level lock on this
- * object and do not hold a lock of the requested level or higher.
- * This indicates a deadlock-prone coding practice (eg, we'd have
- * a deadlock if another backend were following the same code path
- * at about the same time).
+ * Issue warning if we already hold a lower-level lock on this object
+ * and do not hold a lock of the requested level or higher. This
+ * indicates a deadlock-prone coding practice (eg, we'd have a
+ * deadlock if another backend were following the same code path at
+ * about the same time).
*
- * This is not enabled by default, because it may generate log
- * entries about user-level coding practices that are in fact safe
- * in context. It can be enabled to help find system-level
- * problems.
+ * This is not enabled by default, because it may generate log entries
+ * about user-level coding practices that are in fact safe in context.
+ * It can be enabled to help find system-level problems.
*
* XXX Doing numeric comparison on the lockmodes is a hack; it'd be
* better to use a table. For now, though, this works.
*/
{
- int i;
+ int i;
for (i = lockMethodTable->numLockModes; i > 0; i--)
{
if (proclock->holdMask & LOCKBIT_ON(i))
{
if (i >= (int) lockmode)
- break; /* safe: we have a lock >= req level */
+ break; /* safe: we have a lock >= req level */
elog(LOG, "deadlock risk: raising lock level"
" from %s to %s on object %u/%u/%u",
lock_mode_names[i], lock_mode_names[lockmode],
/*
* lock->nRequested and lock->requested[] count the total number of
- * requests, whether granted or waiting, so increment those
- * immediately. The other counts don't increment till we get the lock.
+ * requests, whether granted or waiting, so increment those immediately.
+ * The other counts don't increment till we get the lock.
*/
lock->nRequested++;
lock->requested[lockmode]++;
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
/*
- * We shouldn't already hold the desired lock; else locallock table
- * is broken.
+ * We shouldn't already hold the desired lock; else locallock table is
+ * broken.
*/
if (proclock->holdMask & LOCKBIT_ON(lockmode))
elog(ERROR, "lock %s on object %u/%u/%u is already held",
lock->tag.locktag_field3);
/*
- * If lock requested conflicts with locks requested by waiters, must
- * join wait queue. Otherwise, check for conflict with already-held
- * locks. (That's last because most complex check.)
+ * If lock requested conflicts with locks requested by waiters, must join
+ * wait queue. Otherwise, check for conflict with already-held locks.
+ * (That's last because most complex check.)
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
status = STATUS_FOUND;
/*
* We can't acquire the lock immediately. If caller specified no
- * blocking, remove useless table entries and return NOT_AVAIL
- * without waiting.
+ * blocking, remove useless table entries and return NOT_AVAIL without
+ * waiting.
*/
if (dontWait)
{
/*
* NOTE: do not do any material change of state between here and
* return. All required changes in locktable state must have been
- * done when the lock was granted to us --- see notes in
- * WaitOnLock.
+ * done when the lock was granted to us --- see notes in WaitOnLock.
*/
/*
int i;
/*
- * first check for global conflicts: If no locks conflict with my
- * request, then I get the lock.
+ * first check for global conflicts: If no locks conflict with my request,
+ * then I get the lock.
*
- * Checking for conflict: lock->grantMask represents the types of
- * currently held locks. conflictTable[lockmode] has a bit set for
- * each type of lock that conflicts with request. Bitwise compare
- * tells if there is a conflict.
+ * Checking for conflict: lock->grantMask represents the types of currently
+ * held locks. conflictTable[lockmode] has a bit set for each type of
+ * lock that conflicts with request. Bitwise compare tells if there is a
+ * conflict.
*/
if (!(lockMethodTable->conflictTab[lockmode] & lock->grantMask))
{
}
/*
- * Rats. Something conflicts. But it could still be my own lock.
- * We have to construct a conflict mask that does not reflect our own
- * locks, but only lock types held by other processes.
+ * Rats. Something conflicts. But it could still be my own lock. We have
+ * to construct a conflict mask that does not reflect our own locks, but
+ * only lock types held by other processes.
*/
myLocks = proclock->holdMask;
otherLocks = 0;
for (i = 1; i <= numLockModes; i++)
{
- int myHolding = (myLocks & LOCKBIT_ON(i)) ? 1 : 0;
+ int myHolding = (myLocks & LOCKBIT_ON(i)) ? 1 : 0;
if (lock->granted[i] > myHolding)
otherLocks |= LOCKBIT_ON(i);
/*
* now check again for conflicts. 'otherLocks' describes the types of
- * locks held by other processes. If one of these conflicts with the
- * kind of lock that I want, there is a conflict and I have to sleep.
+ * locks held by other processes. If one of these conflicts with the kind
+ * of lock that I want, there is a conflict and I have to sleep.
*/
if (!(lockMethodTable->conflictTab[lockmode] & otherLocks))
{
}
/*
- * UnGrantLock -- opposite of GrantLock.
+ * UnGrantLock -- opposite of GrantLock.
*
* Updates the lock and proclock data structures to show that the lock
* is no longer held nor requested by the current holder.
UnGrantLock(LOCK *lock, LOCKMODE lockmode,
PROCLOCK *proclock, LockMethod lockMethodTable)
{
- bool wakeupNeeded = false;
+ bool wakeupNeeded = false;
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
/*
- * We need only run ProcLockWakeup if the released lock conflicts with
- * at least one of the lock types requested by waiter(s). Otherwise
- * whatever conflict made them wait must still exist. NOTE: before
- * MVCC, we could skip wakeup if lock->granted[lockmode] was still
- * positive. But that's not true anymore, because the remaining
- * granted locks might belong to some waiter, who could now be
- * awakened because he doesn't conflict with his own locks.
+ * We need only run ProcLockWakeup if the released lock conflicts with at
+ * least one of the lock types requested by waiter(s). Otherwise whatever
+ * conflict made them wait must still exist. NOTE: before MVCC, we could
+ * skip wakeup if lock->granted[lockmode] was still positive. But that's
+ * not true anymore, because the remaining granted locks might belong to
+ * some waiter, who could now be awakened because he doesn't conflict with
+ * his own locks.
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
wakeupNeeded = true;
}
/*
- * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
+ * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
* proclock and lock objects if possible, and call ProcLockWakeup if there
* are remaining requests and the caller says it's OK. (Normally, this
* should be called after UnGrantLock, and wakeupNeeded is the result from
bool wakeupNeeded)
{
/*
- * If this was my last hold on this lock, delete my entry in the
- * proclock table.
+ * If this was my last hold on this lock, delete my entry in the proclock
+ * table.
*/
if (proclock->holdMask == 0)
{
if (lock->nRequested == 0)
{
/*
- * The caller just released the last lock, so garbage-collect the
- * lock object.
+ * The caller just released the last lock, so garbage-collect the lock
+ * object.
*/
LOCK_PRINT("CleanUpLock: deleting", lock, 0);
Assert(SHMQueueEmpty(&(lock->procLocks)));
else if (wakeupNeeded)
{
/* There are waiters on this lock, so wake them up. */
- ProcLockWakeup(LockMethods[lockmethodid], lock);
+ ProcLockWakeup(LockMethods[lockmethodid], lock);
}
}
/*
* NOTE: Think not to put any shared-state cleanup after the call to
- * ProcSleep, in either the normal or failure path. The lock state
- * must be fully set by the lock grantor, or by CheckDeadLock if we
- * give up waiting for the lock. This is necessary because of the
- * possibility that a cancel/die interrupt will interrupt ProcSleep
- * after someone else grants us the lock, but before we've noticed it.
- * Hence, after granting, the locktable state must fully reflect the
- * fact that we own the lock; we can't do additional work on return.
- * Contrariwise, if we fail, any cleanup must happen in xact abort
- * processing, not here, to ensure it will also happen in the
- * cancel/die case.
+ * ProcSleep, in either the normal or failure path. The lock state must
+ * be fully set by the lock grantor, or by CheckDeadLock if we give up
+ * waiting for the lock. This is necessary because of the possibility
+ * that a cancel/die interrupt will interrupt ProcSleep after someone else
+ * grants us the lock, but before we've noticed it. Hence, after granting,
+ * the locktable state must fully reflect the fact that we own the lock;
+ * we can't do additional work on return. Contrariwise, if we fail, any
+ * cleanup must happen in xact abort processing, not here, to ensure it
+ * will also happen in the cancel/die case.
*/
if (ProcSleep(lockMethodTable,
locallock->proclock) != STATUS_OK)
{
/*
- * We failed as a result of a deadlock, see CheckDeadLock(). Quit
- * now.
+ * We failed as a result of a deadlock, see CheckDeadLock(). Quit now.
*/
awaitedLock = NULL;
LOCK_PRINT("WaitOnLock: aborting on lock",
LWLockRelease(lockMethodTable->masterLock);
/*
- * Now that we aren't holding the LockMgrLock, we can give an
- * error report including details about the detected deadlock.
+ * Now that we aren't holding the LockMgrLock, we can give an error
+ * report including details about the detected deadlock.
*/
DeadLockReport();
/* not reached */
* Delete the proclock immediately if it represents no already-held locks.
* (This must happen now because if the owner of the lock decides to
* release it, and the requested/granted counts then go to zero,
- * LockRelease expects there to be no remaining proclocks.)
- * Then see if any other waiters for the lock can be woken up now.
+ * LockRelease expects there to be no remaining proclocks.) Then see if
+ * any other waiters for the lock can be woken up now.
*/
CleanUpLock(lockmethodid, waitLock, proclock, true);
}
/*
* LockRelease -- look up 'locktag' in lock table 'lockmethodid' and
- * release one 'lockmode' lock on it. Release a session lock if
+ * release one 'lockmode' lock on it. Release a session lock if
* 'sessionLock' is true, else release a regular transaction lock.
*
* Side Effects: find any waiting processes that are now wakable,
HASH_FIND, NULL);
/*
- * let the caller print its own error message, too. Do not
- * ereport(ERROR).
+ * let the caller print its own error message, too. Do not ereport(ERROR).
*/
if (!locallock || locallock->nLocks <= 0)
{
}
/*
- * Decrease the total local count. If we're still holding the lock,
- * we're done.
+ * Decrease the total local count. If we're still holding the lock, we're
+ * done.
*/
locallock->nLocks--;
/*
* We don't need to re-find the lock or proclock, since we kept their
- * addresses in the locallock table, and they couldn't have been
- * removed while we were holding a lock on them.
+ * addresses in the locallock table, and they couldn't have been removed
+ * while we were holding a lock on them.
*/
lock = locallock->lock;
LOCK_PRINT("LockRelease: found", lock, lockmode);
PROCLOCK_PRINT("LockRelease: found", proclock);
/*
- * Double-check that we are actually holding a lock of the type we
- * want to release.
+ * Double-check that we are actually holding a lock of the type we want to
+ * release.
*/
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
{
/*
* First we run through the locallock table and get rid of unwanted
- * entries, then we scan the process's proclocks and get rid of those.
- * We do this separately because we may have multiple locallock
- * entries pointing to the same proclock, and we daren't end up with
- * any dangling pointers.
+ * entries, then we scan the process's proclocks and get rid of those. We
+ * do this separately because we may have multiple locallock entries
+ * pointing to the same proclock, and we daren't end up with any dangling
+ * pointers.
*/
hash_seq_init(&status, LockMethodLocalHash[lockmethodid]);
if (locallock->proclock == NULL || locallock->lock == NULL)
{
/*
- * We must've run out of shared memory while trying to set up
- * this lock. Just forget the local entry.
+ * We must've run out of shared memory while trying to set up this
+ * lock. Just forget the local entry.
*/
Assert(locallock->nLocks == 0);
RemoveLocalLock(locallock);
continue;
/*
- * If we are asked to release all locks, we can just zap the
- * entry. Otherwise, must scan to see if there are session locks.
- * We assume there is at most one lockOwners entry for session locks.
+ * If we are asked to release all locks, we can just zap the entry.
+ * Otherwise, must scan to see if there are session locks. We assume
+ * there is at most one lockOwners entry for session locks.
*/
if (!allLocks)
{
/* Get link first, since we may unlink/delete this proclock */
nextplock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
Assert(proclock->tag.proc == MAKE_OFFSET(MyProc));
continue;
/*
- * Scan to see if there are any locks belonging to current owner
- * or its parent
+ * Scan to see if there are any locks belonging to current owner or
+ * its parent
*/
lockOwners = locallock->lockOwners;
for (i = locallock->numLockOwners - 1; i >= 0; i--)
{
TwoPhaseLockRecord record;
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
- int i;
+ int i;
/* Ignore items that are not of the lockmethod to be processed */
if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
/*
* First we run through the locallock table and get rid of unwanted
- * entries, then we scan the process's proclocks and transfer them
- * to the target proc.
+ * entries, then we scan the process's proclocks and transfer them to the
+ * target proc.
*
- * We do this separately because we may have multiple locallock
- * entries pointing to the same proclock, and we daren't end up with
- * any dangling pointers.
+ * We do this separately because we may have multiple locallock entries
+ * pointing to the same proclock, and we daren't end up with any dangling
+ * pointers.
*/
hash_seq_init(&status, LockMethodLocalHash[lockmethodid]);
if (locallock->proclock == NULL || locallock->lock == NULL)
{
/*
- * We must've run out of shared memory while trying to set up
- * this lock. Just forget the local entry.
+ * We must've run out of shared memory while trying to set up this
+ * lock. Just forget the local entry.
*/
Assert(locallock->nLocks == 0);
RemoveLocalLock(locallock);
/* Get link first, since we may unlink/delete this proclock */
nextplock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
Assert(proclock->tag.proc == MAKE_OFFSET(MyProc));
holdMask = proclock->holdMask;
/*
- * We cannot simply modify proclock->tag.proc to reassign ownership
- * of the lock, because that's part of the hash key and the proclock
+ * We cannot simply modify proclock->tag.proc to reassign ownership of
+ * the lock, because that's part of the hash key and the proclock
* would then be in the wrong hash chain. So, unlink and delete the
- * old proclock; create a new one with the right contents; and link
- * it into place. We do it in this order to be certain we won't
- * run out of shared memory (the way dynahash.c works, the deleted
- * object is certain to be available for reallocation).
+ * old proclock; create a new one with the right contents; and link it
+ * into place. We do it in this order to be certain we won't run out
+ * of shared memory (the way dynahash.c works, the deleted object is
+ * certain to be available for reallocation).
*/
SHMQueueDelete(&proclock->lockLink);
SHMQueueDelete(&proclock->procLink);
(void *) &proclocktag,
HASH_ENTER_NULL, &found);
if (!newproclock)
- ereport(PANIC, /* should not happen */
+ ereport(PANIC, /* should not happen */
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errdetail("Not enough memory for reassigning the prepared transaction's locks.")));
size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
/*
- * Note we count only one pair of hash tables, since the userlocks
- * table actually overlays the main one.
+ * Note we count only one pair of hash tables, since the userlocks table
+ * actually overlays the main one.
*
- * Since the lockHash entry count above is only an estimate, add 10%
- * safety margin.
+ * Since the lockHash entry count above is only an estimate, add 10% safety
+ * margin.
*/
size = add_size(size, size / 10);
LOCK_PRINT("DumpLocks", lock, 0);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
}
}
elog(LOG, "DumpAllLocks: proclock->tag.lock = NULL");
}
}
-
#endif /* LOCK_DEBUG */
/*
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
PGPROC *proc = TwoPhaseGetDummyProc(xid);
- LOCKTAG *locktag;
+ LOCKTAG *locktag;
LOCKMODE lockmode;
LOCKMETHODID lockmethodid;
LOCK *lock;
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
/*
/*
* Create the hash key for the proclock table.
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(proc);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
/*
/*
* lock->nRequested and lock->requested[] count the total number of
- * requests, whether granted or waiting, so increment those
- * immediately.
+ * requests, whether granted or waiting, so increment those immediately.
*/
lock->nRequested++;
lock->requested[lockmode]++;
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
PGPROC *proc = TwoPhaseGetDummyProc(xid);
- LOCKTAG *locktag;
+ LOCKTAG *locktag;
LOCKMODE lockmode;
LOCKMETHODID lockmethodid;
PROCLOCKTAG proclocktag;
/*
* Re-find the proclock object (ditto).
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(proc);
proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash[lockmethodid],
elog(PANIC, "failed to re-find shared proclock object");
/*
- * Double-check that we are actually holding a lock of the type we
- * want to release.
+ * Double-check that we are actually holding a lock of the type we want to
+ * release.
*/
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.33 2005/10/12 16:55:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.34 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* All the LWLock structs are allocated as an array in shared memory.
- * (LWLockIds are indexes into the array.) We force the array stride to
+ * (LWLockIds are indexes into the array.) We force the array stride to
* be a power of 2, which saves a few cycles in indexing, but more
* importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
+ * boundaries. This reduces cache contention problems, especially on AMD
* Opterons. (Of course, we have to also ensure that the array start
* address is suitably aligned.)
*
if (Trace_lwlocks)
elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
}
-
#else /* not LOCK_DEBUG */
#define PRINT_LWDEBUG(a,b,c)
#define LOG_LWDEBUG(a,b,c)
int numLocks;
/*
- * Possibly this logic should be spread out among the affected
- * modules, the same way that shmem space estimation is done. But for
- * now, there are few enough users of LWLocks that we can get away
- * with just keeping the knowledge here.
+ * Possibly this logic should be spread out among the affected modules,
+ * the same way that shmem space estimation is done. But for now, there
+ * are few enough users of LWLocks that we can get away with just keeping
+ * the knowledge here.
*/
/* Predefined LWLocks */
numLocks += NUM_SLRU_BUFFERS;
/*
- * multixact.c needs one per MultiXact buffer, but there are
- * two SLRU areas for MultiXact
+ * multixact.c needs one per MultiXact buffer, but there are two SLRU
+ * areas for MultiXact
*/
numLocks += 2 * NUM_SLRU_BUFFERS;
LWLockAssign(void)
{
LWLockId result;
+
/* use volatile pointer to prevent code rearrangement */
volatile int *LWLockCounter;
/*
* We can't wait if we haven't got a PGPROC. This should only occur
- * during bootstrap or shared memory initialization. Put an Assert
- * here to catch unsafe coding practices.
+ * during bootstrap or shared memory initialization. Put an Assert here
+ * to catch unsafe coding practices.
*/
Assert(!(proc == NULL && IsUnderPostmaster));
elog(ERROR, "too many LWLocks taken");
/*
- * Lock out cancel/die interrupts until we exit the code section
- * protected by the LWLock. This ensures that interrupts will not
- * interfere with manipulations of data structures in shared memory.
+ * Lock out cancel/die interrupts until we exit the code section protected
+ * by the LWLock. This ensures that interrupts will not interfere with
+ * manipulations of data structures in shared memory.
*/
HOLD_INTERRUPTS();
* LWLockRelease.
*
* NOTE: it might seem better to have LWLockRelease actually grant us the
- * lock, rather than retrying and possibly having to go back to sleep.
- * But in practice that is no good because it means a process swap for
- * every lock acquisition when two or more processes are contending
- * for the same lock. Since LWLocks are normally used to protect
- * not-very-long sections of computation, a process needs to be able
- * to acquire and release the same lock many times during a single CPU
- * time slice, even in the presence of contention. The efficiency of
- * being able to do that outweighs the inefficiency of sometimes
- * wasting a process dispatch cycle because the lock is not free when
- * a released waiter finally gets to run. See pgsql-hackers archives
- * for 29-Dec-01.
+ * lock, rather than retrying and possibly having to go back to sleep. But
+ * in practice that is no good because it means a process swap for every
+ * lock acquisition when two or more processes are contending for the same
+ * lock. Since LWLocks are normally used to protect not-very-long
+ * sections of computation, a process needs to be able to acquire and
+ * release the same lock many times during a single CPU time slice, even
+ * in the presence of contention. The efficiency of being able to do that
+ * outweighs the inefficiency of sometimes wasting a process dispatch
+ * cycle because the lock is not free when a released waiter finally gets
+ * to run. See pgsql-hackers archives for 29-Dec-01.
*/
for (;;)
{
* Add myself to wait queue.
*
* If we don't have a PGPROC structure, there's no way to wait. This
- * should never occur, since MyProc should only be null during
- * shared memory initialization.
+ * should never occur, since MyProc should only be null during shared
+ * memory initialization.
*/
if (proc == NULL)
elog(FATAL, "cannot wait without a PGPROC structure");
* Wait until awakened.
*
* Since we share the process wait semaphore with the regular lock
- * manager and ProcWaitForSignal, and we may need to acquire an
- * LWLock while one of those is pending, it is possible that we
- * get awakened for a reason other than being signaled by
- * LWLockRelease. If so, loop back and wait again. Once we've
- * gotten the LWLock, re-increment the sema by the number of
- * additional signals received, so that the lock manager or signal
- * manager will see the received signal when it next waits.
+ * manager and ProcWaitForSignal, and we may need to acquire an LWLock
+ * while one of those is pending, it is possible that we get awakened
+ * for a reason other than being signaled by LWLockRelease. If so,
+ * loop back and wait again. Once we've gotten the LWLock,
+ * re-increment the sema by the number of additional signals received,
+ * so that the lock manager or signal manager will see the received
+ * signal when it next waits.
*/
LOG_LWDEBUG("LWLockAcquire", lockid, "waiting");
elog(ERROR, "too many LWLocks taken");
/*
- * Lock out cancel/die interrupts until we exit the code section
- * protected by the LWLock. This ensures that interrupts will not
- * interfere with manipulations of data structures in shared memory.
+ * Lock out cancel/die interrupts until we exit the code section protected
+ * by the LWLock. This ensures that interrupts will not interfere with
+ * manipulations of data structures in shared memory.
*/
HOLD_INTERRUPTS();
PRINT_LWDEBUG("LWLockRelease", lockid, lock);
/*
- * Remove lock from list of locks held. Usually, but not always, it
- * will be the latest-acquired lock; so search array backwards.
+ * Remove lock from list of locks held. Usually, but not always, it will
+ * be the latest-acquired lock; so search array backwards.
*/
for (i = num_held_lwlocks; --i >= 0;)
{
}
/*
- * See if I need to awaken any waiters. If I released a non-last
- * shared hold, there cannot be anything to do. Also, do not awaken
- * any waiters if someone has already awakened waiters that haven't
- * yet acquired the lock.
+ * See if I need to awaken any waiters. If I released a non-last shared
+ * hold, there cannot be anything to do. Also, do not awaken any waiters
+ * if someone has already awakened waiters that haven't yet acquired the
+ * lock.
*/
head = lock->head;
if (head != NULL)
if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
{
/*
- * Remove the to-be-awakened PGPROCs from the queue. If the
- * front waiter wants exclusive lock, awaken him only.
- * Otherwise awaken as many waiters as want shared access.
+ * Remove the to-be-awakened PGPROCs from the queue. If the front
+ * waiter wants exclusive lock, awaken him only. Otherwise awaken
+ * as many waiters as want shared access.
*/
proc = head;
if (!proc->lwExclusive)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.166 2005/10/13 06:24:05 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.167 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
/*
- * Pre-create the PGPROC structures and create a semaphore for
- * each.
+ * Pre-create the PGPROC structures and create a semaphore for each.
*/
procs = (PGPROC *) ShmemAlloc(MaxBackends * sizeof(PGPROC));
if (!procs)
volatile PROC_HDR *procglobal = ProcGlobal;
/*
- * ProcGlobal should be set by a previous call to InitProcGlobal (if
- * we are a backend, we inherit this by fork() from the postmaster).
+ * ProcGlobal should be set by a previous call to InitProcGlobal (if we
+ * are a backend, we inherit this by fork() from the postmaster).
*/
if (procglobal == NULL)
elog(PANIC, "proc header uninitialized");
elog(ERROR, "you already exist");
/*
- * Try to get a proc struct from the free list. If this fails, we
- * must be out of PGPROC structures (not to mention semaphores).
+ * Try to get a proc struct from the free list. If this fails, we must be
+ * out of PGPROC structures (not to mention semaphores).
*
- * While we are holding the ProcStructLock, also copy the current
- * shared estimate of spins_per_delay to local storage.
+ * While we are holding the ProcStructLock, also copy the current shared
+ * estimate of spins_per_delay to local storage.
*/
SpinLockAcquire(ProcStructLock);
else
{
/*
- * If we reach here, all the PGPROCs are in use. This is one of
- * the possible places to detect "too many backends", so give the
- * standard error message.
+ * If we reach here, all the PGPROCs are in use. This is one of the
+ * possible places to detect "too many backends", so give the standard
+ * error message.
*/
SpinLockRelease(ProcStructLock);
ereport(FATAL,
on_shmem_exit(ProcKill, 0);
/*
- * We might be reusing a semaphore that belonged to a failed process.
- * So be careful and reinitialize its value here.
+ * We might be reusing a semaphore that belonged to a failed process. So
+ * be careful and reinitialize its value here.
*/
PGSemaphoreReset(&MyProc->sem);
/*
- * Now that we have a PGPROC, we could try to acquire locks, so
- * initialize the deadlock checker.
+ * Now that we have a PGPROC, we could try to acquire locks, so initialize
+ * the deadlock checker.
*/
InitDeadLockChecking();
}
* Just for paranoia's sake, we use the ProcStructLock to protect
* assignment and releasing of DummyProcs entries.
*
- * While we are holding the ProcStructLock, also copy the current
- * shared estimate of spins_per_delay to local storage.
+ * While we are holding the ProcStructLock, also copy the current shared
+ * estimate of spins_per_delay to local storage.
*/
SpinLockAcquire(ProcStructLock);
SpinLockRelease(ProcStructLock);
/*
- * Initialize all fields of MyProc, except MyProc->sem which was set
- * up by InitProcGlobal.
+ * Initialize all fields of MyProc, except MyProc->sem which was set up by
+ * InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
on_shmem_exit(DummyProcKill, Int32GetDatum(proctype));
/*
- * We might be reusing a semaphore that belonged to a failed process.
- * So be careful and reinitialize its value here.
+ * We might be reusing a semaphore that belonged to a failed process. So
+ * be careful and reinitialize its value here.
*/
PGSemaphoreReset(&MyProc->sem);
}
{
SHMEM_OFFSET offset;
PGPROC *proc;
+
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
{
/*
* Somebody kicked us off the lock queue already. Perhaps they
- * granted us the lock, or perhaps they detected a deadlock. If
- * they did grant us the lock, we'd better remember it in our
- * local lock table.
+ * granted us the lock, or perhaps they detected a deadlock. If they
+ * did grant us the lock, we'd better remember it in our local lock
+ * table.
*/
if (MyProc->waitStatus == STATUS_OK)
GrantAwaitedLock();
/*
* Reset the proc wait semaphore to zero. This is necessary in the
* scenario where someone else granted us the lock we wanted before we
- * were able to remove ourselves from the wait-list. The semaphore
- * will have been bumped to 1 by the would-be grantor, and since we
- * are no longer going to wait on the sema, we have to force it back
- * to zero. Otherwise, our next attempt to wait for a lock will fall
- * through prematurely.
+ * were able to remove ourselves from the wait-list. The semaphore will
+ * have been bumped to 1 by the would-be grantor, and since we are no
+ * longer going to wait on the sema, we have to force it back to zero.
+ * Otherwise, our next attempt to wait for a lock will fall through
+ * prematurely.
*/
PGSemaphoreReset(&MyProc->sem);
/*
- * Return true even if we were kicked off the lock before we were able
- * to remove ourselves.
+ * Return true even if we were kicked off the lock before we were able to
+ * remove ourselves.
*/
return true;
}
Assert(MyProc != NULL);
/*
- * Release any LW locks I am holding. There really shouldn't be any,
- * but it's cheap to check again before we cut the knees off the LWLock
+ * Release any LW locks I am holding. There really shouldn't be any, but
+ * it's cheap to check again before we cut the knees off the LWLock
* facility by releasing our PGPROC ...
*/
LWLockReleaseAll();
/*
* Determine where to add myself in the wait queue.
*
- * Normally I should go at the end of the queue. However, if I already
- * hold locks that conflict with the request of any previous waiter,
- * put myself in the queue just in front of the first such waiter.
- * This is not a necessary step, since deadlock detection would move
- * me to before that waiter anyway; but it's relatively cheap to
- * detect such a conflict immediately, and avoid delaying till
- * deadlock timeout.
+ * Normally I should go at the end of the queue. However, if I already hold
+ * locks that conflict with the request of any previous waiter, put myself
+ * in the queue just in front of the first such waiter. This is not a
+ * necessary step, since deadlock detection would move me to before that
+ * waiter anyway; but it's relatively cheap to detect such a conflict
+ * immediately, and avoid delaying till deadlock timeout.
*
- * Special case: if I find I should go in front of some waiter, check to
- * see if I conflict with already-held locks or the requests before
- * that waiter. If not, then just grant myself the requested lock
- * immediately. This is the same as the test for immediate grant in
- * LockAcquire, except we are only considering the part of the wait
- * queue before my insertion point.
+ * Special case: if I find I should go in front of some waiter, check to see
+ * if I conflict with already-held locks or the requests before that
+ * waiter. If not, then just grant myself the requested lock immediately.
+ * This is the same as the test for immediate grant in LockAcquire, except
+ * we are only considering the part of the wait queue before my insertion
+ * point.
*/
if (myHeldLocks != 0)
{
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
{
/*
- * Yes, so we have a deadlock. Easiest way to clean
- * up correctly is to call RemoveFromWaitQueue(), but
- * we can't do that until we are *on* the wait queue.
- * So, set a flag to check below, and break out of
- * loop. Also, record deadlock info for later
- * message.
+ * Yes, so we have a deadlock. Easiest way to clean up
+ * correctly is to call RemoveFromWaitQueue(), but we
+ * can't do that until we are *on* the wait queue. So, set
+ * a flag to check below, and break out of loop. Also,
+ * record deadlock info for later message.
*/
RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
early_deadlock = true;
}
/*
- * If we fall out of loop normally, proc points to waitQueue head,
- * so we will insert at tail of queue as desired.
+ * If we fall out of loop normally, proc points to waitQueue head, so
+ * we will insert at tail of queue as desired.
*/
}
else
}
/*
- * Insert self into queue, ahead of the given proc (or at tail of
- * queue).
+ * Insert self into queue, ahead of the given proc (or at tail of queue).
*/
SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
waitQueue->size++;
MyProc->waitStatus = STATUS_ERROR; /* initialize result for error */
/*
- * If we detected deadlock, give up without waiting. This must agree
- * with CheckDeadLock's recovery code, except that we shouldn't
- * release the semaphore since we haven't tried to lock it yet.
+ * If we detected deadlock, give up without waiting. This must agree with
+ * CheckDeadLock's recovery code, except that we shouldn't release the
+ * semaphore since we haven't tried to lock it yet.
*/
if (early_deadlock)
{
* Release the locktable's masterLock.
*
* NOTE: this may also cause us to exit critical-section state, possibly
- * allowing a cancel/die interrupt to be accepted. This is OK because
- * we have recorded the fact that we are waiting for a lock, and so
+ * allowing a cancel/die interrupt to be accepted. This is OK because we
+ * have recorded the fact that we are waiting for a lock, and so
* LockWaitCancel will clean up if cancel/die happens.
*/
LWLockRelease(masterLock);
/*
- * Set timer so we can wake up after awhile and check for a deadlock.
- * If a deadlock is detected, the handler releases the process's
- * semaphore and sets MyProc->waitStatus = STATUS_ERROR, allowing us
- * to know that we must report failure rather than success.
+ * Set timer so we can wake up after awhile and check for a deadlock. If a
+ * deadlock is detected, the handler releases the process's semaphore and
+ * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
+ * must report failure rather than success.
*
- * By delaying the check until we've waited for a bit, we can avoid
- * running the rather expensive deadlock-check code in most cases.
+ * By delaying the check until we've waited for a bit, we can avoid running
+ * the rather expensive deadlock-check code in most cases.
*/
if (!enable_sig_alarm(DeadlockTimeout, false))
elog(FATAL, "could not set timer for process wakeup");
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
- * PGSemaphoreLock will not block. The wakeup is "saved" by the
- * semaphore implementation. Note also that if CheckDeadLock is
- * invoked but does not detect a deadlock, PGSemaphoreLock() will
- * continue to wait. There used to be a loop here, but it was useless
- * code...
+ * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
+ * implementation. Note also that if CheckDeadLock is invoked but does
+ * not detect a deadlock, PGSemaphoreLock() will continue to wait. There
+ * used to be a loop here, but it was useless code...
*
- * We pass interruptOK = true, which eliminates a window in which
- * cancel/die interrupts would be held off undesirably. This is a
- * promise that we don't mind losing control to a cancel/die interrupt
- * here. We don't, because we have no shared-state-change work to do
- * after being granted the lock (the grantor did it all). We do have
- * to worry about updating the locallock table, but if we lose control
- * to an error, LockWaitCancel will fix that up.
+ * We pass interruptOK = true, which eliminates a window in which cancel/die
+ * interrupts would be held off undesirably. This is a promise that we
+ * don't mind losing control to a cancel/die interrupt here. We don't,
+ * because we have no shared-state-change work to do after being granted
+ * the lock (the grantor did it all). We do have to worry about updating
+ * the locallock table, but if we lose control to an error, LockWaitCancel
+ * will fix that up.
*/
PGSemaphoreLock(&MyProc->sem, true);
elog(FATAL, "could not disable timer for process wakeup");
/*
- * Re-acquire the locktable's masterLock. We have to do this to hold
- * off cancel/die interrupts before we can mess with waitingForLock
- * (else we might have a missed or duplicated locallock update).
+ * Re-acquire the locktable's masterLock. We have to do this to hold off
+ * cancel/die interrupts before we can mess with waitingForLock (else we
+ * might have a missed or duplicated locallock update).
*/
LWLockAcquire(masterLock, LW_EXCLUSIVE);
LOCKMODE lockmode = proc->waitLockMode;
/*
- * Waken if (a) doesn't conflict with requests of earlier waiters,
- * and (b) doesn't conflict with already-held locks.
+ * Waken if (a) doesn't conflict with requests of earlier waiters, and
+ * (b) doesn't conflict with already-held locks.
*/
if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
LockCheckConflicts(lockMethodTable,
proc = ProcWakeup(proc, STATUS_OK);
/*
- * ProcWakeup removes proc from the lock's waiting process
- * queue and returns the next proc in chain; don't use proc's
- * next-link, because it's been cleared.
+ * ProcWakeup removes proc from the lock's waiting process queue
+ * and returns the next proc in chain; don't use proc's next-link,
+ * because it's been cleared.
*/
}
else
{
/*
- * Cannot wake this guy. Remember his request for later
- * checks.
+ * Cannot wake this guy. Remember his request for later checks.
*/
aheadRequests |= LOCKBIT_ON(lockmode);
proc = (PGPROC *) MAKE_PTR(proc->links.next);
* Acquire locktable lock. Note that the deadlock check interrupt had
* better not be enabled anywhere that this process itself holds the
* locktable lock, else this will wait forever. Also note that
- * LWLockAcquire creates a critical section, so that this routine
- * cannot be interrupted by cancel/die interrupts.
+ * LWLockAcquire creates a critical section, so that this routine cannot
+ * be interrupted by cancel/die interrupts.
*/
LWLockAcquire(LockMgrLock, LW_EXCLUSIVE);
/*
* Check to see if we've been awoken by anyone in the interim.
*
- * If we have we can return and resume our transaction -- happy day.
- * Before we are awoken the process releasing the lock grants it to us
- * so we know that we don't have to wait anymore.
+ * If we have we can return and resume our transaction -- happy day. Before
+ * we are awoken the process releasing the lock grants it to us so we know
+ * that we don't have to wait anymore.
*
* We check by looking to see if we've been unlinked from the wait queue.
- * This is quicker than checking our semaphore's state, since no
- * kernel call is needed, and it is safe because we hold the locktable
- * lock.
+ * This is quicker than checking our semaphore's state, since no kernel
+ * call is needed, and it is safe because we hold the locktable lock.
*/
if (MyProc->links.prev == INVALID_OFFSET ||
MyProc->links.next == INVALID_OFFSET)
RemoveFromWaitQueue(MyProc);
/*
- * Set MyProc->waitStatus to STATUS_ERROR so that ProcSleep will
- * report an error after we return from the signal handler.
+ * Set MyProc->waitStatus to STATUS_ERROR so that ProcSleep will report an
+ * error after we return from the signal handler.
*/
MyProc->waitStatus = STATUS_ERROR;
PGSemaphoreUnlock(&MyProc->sem);
/*
- * We're done here. Transaction abort caused by the error that
- * ProcSleep will raise will cause any other locks we hold to be
- * released, thus allowing other processes to wake up; we don't need
- * to do that here. NOTE: an exception is that releasing locks we hold
- * doesn't consider the possibility of waiters that were blocked
- * behind us on the lock we just failed to get, and might now be
- * wakable because we're not in front of them anymore. However,
- * RemoveFromWaitQueue took care of waking up any such processes.
+ * We're done here. Transaction abort caused by the error that ProcSleep
+ * will raise will cause any other locks we hold to be released, thus
+ * allowing other processes to wake up; we don't need to do that here.
+ * NOTE: an exception is that releasing locks we hold doesn't consider the
+ * possibility of waiters that were blocked behind us on the lock we just
+ * failed to get, and might now be wakable because we're not in front of
+ * them anymore. However, RemoveFromWaitQueue took care of waking up any
+ * such processes.
*/
LWLockRelease(LockMgrLock);
}
#ifndef __BEOS__
struct itimerval timeval;
-
#else
bigtime_t time_interval;
#endif
/*
* Begin deadlock timeout with statement-level timeout active
*
- * Here, we want to interrupt at the closer of the two timeout times.
- * If fin_time >= statement_fin_time then we need not touch the
- * existing timer setting; else set up to interrupt at the
- * deadlock timeout time.
+ * Here, we want to interrupt at the closer of the two timeout times. If
+ * fin_time >= statement_fin_time then we need not touch the existing
+ * timer setting; else set up to interrupt at the deadlock timeout
+ * time.
*
* NOTE: in this case it is possible that this routine will be
* interrupted by the previously-set timer alarm. This is okay
- * because the signal handler will do only what it should do
- * according to the state variables. The deadlock checker may get
- * run earlier than normal, but that does no harm.
+ * because the signal handler will do only what it should do according
+ * to the state variables. The deadlock checker may get run earlier
+ * than normal, but that does no harm.
*/
deadlock_timeout_active = true;
if (fin_time.tv_sec > statement_fin_time.tv_sec ||
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.39 2005/10/11 20:41:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.40 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
s_lock(volatile slock_t *lock, const char *file, int line)
{
/*
- * We loop tightly for awhile, then delay using pg_usleep() and try
- * again. Preferably, "awhile" should be a small multiple of the
- * maximum time we expect a spinlock to be held. 100 iterations seems
- * about right as an initial guess. However, on a uniprocessor the
- * loop is a waste of cycles, while in a multi-CPU scenario it's usually
- * better to spin a bit longer than to call the kernel, so we try to
- * adapt the spin loop count depending on whether we seem to be in
- * a uniprocessor or multiprocessor.
+ * We loop tightly for awhile, then delay using pg_usleep() and try again.
+ * Preferably, "awhile" should be a small multiple of the maximum time we
+ * expect a spinlock to be held. 100 iterations seems about right as an
+ * initial guess. However, on a uniprocessor the loop is a waste of
+ * cycles, while in a multi-CPU scenario it's usually better to spin a bit
+ * longer than to call the kernel, so we try to adapt the spin loop count
+ * depending on whether we seem to be in a uniprocessor or multiprocessor.
*
- * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd
- * be wrong; there are platforms where that can result in a "stuck
- * spinlock" failure. This has been seen particularly on Alphas; it
- * seems that the first TAS after returning from kernel space will always
- * fail on that hardware.
+ * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd be
+ * wrong; there are platforms where that can result in a "stuck spinlock"
+ * failure. This has been seen particularly on Alphas; it seems that the
+ * first TAS after returning from kernel space will always fail on that
+ * hardware.
*
- * Once we do decide to block, we use randomly increasing pg_usleep()
- * delays. The first delay is 1 msec, then the delay randomly
- * increases to about one second, after which we reset to 1 msec and
- * start again. The idea here is that in the presence of heavy
- * contention we need to increase the delay, else the spinlock holder
- * may never get to run and release the lock. (Consider situation
- * where spinlock holder has been nice'd down in priority by the
- * scheduler --- it will not get scheduled until all would-be
- * acquirers are sleeping, so if we always use a 1-msec sleep, there
- * is a real possibility of starvation.) But we can't just clamp the
- * delay to an upper bound, else it would take a long time to make a
- * reasonable number of tries.
+ * Once we do decide to block, we use randomly increasing pg_usleep() delays.
+ * The first delay is 1 msec, then the delay randomly increases to about
+ * one second, after which we reset to 1 msec and start again. The idea
+ * here is that in the presence of heavy contention we need to increase
+ * the delay, else the spinlock holder may never get to run and release
+ * the lock. (Consider situation where spinlock holder has been nice'd
+ * down in priority by the scheduler --- it will not get scheduled until
+ * all would-be acquirers are sleeping, so if we always use a 1-msec
+ * sleep, there is a real possibility of starvation.) But we can't just
+ * clamp the delay to an upper bound, else it would take a long time to
+ * make a reasonable number of tries.
*
- * We time out and declare error after NUM_DELAYS delays (thus, exactly
- * that many tries). With the given settings, this will usually take
- * 2 or so minutes. It seems better to fix the total number of tries
- * (and thus the probability of unintended failure) than to fix the
- * total time spent.
+ * We time out and declare error after NUM_DELAYS delays (thus, exactly that
+ * many tries). With the given settings, this will usually take 2 or so
+ * minutes. It seems better to fix the total number of tries (and thus
+ * the probability of unintended failure) than to fix the total time
+ * spent.
*
- * The pg_usleep() delays are measured in milliseconds because 1 msec
- * is a common resolution limit at the OS level for newer platforms.
- * On older platforms the resolution limit is usually 10 msec, in
- * which case the total delay before timeout will be a bit more.
+ * The pg_usleep() delays are measured in milliseconds because 1 msec is a
+ * common resolution limit at the OS level for newer platforms. On older
+ * platforms the resolution limit is usually 10 msec, in which case the
+ * total delay before timeout will be a bit more.
*/
-#define MIN_SPINS_PER_DELAY 10
-#define MAX_SPINS_PER_DELAY 1000
+#define MIN_SPINS_PER_DELAY 10
+#define MAX_SPINS_PER_DELAY 1000
#define NUM_DELAYS 1000
#define MIN_DELAY_MSEC 1
#define MAX_DELAY_MSEC 1000
if (++delays > NUM_DELAYS)
s_lock_stuck(lock, file, line);
- if (cur_delay == 0) /* first time to delay? */
+ if (cur_delay == 0) /* first time to delay? */
cur_delay = MIN_DELAY_MSEC;
pg_usleep(cur_delay * 1000L);
/* increase delay by a random fraction between 1X and 2X */
cur_delay += (int) (cur_delay *
- (((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
+ (((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
/* wrap back to minimum delay when max is exceeded */
if (cur_delay > MAX_DELAY_MSEC)
cur_delay = MIN_DELAY_MSEC;
/*
* If we were able to acquire the lock without delaying, it's a good
- * indication we are in a multiprocessor. If we had to delay, it's
- * a sign (but not a sure thing) that we are in a uniprocessor.
- * Hence, we decrement spins_per_delay slowly when we had to delay,
- * and increase it rapidly when we didn't. It's expected that
- * spins_per_delay will converge to the minimum value on a uniprocessor
- * and to the maximum value on a multiprocessor.
+ * indication we are in a multiprocessor. If we had to delay, it's a sign
+ * (but not a sure thing) that we are in a uniprocessor. Hence, we
+ * decrement spins_per_delay slowly when we had to delay, and increase it
+ * rapidly when we didn't. It's expected that spins_per_delay will
+ * converge to the minimum value on a uniprocessor and to the maximum
+ * value on a multiprocessor.
*
- * Note: spins_per_delay is local within our current process.
- * We want to average these observations across multiple backends,
- * since it's relatively rare for this function to even get entered,
- * and so a single backend might not live long enough to converge on
- * a good value. That is handled by the two routines below.
+ * Note: spins_per_delay is local within our current process. We want to
+ * average these observations across multiple backends, since it's
+ * relatively rare for this function to even get entered, and so a single
+ * backend might not live long enough to converge on a good value. That
+ * is handled by the two routines below.
*/
if (cur_delay == 0)
{
update_spins_per_delay(int shared_spins_per_delay)
{
/*
- * We use an exponential moving average with a relatively slow
- * adaption rate, so that noise in any one backend's result won't
- * affect the shared value too much. As long as both inputs are
- * within the allowed range, the result must be too, so we need not
- * worry about clamping the result.
+ * We use an exponential moving average with a relatively slow adaption
+ * rate, so that noise in any one backend's result won't affect the shared
+ * value too much. As long as both inputs are within the allowed range,
+ * the result must be too, so we need not worry about clamping the result.
*
- * We deliberately truncate rather than rounding; this is so that
- * single adjustments inside a backend can affect the shared estimate
- * (see the asymmetric adjustment rules above).
+ * We deliberately truncate rather than rounding; this is so that single
+ * adjustments inside a backend can affect the shared estimate (see the
+ * asymmetric adjustment rules above).
*/
return (shared_spins_per_delay * 15 + spins_per_delay) / 16;
}
__asm__ __volatile__(
#if defined(__NetBSD__) && defined(__ELF__)
/* no underscore for label and % for registers */
- "\
+ "\
.global tas \n\
tas: \n\
movel %sp@(0x4),%a0 \n\
moveq #0,%d0 \n\
rts \n"
#else
- "\
+ "\
.global _tas \n\
_tas: \n\
movel sp@(0x4),a0 \n\
moveq #0,d0 \n\
rts \n"
#endif /* __NetBSD__ && __ELF__ */
-);
+ );
}
#endif /* __m68k__ && !__linux__ */
-
#else /* not __GNUC__ */
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/spin.c,v 1.16 2004/12/31 22:01:05 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/spin.c,v 1.17 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
return 0;
}
-
#else /* !HAVE_SPINLOCKS */
/*
SpinlockSemas(void)
{
/*
- * It would be cleaner to distribute this logic into the affected
- * modules, similar to the way shmem space estimation is handled.
+ * It would be cleaner to distribute this logic into the affected modules,
+ * similar to the way shmem space estimation is handled.
*
- * For now, though, we just need a few spinlocks (10 should be plenty)
- * plus one for each LWLock.
+ * For now, though, we just need a few spinlocks (10 should be plenty) plus
+ * one for each LWLock.
*/
return NumLWLocks() + 10;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.66 2005/09/22 16:45:59 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.67 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ereport(PANIC,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
- phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
+ phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
/*
* Select offsetNumber to place the new item at
/*
* Compute new lower and upper pointers for page, see if it'll fit.
*
- * Note: do arithmetic as signed ints, to avoid mistakes if, say,
- * alignedSize > pd_upper.
+ * Note: do arithmetic as signed ints, to avoid mistakes if, say, alignedSize
+ * > pd_upper.
*/
if (offsetNumber == limit || needshuffle)
lower = phdr->pd_lower + sizeof(ItemIdData);
return InvalidOffsetNumber;
/*
- * OK to insert the item. First, shuffle the existing pointers if
- * needed.
+ * OK to insert the item. First, shuffle the existing pointers if needed.
*/
itemId = PageGetItemId(phdr, offsetNumber);
Offset upper;
/*
- * It's worth the trouble to be more paranoid here than in most
- * places, because we are about to reshuffle data in (what is usually)
- * a shared disk buffer. If we aren't careful then corrupted
- * pointers, lengths, etc could cause us to clobber adjacent disk
- * buffers, spreading the data loss further. So, check everything.
+ * It's worth the trouble to be more paranoid here than in most places,
+ * because we are about to reshuffle data in (what is usually) a shared
+ * disk buffer. If we aren't careful then corrupted pointers, lengths,
+ * etc could cause us to clobber adjacent disk buffers, spreading the data
+ * loss further. So, check everything.
*/
if (pd_lower < SizeOfPageHeaderData ||
pd_lower > pd_upper ||
if (totallen > (Size) (pd_special - pd_lower))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted item lengths: total %u, available space %u",
- (unsigned int) totallen, pd_special - pd_lower)));
+ errmsg("corrupted item lengths: total %u, available space %u",
+ (unsigned int) totallen, pd_special - pd_lower)));
/* sort itemIdSortData array into decreasing itemoff order */
qsort((char *) itemidbase, nused, sizeof(itemIdSortData),
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
- phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
+ phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
nline = PageGetMaxOffsetNumber(page);
if ((int) offnum <= 0 || (int) offnum > nline)
offset, (unsigned int) size)));
/*
- * First, we want to get rid of the pd_linp entry for the index tuple.
- * We copy all subsequent linp's back one slot in the array. We don't
- * use PageGetItemId, because we are manipulating the _array_, not
- * individual linp's.
+ * First, we want to get rid of the pd_linp entry for the index tuple. We
+ * copy all subsequent linp's back one slot in the array. We don't use
+ * PageGetItemId, because we are manipulating the _array_, not individual
+ * linp's.
*/
nbytes = phdr->pd_lower -
((char *) &phdr->pd_linp[offidx + 1] - (char *) phdr);
/*
* Now move everything between the old upper bound (beginning of tuple
- * space) and the beginning of the deleted tuple forward, so that
- * space in the middle of the page is left free. If we've just
- * deleted the tuple at the beginning of tuple space, then there's no
- * need to do the copy (and bcopy on some architectures SEGV's if
- * asked to move zero bytes).
+ * space) and the beginning of the deleted tuple forward, so that space in
+ * the middle of the page is left free. If we've just deleted the tuple
+ * at the beginning of tuple space, then there's no need to do the copy
+ * (and bcopy on some architectures SEGV's if asked to move zero bytes).
*/
/* beginning of tuple space */
/*
* Finally, we need to adjust the linp entries that remain.
*
- * Anything that used to be before the deleted tuple's data was moved
- * forward by the size of the deleted tuple.
+ * Anything that used to be before the deleted tuple's data was moved forward
+ * by the size of the deleted tuple.
*/
if (!PageIsEmpty(page))
{
* PageIndexMultiDelete
*
* This routine handles the case of deleting multiple tuples from an
- * index page at once. It is considerably faster than a loop around
+ * index page at once. It is considerably faster than a loop around
* PageIndexTupleDelete ... however, the caller *must* supply the array
* of item numbers to be deleted in item number order!
*/
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
- pd_lower, pd_upper, pd_special)));
+ pd_lower, pd_upper, pd_special)));
/*
- * Scan the item pointer array and build a list of just the ones we
- * are going to keep. Notice we do not modify the page yet, since
- * we are still validity-checking.
+ * Scan the item pointer array and build a list of just the ones we are
+ * going to keep. Notice we do not modify the page yet, since we are
+ * still validity-checking.
*/
nline = PageGetMaxOffsetNumber(page);
itemidbase = (itemIdSort) palloc(sizeof(itemIdSortData) * nline);
}
else
{
- itemidptr->offsetindex = nused; /* where it will go */
+ itemidptr->offsetindex = nused; /* where it will go */
itemidptr->itemoff = offset;
itemidptr->olditemid = *lp;
itemidptr->alignedlen = MAXALIGN(size);
if (totallen > (Size) (pd_special - pd_lower))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted item lengths: total %u, available space %u",
- (unsigned int) totallen, pd_special - pd_lower)));
+ errmsg("corrupted item lengths: total %u, available space %u",
+ (unsigned int) totallen, pd_special - pd_lower)));
/* sort itemIdSortData array into decreasing itemoff order */
qsort((char *) itemidbase, nused, sizeof(itemIdSortData),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.117 2005/07/04 04:51:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.118 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Create pending-operations hashtable if we need it. Currently, we
- * need it if we are standalone (not under a postmaster) OR if we are
- * a bootstrap-mode subprocess of a postmaster (that is, a startup or
+ * Create pending-operations hashtable if we need it. Currently, we need
+ * it if we are standalone (not under a postmaster) OR if we are a
+ * bootstrap-mode subprocess of a postmaster (that is, a startup or
* bgwriter process).
*/
if (!IsUnderPostmaster || IsBootstrapProcessingMode())
pendingOpsTable = hash_create("Pending Ops Table",
100L,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
}
return true;
int save_errno = errno;
/*
- * During bootstrap, there are cases where a system relation will
- * be accessed (by internal backend processes) before the
- * bootstrap script nominally creates it. Therefore, allow the
- * file to exist already, even if isRedo is not set. (See also
- * mdopen)
+ * During bootstrap, there are cases where a system relation will be
+ * accessed (by internal backend processes) before the bootstrap
+ * script nominally creates it. Therefore, allow the file to exist
+ * already, even if isRedo is not set. (See also mdopen)
*/
if (isRedo || IsBootstrapProcessingMode())
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
#endif
/*
- * Note: because caller obtained blocknum by calling _mdnblocks, which
- * did a seek(SEEK_END), this seek is often redundant and will be
- * optimized away by fd.c. It's not redundant, however, if there is a
- * partial page at the end of the file. In that case we want to try
- * to overwrite the partial page with a full page. It's also not
- * redundant if bufmgr.c had to dump another buffer of the same file
- * to make room for the new page's buffer.
+ * Note: because caller obtained blocknum by calling _mdnblocks, which did
+ * a seek(SEEK_END), this seek is often redundant and will be optimized
+ * away by fd.c. It's not redundant, however, if there is a partial page
+ * at the end of the file. In that case we want to try to overwrite the
+ * partial page with a full page. It's also not redundant if bufmgr.c had
+ * to dump another buffer of the same file to make room for the new page's
+ * buffer.
*/
if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
return false;
if (fd < 0)
{
/*
- * During bootstrap, there are cases where a system relation will
- * be accessed (by internal backend processes) before the
- * bootstrap script nominally creates it. Therefore, accept
- * mdopen() as a substitute for mdcreate() in bootstrap mode only.
- * (See mdcreate)
+ * During bootstrap, there are cases where a system relation will be
+ * accessed (by internal backend processes) before the bootstrap
+ * script nominally creates it. Therefore, accept mdopen() as a
+ * substitute for mdcreate() in bootstrap mode only. (See mdcreate)
*/
if (IsBootstrapProcessingMode())
fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, 0600);
if ((nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ)) != BLCKSZ)
{
/*
- * If we are at or past EOF, return zeroes without complaining.
- * Also substitute zeroes if we found a partial block at EOF.
+ * If we are at or past EOF, return zeroes without complaining. Also
+ * substitute zeroes if we found a partial block at EOF.
*
* XXX this is really ugly, bad design. However the current
* implementation of hash indexes requires it, because hash index
BlockNumber segno = 0;
/*
- * Skip through any segments that aren't the last one, to avoid
- * redundant seeks on them. We have previously verified that these
- * segments are exactly RELSEG_SIZE long, and it's useless to recheck
- * that each time. (NOTE: this assumption could only be wrong if
- * another backend has truncated the relation. We rely on higher code
- * levels to handle that scenario by closing and re-opening the md
- * fd.)
+ * Skip through any segments that aren't the last one, to avoid redundant
+ * seeks on them. We have previously verified that these segments are
+ * exactly RELSEG_SIZE long, and it's useless to recheck that each time.
+ * (NOTE: this assumption could only be wrong if another backend has
+ * truncated the relation. We rely on higher code levels to handle that
+ * scenario by closing and re-opening the md fd.)
*/
while (v->mdfd_chain != NULL)
{
if (v->mdfd_chain == NULL)
{
/*
- * Because we pass O_CREAT, we will create the next segment
- * (with zero length) immediately, if the last segment is of
- * length REL_SEGSIZE. This is unnecessary but harmless, and
- * testing for the case would take more cycles than it seems
- * worth.
+ * Because we pass O_CREAT, we will create the next segment (with
+ * zero length) immediately, if the last segment is of length
+ * REL_SEGSIZE. This is unnecessary but harmless, and testing for
+ * the case would take more cycles than it seems worth.
*/
v->mdfd_chain = _mdfd_openseg(reln, segno, O_CREAT);
if (v->mdfd_chain == NULL)
if (priorblocks > nblocks)
{
/*
- * This segment is no longer wanted at all (and has already
- * been unlinked from the mdfd_chain). We truncate the file
- * before deleting it because if other backends are holding
- * the file open, the unlink will fail on some platforms.
- * Better a zero-size file gets left around than a big file...
+ * This segment is no longer wanted at all (and has already been
+ * unlinked from the mdfd_chain). We truncate the file before
+ * deleting it because if other backends are holding the file
+ * open, the unlink will fail on some platforms. Better a
+ * zero-size file gets left around than a big file...
*/
FileTruncate(v->mdfd_vfd, 0);
FileUnlink(v->mdfd_vfd);
else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
{
/*
- * This is the last segment we want to keep. Truncate the file
- * to the right length, and clear chain link that points to
- * any remaining segments (which we shall zap). NOTE: if
- * nblocks is exactly a multiple K of RELSEG_SIZE, we will
- * truncate the K+1st segment to 0 length but keep it. This is
- * mainly so that the right thing happens if nblocks==0.
+ * This is the last segment we want to keep. Truncate the file to
+ * the right length, and clear chain link that points to any
+ * remaining segments (which we shall zap). NOTE: if nblocks is
+ * exactly a multiple K of RELSEG_SIZE, we will truncate the K+1st
+ * segment to 0 length but keep it. This is mainly so that the
+ * right thing happens if nblocks==0.
*/
BlockNumber lastsegblocks = nblocks - priorblocks;
else
{
/*
- * We still need this segment and 0 or more blocks beyond it,
- * so nothing to do here.
+ * We still need this segment and 0 or more blocks beyond it, so
+ * nothing to do here.
*/
v = v->mdfd_chain;
}
/*
* If we are in the bgwriter, the sync had better include all fsync
- * requests that were queued by backends before the checkpoint REDO
- * point was determined. We go that a little better by accepting all
- * requests queued up to the point where we start fsync'ing.
+ * requests that were queued by backends before the checkpoint REDO point
+ * was determined. We go that a little better by accepting all requests
+ * queued up to the point where we start fsync'ing.
*/
AbsorbFsyncRequests();
while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * If fsync is off then we don't have to bother opening the file
- * at all. (We delay checking until this point so that changing
- * fsync on the fly behaves sensibly.)
+ * If fsync is off then we don't have to bother opening the file at
+ * all. (We delay checking until this point so that changing fsync on
+ * the fly behaves sensibly.)
*/
if (enableFsync)
{
MdfdVec *seg;
/*
- * Find or create an smgr hash entry for this relation. This
- * may seem a bit unclean -- md calling smgr? But it's really
- * the best solution. It ensures that the open file reference
- * isn't permanently leaked if we get an error here. (You may
- * say "but an unreferenced SMgrRelation is still a leak!" Not
- * really, because the only case in which a checkpoint is done
- * by a process that isn't about to shut down is in the
- * bgwriter, and it will periodically do smgrcloseall(). This
- * fact justifies our not closing the reln in the success path
- * either, which is a good thing since in non-bgwriter cases
- * we couldn't safely do that.) Furthermore, in many cases
- * the relation will have been dirtied through this same smgr
- * relation, and so we can save a file open/close cycle.
+ * Find or create an smgr hash entry for this relation. This may
+ * seem a bit unclean -- md calling smgr? But it's really the
+ * best solution. It ensures that the open file reference isn't
+ * permanently leaked if we get an error here. (You may say "but
+ * an unreferenced SMgrRelation is still a leak!" Not really,
+ * because the only case in which a checkpoint is done by a
+ * process that isn't about to shut down is in the bgwriter, and
+ * it will periodically do smgrcloseall(). This fact justifies
+ * our not closing the reln in the success path either, which is a
+ * good thing since in non-bgwriter cases we couldn't safely do
+ * that.) Furthermore, in many cases the relation will have been
+ * dirtied through this same smgr relation, and so we can save a
+ * file open/close cycle.
*/
reln = smgropen(entry->rnode);
/*
- * It is possible that the relation has been dropped or
- * truncated since the fsync request was entered. Therefore,
- * we have to allow file-not-found errors. This applies both
- * during _mdfd_getseg() and during FileSync, since fd.c might
- * have closed the file behind our back.
+ * It is possible that the relation has been dropped or truncated
+ * since the fsync request was entered. Therefore, we have to
+ * allow file-not-found errors. This applies both during
+ * _mdfd_getseg() and during FileSync, since fd.c might have
+ * closed the file behind our back.
*/
seg = _mdfd_getseg(reln,
entry->segno * ((BlockNumber) RELSEG_SIZE),
{
/*
* We will create the next segment only if the target block is
- * within it. This prevents Sorcerer's Apprentice syndrome if
- * a bug at higher levels causes us to be handed a
- * ridiculously large blkno --- otherwise we could create many
- * thousands of empty segment files before reaching the
- * "target" block. We should never need to create more than
- * one new segment per call, so this restriction seems
- * reasonable.
+ * within it. This prevents Sorcerer's Apprentice syndrome if a
+ * bug at higher levels causes us to be handed a ridiculously
+ * large blkno --- otherwise we could create many thousands of
+ * empty segment files before reaching the "target" block. We
+ * should never need to create more than one new segment per call,
+ * so this restriction seems reasonable.
*
* BUT: when doing WAL recovery, disable this logic and create
- * segments unconditionally. In this case it seems better
- * to assume the given blkno is good (it presumably came from
- * a CRC-checked WAL record); furthermore this lets us cope
- * in the case where we are replaying WAL data that has a write
- * into a high-numbered segment of a relation that was later
- * deleted. We want to go ahead and create the segments so
- * we can finish out the replay.
+ * segments unconditionally. In this case it seems better to
+ * assume the given blkno is good (it presumably came from a
+ * CRC-checked WAL record); furthermore this lets us cope in the
+ * case where we are replaying WAL data that has a write into a
+ * high-numbered segment of a relation that was later deleted. We
+ * want to go ahead and create the segments so we can finish out
+ * the replay.
*/
v->mdfd_chain = _mdfd_openseg(reln,
nextsegno,
- (segstogo == 1 || InRecovery) ? O_CREAT : 0);
+ (segstogo == 1 || InRecovery) ? O_CREAT : 0);
if (v->mdfd_chain == NULL)
{
if (allowNotFound && errno == ENOENT)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.92 2005/08/08 03:12:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.93 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!(*(smgrsw[i].smgr_init)) ())
elog(FATAL, "smgr initialization failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
if (!(*(smgrsw[i].smgr_shutdown)) ())
elog(FATAL, "smgr shutdown failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
}
smgrsetowner(SMgrRelation *owner, SMgrRelation reln)
{
/*
- * First, unhook any old owner. (Normally there shouldn't be any, but
- * it seems possible that this can happen during swap_relation_files()
+ * First, unhook any old owner. (Normally there shouldn't be any, but it
+ * seems possible that this can happen during swap_relation_files()
* depending on the order of processing. It's ok to close the old
* relcache entry early in that case.)
*/
elog(ERROR, "SMgrRelation hashtable corrupted");
/*
- * Unhook the owner pointer, if any. We do this last since in the
- * remote possibility of failure above, the SMgrRelation object will still
- * exist.
+ * Unhook the owner pointer, if any. We do this last since in the remote
+ * possibility of failure above, the SMgrRelation object will still exist.
*/
if (owner)
*owner = NULL;
* We may be using the target table space for the first time in this
* database, so create a per-database subdirectory if needed.
*
- * XXX this is a fairly ugly violation of module layering, but this seems
- * to be the best place to put the check. Maybe
- * TablespaceCreateDbspace should be here and not in
- * commands/tablespace.c? But that would imply importing a lot of
- * stuff that smgr.c oughtn't know, either.
+ * XXX this is a fairly ugly violation of module layering, but this seems to
+ * be the best place to put the check. Maybe TablespaceCreateDbspace
+ * should be here and not in commands/tablespace.c? But that would imply
+ * importing a lot of stuff that smgr.c oughtn't know, either.
*/
TablespaceCreateDbspace(reln->smgr_rnode.spcNode,
reln->smgr_rnode.dbNode,
/*
* Make a non-transactional XLOG entry showing the file creation. It's
- * non-transactional because we should replay it whether the
- * transaction commits or not; if not, the file will be dropped at
- * abort time.
+ * non-transactional because we should replay it whether the transaction
+ * commits or not; if not, the file will be dropped at abort time.
*/
xlrec.rnode = reln->smgr_rnode;
pendingDeletes = pending;
/*
- * NOTE: if the relation was created in this transaction, it will now
- * be present in the pending-delete list twice, once with atCommit
- * true and once with atCommit false. Hence, it will be physically
- * deleted at end of xact in either case (and the other entry will be
- * ignored by smgrDoPendingDeletes, so no error will occur). We could
- * instead remove the existing list entry and delete the physical file
- * immediately, but for now I'll keep the logic simple.
+ * NOTE: if the relation was created in this transaction, it will now be
+ * present in the pending-delete list twice, once with atCommit true and
+ * once with atCommit false. Hence, it will be physically deleted at end
+ * of xact in either case (and the other entry will be ignored by
+ * smgrDoPendingDeletes, so no error will occur). We could instead remove
+ * the existing list entry and delete the physical file immediately, but
+ * for now I'll keep the logic simple.
*/
/* Now close the file and throw away the hashtable entry */
DropRelFileNodeBuffers(rnode, isTemp, 0);
/*
- * Tell the free space map to forget this relation. It won't be
- * accessed any more anyway, but we may as well recycle the map space
- * quickly.
+ * Tell the free space map to forget this relation. It won't be accessed
+ * any more anyway, but we may as well recycle the map space quickly.
*/
FreeSpaceMapForgetRel(&rnode);
/*
* And delete the physical files.
*
- * Note: we treat deletion failure as a WARNING, not an error, because
- * we've already decided to commit or abort the current xact.
+ * Note: we treat deletion failure as a WARNING, not an error, because we've
+ * already decided to commit or abort the current xact.
*/
if (!(*(smgrsw[which].smgr_unlink)) (rnode, isRedo))
ereport(WARNING,
if (!(*(smgrsw[reln->smgr_which].smgr_read)) (reln, blocknum, buffer))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read block %u of relation %u/%u/%u: %m",
- blocknum,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not read block %u of relation %u/%u/%u: %m",
+ blocknum,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
}
/*
isTemp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write block %u of relation %u/%u/%u: %m",
- blocknum,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not write block %u of relation %u/%u/%u: %m",
+ blocknum,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
}
/*
BlockNumber newblks;
/*
- * Get rid of any buffers for the about-to-be-deleted blocks.
- * bufmgr will just drop them without bothering to write the contents.
+ * Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
+ * just drop them without bothering to write the contents.
*/
DropRelFileNodeBuffers(reln->smgr_rnode, isTemp, nblocks);
/*
- * Tell the free space map to forget anything it may have stored for
- * the about-to-be-deleted blocks. We want to be sure it won't return
- * bogus block numbers later on.
+ * Tell the free space map to forget anything it may have stored for the
+ * about-to-be-deleted blocks. We want to be sure it won't return bogus
+ * block numbers later on.
*/
FreeSpaceMapTruncateRel(&reln->smgr_rnode, nblocks);
if (newblks == InvalidBlockNumber)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode,
- nblocks)));
+ errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode,
+ nblocks)));
if (!isTemp)
{
/*
- * Make a non-transactional XLOG entry showing the file
- * truncation. It's non-transactional because we should replay it
- * whether the transaction commits or not; the underlying file
- * change is certainly not reversible.
+ * Make a non-transactional XLOG entry showing the file truncation.
+ * It's non-transactional because we should replay it whether the
+ * transaction commits or not; the underlying file change is certainly
+ * not reversible.
*/
XLogRecPtr lsn;
XLogRecData rdata;
if (!(*(smgrsw[i].smgr_commit)) ())
elog(ERROR, "transaction commit failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
}
if (!(*(smgrsw[i].smgr_abort)) ())
elog(ERROR, "transaction abort failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
}
if (!(*(smgrsw[i].smgr_sync)) ())
elog(ERROR, "storage sync failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
}
/*
* First, force bufmgr to drop any buffers it has for the to-be-
- * truncated blocks. We must do this, else subsequent
- * XLogReadBuffer operations will not re-extend the file properly.
+ * truncated blocks. We must do this, else subsequent XLogReadBuffer
+ * operations will not re-extend the file properly.
*/
DropRelFileNodeBuffers(xlrec->rnode, false, xlrec->blkno);
/*
- * Tell the free space map to forget anything it may have stored
- * for the about-to-be-deleted blocks. We want to be sure it
- * won't return bogus block numbers later on.
+ * Tell the free space map to forget anything it may have stored for
+ * the about-to-be-deleted blocks. We want to be sure it won't return
+ * bogus block numbers later on.
*/
FreeSpaceMapTruncateRel(&reln->smgr_rnode, xlrec->blkno);
/* Do the truncation */
newblks = (*(smgrsw[reln->smgr_which].smgr_truncate)) (reln,
- xlrec->blkno,
+ xlrec->blkno,
false);
if (newblks == InvalidBlockNumber)
ereport(WARNING,
(errcode_for_file_access(),
- errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode,
- xlrec->blkno)));
+ errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode,
+ xlrec->blkno)));
}
else
elog(PANIC, "smgr_redo: unknown op code %u", info);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/dest.c,v 1.65 2005/03/16 21:38:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/dest.c,v 1.66 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
case RemoteExecute:
/*
- * tell the fe that we saw an empty query string. In
- * protocols before 3.0 this has a useless empty-string
- * message body.
+ * tell the fe that we saw an empty query string. In protocols
+ * before 3.0 this has a useless empty-string message body.
*/
if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3)
pq_putemptymessage('I');
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.82 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.83 2005/10/15 02:49:26 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
/* FATAL here since no hope of regaining message sync */
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
}
/* and arg contents */
if (argsize > 0)
/*
* Since the validity of this structure is determined by whether the
* funcid is OK, we clear the funcid here. It must not be set to the
- * correct value until we are about to return with a good struct
- * fp_info, since we can be interrupted (i.e., with an ereport(ERROR,
- * ...)) at any time. [No longer really an issue since we don't save
- * the struct fp_info across transactions anymore, but keep it
- * anyway.]
+ * correct value until we are about to return with a good struct fp_info,
+ * since we can be interrupted (i.e., with an ereport(ERROR, ...)) at any
+ * time. [No longer really an issue since we don't save the struct
+ * fp_info across transactions anymore, but keep it anyway.]
*/
MemSet(fip, 0, sizeof(struct fp_info));
fip->funcid = InvalidOid;
/*
* Now that we've eaten the input message, check to see if we actually
- * want to do the function call or not. It's now safe to ereport();
- * we won't lose sync with the frontend.
+ * want to do the function call or not. It's now safe to ereport(); we
+ * won't lose sync with the frontend.
*/
if (IsAbortedTransactionBlockState())
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
/*
* Begin parsing the buffer contents.
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
if (abuf.cursor != abuf.len)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("incorrect binary data format in function argument %d",
- i + 1)));
+ errmsg("incorrect binary data format in function argument %d",
+ i + 1)));
}
else
ereport(ERROR,
* Copy supplied arguments into arg vector. In protocol 2.0 these are
* always assumed to be supplied in binary format.
*
- * Note: although the original protocol 2.0 code did not have any way for
- * the frontend to specify a NULL argument, we now choose to interpret
- * length == -1 as meaning a NULL.
+ * Note: although the original protocol 2.0 code did not have any way for the
+ * frontend to specify a NULL argument, we now choose to interpret length
+ * == -1 as meaning a NULL.
*/
for (i = 0; i < nargs; ++i)
{
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
if (abuf.cursor != abuf.len)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("incorrect binary data format in function argument %d",
- i + 1)));
+ errmsg("incorrect binary data format in function argument %d",
+ i + 1)));
}
/* Desired result format is always binary in protocol 2.0 */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.465 2005/10/13 22:57:27 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.466 2005/10/15 02:49:27 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
* global variables
* ----------------
*/
-const char *debug_query_string; /* for pgmonitor and
- * log_min_error_statement */
+const char *debug_query_string; /* for pgmonitor and log_min_error_statement */
/* Note: whereToSendOutput is initialized for the bootstrap/standalone case */
CommandDest whereToSendOutput = Debug;
/* stack base pointer (initialized by PostgresMain) */
/* Do not make static so PL/Java can modifiy it */
-char *stack_base_ptr = NULL;
+char *stack_base_ptr = NULL;
/*
* tcop/tcopdebug.h
*/
#ifndef TCOP_DONTUSENEWLINE
-static int UseNewLine = 1; /* Use newlines query delimiters (the
- * default) */
-
+static int UseNewLine = 1; /* Use newlines query delimiters (the default) */
#else
static int UseNewLine = 0; /* Use EOF as query delimiters */
#endif /* TCOP_DONTUSENEWLINE */
if (UseNewLine)
{
/*
- * if we are using \n as a delimiter, then read characters
- * until the \n.
+ * if we are using \n as a delimiter, then read characters until
+ * the \n.
*/
while ((c = getc(stdin)) != EOF)
{
}
/*
- * Validate message type code before trying to read body; if we have
- * lost sync, better to say "command unknown" than to run out of
- * memory because we used garbage as a length word.
+ * Validate message type code before trying to read body; if we have lost
+ * sync, better to say "command unknown" than to run out of memory because
+ * we used garbage as a length word.
*
- * This also gives us a place to set the doing_extended_query_message
- * flag as soon as possible.
+ * This also gives us a place to set the doing_extended_query_message flag as
+ * soon as possible.
*/
switch (qtype)
{
{
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
return EOF;
}
}
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid frontend message type %d", qtype)));
+ errmsg("invalid frontend message type %d", qtype)));
break;
case 'S': /* sync */
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid frontend message type %d", qtype)));
+ errmsg("invalid frontend message type %d", qtype)));
break;
case 'd': /* copy data */
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid frontend message type %d", qtype)));
+ errmsg("invalid frontend message type %d", qtype)));
break;
default:
/*
- * Otherwise we got garbage from the frontend. We treat this
- * as fatal because we have probably lost message boundary
- * sync, and there's no good way to recover.
+ * Otherwise we got garbage from the frontend. We treat this as
+ * fatal because we have probably lost message boundary sync, and
+ * there's no good way to recover.
*/
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
}
/*
- * In protocol version 3, all frontend messages have a length word
- * next after the type code; we can read the message contents
- * independently of the type.
+ * In protocol version 3, all frontend messages have a length word next
+ * after the type code; we can read the message contents independently of
+ * the type.
*/
if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3)
{
static bool
log_after_parse(List *raw_parsetree_list, const char *query_string,
- char **prepare_string)
+ char **prepare_string)
{
ListCell *parsetree_item;
bool log_this_statement = (log_statement == LOGSTMT_ALL);
*prepare_string = NULL;
- /* Check if we need to log the statement, and get prepare_string. */
+ /* Check if we need to log the statement, and get prepare_string. */
foreach(parsetree_item, raw_parsetree_list)
{
Node *parsetree = (Node *) lfirst(parsetree_item);
if (IsA(parsetree, SelectStmt) &&
((SelectStmt *) parsetree)->into == NULL)
- continue; /* optimization for frequent command */
+ continue; /* optimization for frequent command */
if (log_statement == LOGSTMT_MOD &&
(IsA(parsetree, InsertStmt) ||
IsA(parsetree, DeleteStmt) ||
IsA(parsetree, TruncateStmt) ||
(IsA(parsetree, CopyStmt) &&
- ((CopyStmt *) parsetree)->is_from))) /* COPY FROM */
+ ((CopyStmt *) parsetree)->is_from))) /* COPY FROM */
log_this_statement = true;
commandTag = CreateCommandTag(parsetree);
if ((log_statement == LOGSTMT_MOD ||
log_statement == LOGSTMT_DDL) &&
(strncmp(commandTag, "CREATE ", strlen("CREATE ")) == 0 ||
- IsA(parsetree, SelectStmt) || /* SELECT INTO, CREATE AS */
+ IsA(parsetree, SelectStmt) || /* SELECT INTO, CREATE AS */
strncmp(commandTag, "ALTER ", strlen("ALTER ")) == 0 ||
strncmp(commandTag, "DROP ", strlen("DROP ")) == 0 ||
- IsA(parsetree, GrantStmt) || /* GRANT or REVOKE */
+ IsA(parsetree, GrantStmt) || /* GRANT or REVOKE */
IsA(parsetree, CommentStmt)))
log_this_statement = true;
/*
- * For the first EXECUTE we find, record the client statement
- * used by the PREPARE.
+ * For the first EXECUTE we find, record the client statement used by
+ * the PREPARE.
*/
if (IsA(parsetree, ExecuteStmt))
{
entry->query_string)
{
*prepare_string = palloc(strlen(entry->query_string) +
- strlen(" [client PREPARE: %s]") - 1);
+ strlen(" [client PREPARE: %s]") - 1);
sprintf(*prepare_string, " [client PREPARE: %s]",
- entry->query_string);
+ entry->query_string);
}
}
}
-
+
if (log_this_statement)
{
ereport(LOG,
ResetUsage();
/*
- * rewritten queries are collected in new_list. Note there may be
- * more or fewer than in the original list.
+ * rewritten queries are collected in new_list. Note there may be more or
+ * fewer than in the original list.
*/
foreach(list_item, querytree_list)
{
#ifdef COPY_PARSE_PLAN_TREES
/*
- * Optional debugging check: pass querytree output through
- * copyObject()
+ * Optional debugging check: pass querytree output through copyObject()
*/
new_list = (List *) copyObject(querytree_list);
/* This checks both copyObject() and the equal() routines... */
Plan *new_plan = (Plan *) copyObject(plan);
/*
- * equal() currently does not have routines to compare Plan nodes,
- * so don't try to test equality here. Perhaps fix someday?
+ * equal() currently does not have routines to compare Plan nodes, so
+ * don't try to test equality here. Perhaps fix someday?
*/
#ifdef NOT_USED
/* This checks both copyObject() and the equal() routines... */
MemoryContext oldcontext;
List *parsetree_list;
ListCell *parsetree_item;
- struct timeval start_t, stop_t;
+ struct timeval start_t,
+ stop_t;
bool save_log_duration = log_duration;
int save_log_min_duration_statement = log_min_duration_statement;
bool save_log_statement_stats = log_statement_stats;
- char *prepare_string = NULL;
+ char *prepare_string = NULL;
bool was_logged = false;
-
+
/*
* Report query to various monitoring facilities.
*/
/*
* We use save_log_* so "SET log_duration = true" and "SET
- * log_min_duration_statement = true" don't report incorrect time
- * because gettimeofday() wasn't called. Similarly,
- * log_statement_stats has to be captured once.
+ * log_min_duration_statement = true" don't report incorrect time because
+ * gettimeofday() wasn't called. Similarly, log_statement_stats has to be
+ * captured once.
*/
if (save_log_duration || save_log_min_duration_statement != -1)
gettimeofday(&start_t, NULL);
/*
* Start up a transaction command. All queries generated by the
* query_string will be in this same command block, *unless* we find a
- * BEGIN/COMMIT/ABORT statement; we have to force a new xact command
- * after one of those, else bad things will happen in xact.c. (Note
- * that this will normally change current memory context.)
+ * BEGIN/COMMIT/ABORT statement; we have to force a new xact command after
+ * one of those, else bad things will happen in xact.c. (Note that this
+ * will normally change current memory context.)
*/
start_xact_command();
/*
- * Zap any pre-existing unnamed statement. (While not strictly
- * necessary, it seems best to define simple-Query mode as if it used
- * the unnamed statement and portal; this ensures we recover any
- * storage used by prior unnamed operations.)
+ * Zap any pre-existing unnamed statement. (While not strictly necessary,
+ * it seems best to define simple-Query mode as if it used the unnamed
+ * statement and portal; this ensures we recover any storage used by prior
+ * unnamed operations.)
*/
unnamed_stmt_pstmt = NULL;
if (unnamed_stmt_context)
QueryContext = CurrentMemoryContext;
/*
- * Do basic parsing of the query or queries (this should be safe even
- * if we are in aborted transaction state!)
+ * Do basic parsing of the query or queries (this should be safe even if
+ * we are in aborted transaction state!)
*/
parsetree_list = pg_parse_query(query_string);
if (log_statement != LOGSTMT_NONE || save_log_min_duration_statement != -1)
was_logged = log_after_parse(parsetree_list, query_string,
- &prepare_string);
+ &prepare_string);
/*
* Switch back to transaction context to enter the loop.
int16 format;
/*
- * Get the command name for use in status display (it also becomes
- * the default completion tag, down inside PortalRun). Set
- * ps_status and do any special start-of-SQL-command processing
- * needed by the destination.
+ * Get the command name for use in status display (it also becomes the
+ * default completion tag, down inside PortalRun). Set ps_status and
+ * do any special start-of-SQL-command processing needed by the
+ * destination.
*/
commandTag = CreateCommandTag(parsetree);
/*
* If we are in an aborted transaction, reject all commands except
- * COMMIT/ABORT. It is important that this test occur before we
- * try to do parse analysis, rewrite, or planning, since all those
- * phases try to do database accesses, which may fail in abort
- * state. (It might be safe to allow some additional utility
- * commands in this state, but not many...)
+ * COMMIT/ABORT. It is important that this test occur before we try
+ * to do parse analysis, rewrite, or planning, since all those phases
+ * try to do database accesses, which may fail in abort state. (It
+ * might be safe to allow some additional utility commands in this
+ * state, but not many...)
*/
if (IsAbortedTransactionBlockState())
{
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
}
/* Make sure we are in a transaction command */
PortalStart(portal, NULL, InvalidSnapshot);
/*
- * Select the appropriate output format: text unless we are doing
- * a FETCH from a binary cursor. (Pretty grotty to have to do
- * this here --- but it avoids grottiness in other places. Ah,
- * the joys of backward compatibility...)
+ * Select the appropriate output format: text unless we are doing a
+ * FETCH from a binary cursor. (Pretty grotty to have to do this here
+ * --- but it avoids grottiness in other places. Ah, the joys of
+ * backward compatibility...)
*/
format = 0; /* TEXT is default */
if (IsA(parsetree, FetchStmt))
MemoryContextSwitchTo(oldcontext);
/*
- * Run the portal to completion, and then drop it (and the
- * receiver).
+ * Run the portal to completion, and then drop it (and the receiver).
*/
(void) PortalRun(portal,
FETCH_ALL,
if (IsA(parsetree, TransactionStmt))
{
/*
- * If this was a transaction control statement, commit it. We
- * will start a new xact command for the next command (if
- * any).
+ * If this was a transaction control statement, commit it. We will
+ * start a new xact command for the next command (if any).
*/
finish_xact_command();
}
else if (lnext(parsetree_item) == NULL)
{
/*
- * If this is the last parsetree of the query string, close
- * down transaction statement before reporting
- * command-complete. This is so that any end-of-transaction
- * errors are reported before the command-complete message is
- * issued, to avoid confusing clients who will expect either a
- * command-complete message or an error, not one and then the
- * other. But for compatibility with historical Postgres
- * behavior, we do not force a transaction boundary between
- * queries appearing in a single query string.
+ * If this is the last parsetree of the query string, close down
+ * transaction statement before reporting command-complete. This
+ * is so that any end-of-transaction errors are reported before
+ * the command-complete message is issued, to avoid confusing
+ * clients who will expect either a command-complete message or an
+ * error, not one and then the other. But for compatibility with
+ * historical Postgres behavior, we do not force a transaction
+ * boundary between queries appearing in a single query string.
*/
finish_xact_command();
}
}
/*
- * Tell client that we're done with this query. Note we emit
- * exactly one EndCommand report for each raw parsetree, thus one
- * for each SQL command the client sent, regardless of rewriting.
- * (But a command aborted by error will not send an EndCommand
- * report at all.)
+ * Tell client that we're done with this query. Note we emit exactly
+ * one EndCommand report for each raw parsetree, thus one for each SQL
+ * command the client sent, regardless of rewriting. (But a command
+ * aborted by error will not send an EndCommand report at all.)
*/
EndCommand(completionTag, dest);
} /* end loop over parsetrees */
QueryContext = NULL;
/*
- * Combine processing here as we need to calculate the query duration
- * in both instances.
+ * Combine processing here as we need to calculate the query duration in
+ * both instances.
*/
if (save_log_duration || save_log_min_duration_statement != -1)
{
stop_t.tv_usec += 1000000;
}
usecs = (long) (stop_t.tv_sec - start_t.tv_sec) * 1000000 +
- (long) (stop_t.tv_usec - start_t.tv_usec);
+ (long) (stop_t.tv_usec - start_t.tv_usec);
/* Only print duration if we previously printed the statement. */
if (was_logged && save_log_duration)
ereport(LOG,
(errmsg("duration: %ld.%03ld ms",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
/*
- * Output a duration_statement to the log if the query has
- * exceeded the min duration, or if we are to print all durations.
+ * Output a duration_statement to the log if the query has exceeded
+ * the min duration, or if we are to print all durations.
*/
if (save_log_min_duration_statement == 0 ||
(save_log_min_duration_statement > 0 &&
usecs >= save_log_min_duration_statement * 1000))
ereport(LOG,
(errmsg("duration: %ld.%03ld ms statement: %s%s",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
query_string,
prepare_string ? prepare_string : "")));
}
query_string)));
/*
- * Start up a transaction command so we can run parse analysis etc.
- * (Note that this will normally change current memory context.)
- * Nothing happens if we are already in one.
+ * Start up a transaction command so we can run parse analysis etc. (Note
+ * that this will normally change current memory context.) Nothing happens
+ * if we are already in one.
*/
start_xact_command();
* We have two strategies depending on whether the prepared statement is
* named or not. For a named prepared statement, we do parsing in
* MessageContext and copy the finished trees into the prepared
- * statement's private context; then the reset of MessageContext
- * releases temporary space used by parsing and planning. For an
- * unnamed prepared statement, we assume the statement isn't going to
- * hang around long, so getting rid of temp space quickly is probably
- * not worth the costs of copying parse/plan trees. So in this case,
- * we set up a special context for the unnamed statement, and do all
- * the parsing/planning therein.
+ * statement's private context; then the reset of MessageContext releases
+ * temporary space used by parsing and planning. For an unnamed prepared
+ * statement, we assume the statement isn't going to hang around long, so
+ * getting rid of temp space quickly is probably not worth the costs of
+ * copying parse/plan trees. So in this case, we set up a special context
+ * for the unnamed statement, and do all the parsing/planning therein.
*/
is_named = (stmt_name[0] != '\0');
if (is_named)
QueryContext = CurrentMemoryContext;
/*
- * Do basic parsing of the query or queries (this should be safe even
- * if we are in aborted transaction state!)
+ * Do basic parsing of the query or queries (this should be safe even if
+ * we are in aborted transaction state!)
*/
parsetree_list = pg_parse_query(query_string);
/*
- * We only allow a single user statement in a prepared statement. This
- * is mainly to keep the protocol simple --- otherwise we'd need to
- * worry about multiple result tupdescs and things like that.
+ * We only allow a single user statement in a prepared statement. This is
+ * mainly to keep the protocol simple --- otherwise we'd need to worry
+ * about multiple result tupdescs and things like that.
*/
if (list_length(parsetree_list) > 1)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot insert multiple commands into a prepared statement")));
+ errmsg("cannot insert multiple commands into a prepared statement")));
if (parsetree_list != NIL)
{
/*
* If we are in an aborted transaction, reject all commands except
- * COMMIT/ROLLBACK. It is important that this test occur before
- * we try to do parse analysis, rewrite, or planning, since all
- * those phases try to do database accesses, which may fail in
- * abort state. (It might be safe to allow some additional utility
- * commands in this state, but not many...)
+ * COMMIT/ROLLBACK. It is important that this test occur before we
+ * try to do parse analysis, rewrite, or planning, since all those
+ * phases try to do database accesses, which may fail in abort state.
+ * (It might be safe to allow some additional utility commands in this
+ * state, but not many...)
*/
if (IsAbortedTransactionBlockState())
{
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
}
/*
* OK to analyze, rewrite, and plan this query. Note that the
- * originally specified parameter set is not required to be
- * complete, so we have to use parse_analyze_varparams().
+ * originally specified parameter set is not required to be complete,
+ * so we have to use parse_analyze_varparams().
*/
if (log_parser_stats)
ResetUsage();
if (ptype == InvalidOid || ptype == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_DATATYPE),
- errmsg("could not determine data type of parameter $%d",
- i + 1)));
+ errmsg("could not determine data type of parameter $%d",
+ i + 1)));
param_list = lappend_oid(param_list, ptype);
}
querytree_list = pg_rewrite_queries(querytree_list);
/*
- * If this is the unnamed statement and it has parameters, defer
- * query planning until Bind. Otherwise do it now.
+ * If this is the unnamed statement and it has parameters, defer query
+ * planning until Bind. Otherwise do it now.
*/
if (!is_named && numParams > 0)
plantree_list = NIL;
QueryContext = NULL;
/*
- * We do NOT close the open transaction command here; that only
- * happens when the client sends Sync. Instead, do
- * CommandCounterIncrement just in case something happened during
- * parse/plan.
+ * We do NOT close the open transaction command here; that only happens
+ * when the client sends Sync. Instead, do CommandCounterIncrement just
+ * in case something happened during parse/plan.
*/
CommandCounterIncrement();
set_ps_display("BIND");
/*
- * Start up a transaction command so we can call functions etc. (Note
- * that this will normally change current memory context.) Nothing
- * happens if we are already in one.
+ * Start up a transaction command so we can call functions etc. (Note that
+ * this will normally change current memory context.) Nothing happens if
+ * we are already in one.
*/
start_xact_command();
if (numPFormats > 1 && numPFormats != numParams)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("bind message has %d parameter formats but %d parameters",
- numPFormats, numParams)));
+ errmsg("bind message has %d parameter formats but %d parameters",
+ numPFormats, numParams)));
/* Find prepared statement */
if (stmt_name[0] != '\0')
if (!pstmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
- errmsg("unnamed prepared statement does not exist")));
+ errmsg("unnamed prepared statement does not exist")));
}
if (numParams != list_length(pstmt->argtype_list))
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("bind message supplies %d parameters, but prepared statement \"%s\" requires %d",
- numParams, stmt_name, list_length(pstmt->argtype_list))));
+ numParams, stmt_name, list_length(pstmt->argtype_list))));
/*
- * Create the portal. Allow silent replacement of an existing portal
- * only if the unnamed portal is specified.
+ * Create the portal. Allow silent replacement of an existing portal only
+ * if the unnamed portal is specified.
*/
if (portal_name[0] == '\0')
portal = CreatePortal(portal_name, true, true);
/*
* Fetch parameters, if any, and store in the portal's memory context.
*
- * In an aborted transaction, we can't risk calling user-defined
- * functions, but we can't fail to Bind either, so bind all parameters
- * to null values.
+ * In an aborted transaction, we can't risk calling user-defined functions,
+ * but we can't fail to Bind either, so bind all parameters to null
+ * values.
*/
if (numParams > 0)
{
pformat = 0; /* default = text */
/*
- * Rather than copying data around, we just set up a
- * phony StringInfo pointing to the correct portion of
- * the message buffer. We assume we can scribble on
- * the message buffer so as to maintain the convention
- * that StringInfos have a trailing null. This is
- * grotty but is a big win when dealing with very
- * large parameter strings.
+ * Rather than copying data around, we just set up a phony
+ * StringInfo pointing to the correct portion of the
+ * message buffer. We assume we can scribble on the
+ * message buffer so as to maintain the convention that
+ * StringInfos have a trailing null. This is grotty but
+ * is a big win when dealing with very large parameter
+ * strings.
*/
pbuf.data = (char *) pvalue;
pbuf.maxlen = plength + 1;
getTypeInputInfo(ptype, &typinput, &typioparam);
/*
- * We have to do encoding conversion before
- * calling the typinput routine.
+ * We have to do encoding conversion before calling
+ * the typinput routine.
*/
pstring = pg_client_to_server(pbuf.data, plength);
params[i].value =
Oid typioparam;
/*
- * Call the parameter type's binary input
- * converter
+ * Call the parameter type's binary input converter
*/
getTypeBinaryInputInfo(ptype, &typreceive, &typioparam);
/* Trouble if it didn't eat the whole buffer */
if (pbuf.cursor != pbuf.len)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("incorrect binary data format in bind parameter %d",
- i + 1)));
+ (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
+ errmsg("incorrect binary data format in bind parameter %d",
+ i + 1)));
}
else
{
pq_getmsgend(input_message);
/*
- * If we didn't plan the query before, do it now. This allows the
- * planner to make use of the concrete parameter values we now have.
+ * If we didn't plan the query before, do it now. This allows the planner
+ * to make use of the concrete parameter values we now have.
*
* This happens only for unnamed statements, and so switching into the
* statement context for planning is correct (see notes in
bool is_trans_exit = false;
bool completed;
char completionTag[COMPLETION_TAG_BUFSIZE];
- struct timeval start_t, stop_t;
+ struct timeval start_t,
+ stop_t;
bool save_log_duration = log_duration;
int save_log_min_duration_statement = log_min_duration_statement;
bool save_log_statement_stats = log_statement_stats;
errmsg("portal \"%s\" does not exist", portal_name)));
/*
- * If we re-issue an Execute protocol request against an existing
- * portal, then we are only fetching more rows rather than
- * completely re-executing the query from the start. atStart is never
- * reset for a v3 portal, so we are safe to use this check.
+ * If we re-issue an Execute protocol request against an existing portal,
+ * then we are only fetching more rows rather than completely re-executing
+ * the query from the start. atStart is never reset for a v3 portal, so we
+ * are safe to use this check.
*/
if (!portal->atStart)
execute_is_fetch = true;
/*
* We use save_log_* so "SET log_duration = true" and "SET
- * log_min_duration_statement = true" don't report incorrect time
- * because gettimeofday() wasn't called. Similarly,
- * log_statement_stats has to be captured once.
+ * log_min_duration_statement = true" don't report incorrect time because
+ * gettimeofday() wasn't called. Similarly, log_statement_stats has to be
+ * captured once.
*/
if (save_log_duration || save_log_min_duration_statement != -1)
gettimeofday(&start_t, NULL);
}
/*
- * Create dest receiver in MessageContext (we don't want it in
- * transaction context, because that may get deleted if portal
- * contains VACUUM).
+ * Create dest receiver in MessageContext (we don't want it in transaction
+ * context, because that may get deleted if portal contains VACUUM).
*/
receiver = CreateDestReceiver(dest, portal);
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
}
/* Check for cancel signal before we start execution */
{
/*
* If this was a transaction control statement, commit it. We
- * will start a new xact command for the next command (if
- * any).
+ * will start a new xact command for the next command (if any).
*/
finish_xact_command();
}
}
/*
- * Combine processing here as we need to calculate the query duration
- * in both instances.
+ * Combine processing here as we need to calculate the query duration in
+ * both instances.
*/
if (save_log_duration || save_log_min_duration_statement != -1)
{
stop_t.tv_usec += 1000000;
}
usecs = (long) (stop_t.tv_sec - start_t.tv_sec) * 1000000 +
- (long) (stop_t.tv_usec - start_t.tv_usec);
+ (long) (stop_t.tv_usec - start_t.tv_usec);
/* Only print duration if we previously printed the statement. */
if (log_statement == LOGSTMT_ALL && save_log_duration)
ereport(LOG,
(errmsg("duration: %ld.%03ld ms",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
/*
- * Output a duration_statement to the log if the query has
- * exceeded the min duration, or if we are to print all durations.
+ * Output a duration_statement to the log if the query has exceeded
+ * the min duration, or if we are to print all durations.
*/
if (save_log_min_duration_statement == 0 ||
(save_log_min_duration_statement > 0 &&
usecs >= save_log_min_duration_statement * 1000))
ereport(LOG,
(errmsg("duration: %ld.%03ld ms statement: %sEXECUTE %s [PREPARE: %s]",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
(execute_is_fetch) ? "FETCH from " : "",
- (*portal_name != '\0') ? portal_name : "",
+ (*portal_name != '\0') ? portal_name : "",
portal->sourceText ? portal->sourceText : "")));
}
if (!pstmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
- errmsg("unnamed prepared statement does not exist")));
+ errmsg("unnamed prepared statement does not exist")));
}
if (whereToSendOutput != Remote)
enable_sig_alarm(StatementTimeout, true);
else
cancel_from_timeout = false;
-
+
xact_started = true;
}
}
PG_SETMASK(&BlockSig);
/*
- * Ideally this should be ereport(FATAL), but then we'd not get
- * control back...
+ * Ideally this should be ereport(FATAL), but then we'd not get control
+ * back...
*/
ereport(WARNING,
(errcode(ERRCODE_CRASH_SHUTDOWN),
errmsg("terminating connection because of crash of another server process"),
- errdetail("The postmaster has commanded this server process to roll back"
- " the current transaction and exit, because another"
- " server process exited abnormally and possibly corrupted"
- " shared memory."),
+ errdetail("The postmaster has commanded this server process to roll back"
+ " the current transaction and exit, because another"
+ " server process exited abnormally and possibly corrupted"
+ " shared memory."),
errhint("In a moment you should be able to reconnect to the"
" database and repeat your command.")));
/*
* DO NOT proc_exit() -- we're here because shared memory may be
- * corrupted, so we don't want to try to clean up our transaction.
- * Just nail the windows shut and get out of town.
+ * corrupted, so we don't want to try to clean up our transaction. Just
+ * nail the windows shut and get out of town.
*
- * Note we do exit(1) not exit(0). This is to force the postmaster into
- * a system reset cycle if some idiot DBA sends a manual SIGQUIT to a
- * random backend. This is necessary precisely because we don't clean
- * up our shared memory state.
+ * Note we do exit(1) not exit(0). This is to force the postmaster into a
+ * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
+ * backend. This is necessary precisely because we don't clean up our
+ * shared memory state.
*/
exit(1);
}
ProcDiePending = true;
/*
- * If it's safe to interrupt, and we're waiting for input or a
- * lock, service the interrupt immediately
+ * If it's safe to interrupt, and we're waiting for input or a lock,
+ * service the interrupt immediately
*/
if (ImmediateInterruptOK && InterruptHoldoffCount == 0 &&
CritSectionCount == 0)
QueryCancelPending = true;
/*
- * If it's safe to interrupt, and we're waiting for a lock,
- * service the interrupt immediately. No point in interrupting if
- * we're waiting for input, however.
+ * If it's safe to interrupt, and we're waiting for a lock, service
+ * the interrupt immediately. No point in interrupting if we're
+ * waiting for input, however.
*/
if (ImmediateInterruptOK && InterruptHoldoffCount == 0 &&
CritSectionCount == 0)
ereport(ERROR,
(errcode(ERRCODE_FLOATING_POINT_EXCEPTION),
errmsg("floating-point exception"),
- errdetail("An invalid floating-point operation was signaled. "
- "This probably means an out-of-range result or an "
- "invalid operation, such as division by zero.")));
+ errdetail("An invalid floating-point operation was signaled. "
+ "This probably means an out-of-range result or an "
+ "invalid operation, such as division by zero.")));
}
/* SIGHUP: set flag to re-read config file at next convenient time */
DisableCatchupInterrupt();
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
- errmsg("terminating connection due to administrator command")));
+ errmsg("terminating connection due to administrator command")));
}
if (QueryCancelPending)
{
/*
* Compute distance from PostgresMain's local variables to my own
*
- * Note: in theory stack_depth should be ptrdiff_t or some such, but
- * since the whole point of this code is to bound the value to
- * something much less than integer-sized, int should work fine.
+ * Note: in theory stack_depth should be ptrdiff_t or some such, but since
+ * the whole point of this code is to bound the value to something much
+ * less than integer-sized, int should work fine.
*/
stack_depth = (int) (stack_base_ptr - &stack_top_loc);
/*
- * Take abs value, since stacks grow up on some machines, down on
- * others
+ * Take abs value, since stacks grow up on some machines, down on others
*/
if (stack_depth < 0)
stack_depth = -stack_depth;
/*
* Trouble?
*
- * The test on stack_base_ptr prevents us from erroring out if called
- * during process setup or in a non-backend process. Logically it
- * should be done first, but putting it here avoids wasting cycles
- * during normal cases.
+ * The test on stack_base_ptr prevents us from erroring out if called during
+ * process setup or in a non-backend process. Logically it should be done
+ * first, but putting it here avoids wasting cycles during normal cases.
*/
if (stack_depth > max_stack_depth_bytes &&
stack_base_ptr != NULL)
char *userDoption = NULL;
bool secure;
int errs = 0;
- int debug_flag = -1; /* -1 means not given */
- List *guc_names = NIL; /* for SUSET options */
+ int debug_flag = -1; /* -1 means not given */
+ List *guc_names = NIL; /* for SUSET options */
List *guc_values = NIL;
GucContext ctx;
GucSource gucsource;
/*
* ignore system indexes
*
- * As of PG 7.4 this is safe to allow from the client, since
- * it only disables reading the system indexes, not
- * writing them. Worst case consequence is slowness.
+ * As of PG 7.4 this is safe to allow from the client, since it
+ * only disables reading the system indexes, not writing them.
+ * Worst case consequence is slowness.
*/
IgnoreSystemIndexes(true);
break;
{
dbname = strdup(optarg);
- secure = false; /* subsequent switches are NOT
- * secure */
+ secure = false; /* subsequent switches are NOT secure */
ctx = PGC_BACKEND;
gucsource = PGC_S_CLIENT;
}
/*
* s - report usage statistics (timings) after each query
*
- * Since log options are SUSET, we need to postpone unless
- * still in secure context
+ * Since log options are SUSET, we need to postpone unless still
+ * in secure context
*/
if (ctx == PGC_BACKEND)
PendingConfigOption("log_statement_stats", "true");
}
/*
- * If a SUSET option, must postpone evaluation, unless
- * we are still reading secure switches.
+ * If a SUSET option, must postpone evaluation, unless we
+ * are still reading secure switches.
*/
if (ctx == PGC_BACKEND && IsSuperuserConfigOption(name))
PendingConfigOption(name, value);
}
/*
- * Process any additional GUC variable settings passed in startup
- * packet. These are handled exactly like command-line variables.
+ * Process any additional GUC variable settings passed in startup packet.
+ * These are handled exactly like command-line variables.
*/
if (MyProcPort != NULL)
{
/*
* Set up signal handlers and masks.
*
- * Note that postmaster blocked all signals before forking child process,
- * so there is no race condition whereby we might receive a signal
- * before we have set up the handler.
+ * Note that postmaster blocked all signals before forking child process, so
+ * there is no race condition whereby we might receive a signal before we
+ * have set up the handler.
*
- * Also note: it's best not to use any signals that are SIG_IGNored in
- * the postmaster. If such a signal arrives before we are able to
- * change the handler to non-SIG_IGN, it'll get dropped. Instead,
- * make a dummy handler in the postmaster to reserve the signal. (Of
- * course, this isn't an issue for signals that are locally generated,
- * such as SIGALRM and SIGPIPE.)
+ * Also note: it's best not to use any signals that are SIG_IGNored in the
+ * postmaster. If such a signal arrives before we are able to change the
+ * handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy
+ * handler in the postmaster to reserve the signal. (Of course, this isn't
+ * an issue for signals that are locally generated, such as SIGALRM and
+ * SIGPIPE.)
*/
pqsignal(SIGHUP, SigHupHandler); /* set flag to read config file */
pqsignal(SIGINT, StatementCancelHandler); /* cancel current query */
/*
* Ignore failure to write to frontend. Note: if frontend closes
* connection, we will notice it and exit cleanly when control next
- * returns to outer loop. This seems safer than forcing exit in the
- * midst of output during who-knows-what operation...
+ * returns to outer loop. This seems safer than forcing exit in the midst
+ * of output during who-knows-what operation...
*/
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, CatchupInterruptHandler);
pqsignal(SIGFPE, FloatExceptionHandler);
/*
- * Reset some signals that are accepted by postmaster but not by
- * backend
+ * Reset some signals that are accepted by postmaster but not by backend
*/
- pqsignal(SIGCHLD, SIG_DFL); /* system() requires this on some
- * platforms */
+ pqsignal(SIGCHLD, SIG_DFL); /* system() requires this on some platforms */
pqinitmask();
{
ereport(FATAL,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("invalid command-line arguments for server process"),
- errhint("Try \"%s --help\" for more information.", argv[0])));
+ errmsg("invalid command-line arguments for server process"),
+ errhint("Try \"%s --help\" for more information.", argv[0])));
}
BaseInit();
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("%s: invalid command-line arguments",
argv[0]),
- errhint("Try \"%s --help\" for more information.", argv[0])));
+ errhint("Try \"%s --help\" for more information.", argv[0])));
}
else if (argc - optind == 1)
dbname = argv[optind];
}
/*
- * Validate we have been given a reasonable-looking DataDir (if
- * under postmaster, assume postmaster did this already).
+ * Validate we have been given a reasonable-looking DataDir (if under
+ * postmaster, assume postmaster did this already).
*/
Assert(DataDir);
ValidatePgVersion(DataDir);
on_shmem_exit(ShutdownXLOG, 0);
/*
- * Read any existing FSM cache file, and register to write one out
- * at exit.
+ * Read any existing FSM cache file, and register to write one out at
+ * exit.
*/
LoadFreeSpaceMap();
on_shmem_exit(DumpFreeSpaceMap, 0);
/*
- * We have to build the flat file for pg_database, but not for
- * the user and group tables, since we won't try to do authentication.
+ * We have to build the flat file for pg_database, but not for the
+ * user and group tables, since we won't try to do authentication.
*/
BuildFlatFiles(true);
}
/*
* General initialization.
*
- * NOTE: if you are tempted to add code in this vicinity, consider
- * putting it inside InitPostgres() instead. In particular, anything
- * that involves database access should be there, not here.
+ * NOTE: if you are tempted to add code in this vicinity, consider putting it
+ * inside InitPostgres() instead. In particular, anything that involves
+ * database access should be there, not here.
*/
ereport(DEBUG3,
(errmsg_internal("InitPostgres")));
BeginReportingGUCOptions();
/*
- * Also set up handler to log session end; we have to wait till now
- * to be sure Log_disconnections has its final value.
+ * Also set up handler to log session end; we have to wait till now to be
+ * sure Log_disconnections has its final value.
*/
if (IsUnderPostmaster && Log_disconnections)
on_proc_exit(log_disconnections, 0);
/*
* POSTGRES main processing loop begins here
*
- * If an exception is encountered, processing resumes here so we abort
- * the current transaction and start a new one.
+ * If an exception is encountered, processing resumes here so we abort the
+ * current transaction and start a new one.
*
- * You might wonder why this isn't coded as an infinite loop around a
- * PG_TRY construct. The reason is that this is the bottom of the
- * exception stack, and so with PG_TRY there would be no exception
- * handler in force at all during the CATCH part. By leaving the
- * outermost setjmp always active, we have at least some chance of
- * recovering from an error during error recovery. (If we get into an
- * infinite loop thereby, it will soon be stopped by overflow of
- * elog.c's internal state stack.)
+ * You might wonder why this isn't coded as an infinite loop around a PG_TRY
+ * construct. The reason is that this is the bottom of the exception
+ * stack, and so with PG_TRY there would be no exception handler in force
+ * at all during the CATCH part. By leaving the outermost setjmp always
+ * active, we have at least some chance of recovering from an error during
+ * error recovery. (If we get into an infinite loop thereby, it will soon
+ * be stopped by overflow of elog.c's internal state stack.)
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
* NOTE: if you are tempted to add more code in this if-block,
* consider the high probability that it should be in
* AbortTransaction() instead. The only stuff done directly here
- * should be stuff that is guaranteed to apply *only* for
- * outer-level error recovery, such as adjusting the FE/BE
- * protocol status.
+ * should be stuff that is guaranteed to apply *only* for outer-level
+ * error recovery, such as adjusting the FE/BE protocol status.
*/
/* Since not using PG_TRY, must reset error stack by hand */
HOLD_INTERRUPTS();
/*
- * Forget any pending QueryCancel request, since we're returning
- * to the idle loop anyway, and cancel the statement timer if
- * running.
+ * Forget any pending QueryCancel request, since we're returning to
+ * the idle loop anyway, and cancel the statement timer if running.
*/
QueryCancelPending = false;
disable_sig_alarm(true);
QueryCancelPending = false; /* again in case timeout occurred */
/*
- * Turn off these interrupts too. This is only needed here and
- * not in other exception-catching places since these interrupts
- * are only enabled while we wait for client input.
+ * Turn off these interrupts too. This is only needed here and not in
+ * other exception-catching places since these interrupts are only
+ * enabled while we wait for client input.
*/
DoingCommandRead = false;
DisableNotifyInterrupt();
EmitErrorReport();
/*
- * Make sure debug_query_string gets reset before we possibly
- * clobber the storage it points at.
+ * Make sure debug_query_string gets reset before we possibly clobber
+ * the storage it points at.
*/
debug_query_string = NULL;
AbortCurrentTransaction();
/*
- * Now return to normal top-level context and clear ErrorContext
- * for next time.
+ * Now return to normal top-level context and clear ErrorContext for
+ * next time.
*/
MemoryContextSwitchTo(TopMemoryContext);
FlushErrorState();
QueryContext = NULL;
/*
- * If we were handling an extended-query-protocol message,
- * initiate skip till next Sync. This also causes us not to issue
+ * If we were handling an extended-query-protocol message, initiate
+ * skip till next Sync. This also causes us not to issue
* ReadyForQuery (until we get Sync).
*/
if (doing_extended_query_message)
doing_extended_query_message = false;
/*
- * Release storage left over from prior query cycle, and create a
- * new query input buffer in the cleared MessageContext.
+ * Release storage left over from prior query cycle, and create a new
+ * query input buffer in the cleared MessageContext.
*/
MemoryContextSwitchTo(MessageContext);
MemoryContextResetAndDeleteChildren(MessageContext);
initStringInfo(&input_message);
/*
- * (1) If we've reached idle state, tell the frontend we're ready
- * for a new query.
+ * (1) If we've reached idle state, tell the frontend we're ready for
+ * a new query.
*
* Note: this includes fflush()'ing the last of the prior output.
*
* This is also a good time to send collected statistics to the
* collector, and to update the PS stats display. We avoid doing
- * those every time through the message loop because it'd slow
- * down processing of batched messages, and because we don't want
- * to report uncommitted updates (that confuses autovacuum).
+ * those every time through the message loop because it'd slow down
+ * processing of batched messages, and because we don't want to report
+ * uncommitted updates (that confuses autovacuum).
*/
if (send_rfq)
{
}
/*
- * (2) Allow asynchronous signals to be executed immediately
- * if they come in while we are waiting for client input.
- * (This must be conditional since we don't want, say, reads on
- * behalf of COPY FROM STDIN doing the same thing.)
+ * (2) Allow asynchronous signals to be executed immediately if they
+ * come in while we are waiting for client input. (This must be
+ * conditional since we don't want, say, reads on behalf of COPY FROM
+ * STDIN doing the same thing.)
*/
QueryCancelPending = false; /* forget any earlier CANCEL signal */
DoingCommandRead = true;
DoingCommandRead = false;
/*
- * (5) check for any other interesting events that happened while
- * we slept.
+ * (5) check for any other interesting events that happened while we
+ * slept.
*/
if (got_SIGHUP)
{
case 'B': /* bind */
/*
- * this message is complex enough that it seems best to
- * put the field extraction out-of-line
+ * this message is complex enough that it seems best to put
+ * the field extraction out-of-line
*/
exec_bind_message(&input_message);
break;
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid CLOSE message subtype %d",
- close_type)));
+ errmsg("invalid CLOSE message subtype %d",
+ close_type)));
break;
}
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid DESCRIBE message subtype %d",
- describe_type)));
+ errmsg("invalid DESCRIBE message subtype %d",
+ describe_type)));
break;
}
}
break;
/*
- * 'X' means that the frontend is closing down the socket.
- * EOF means unexpected loss of frontend connection.
- * Either way, perform normal shutdown.
+ * 'X' means that the frontend is closing down the socket. EOF
+ * means unexpected loss of frontend connection. Either way,
+ * perform normal shutdown.
*/
case 'X':
case EOF:
/*
- * Reset whereToSendOutput to prevent ereport from
- * attempting to send any more messages to client.
+ * Reset whereToSendOutput to prevent ereport from attempting
+ * to send any more messages to client.
*/
if (whereToSendOutput == Remote)
whereToSendOutput = None;
/*
* NOTE: if you are tempted to add more code here, DON'T!
* Whatever you had in mind to do should be set up as an
- * on_proc_exit or on_shmem_exit callback, instead.
- * Otherwise it will fail to be called during other
- * backend-shutdown scenarios.
+ * on_proc_exit or on_shmem_exit callback, instead. Otherwise
+ * it will fail to be called during other backend-shutdown
+ * scenarios.
*/
proc_exit(0);
/*
* Accept but ignore these messages, per protocol spec; we
- * probably got here because a COPY failed, and the
- * frontend is still sending data.
+ * probably got here because a COPY failed, and the frontend
+ * is still sending data.
*/
break;
/*
* the only stats we don't show here are for memory usage -- i can't
- * figure out how to interpret the relevant fields in the rusage
- * struct, and they change names across o/s platforms, anyway. if you
- * can figure out what the entries mean, you can somehow extract
- * resident set size, shared text size, and unshared data and stack
- * sizes.
+ * figure out how to interpret the relevant fields in the rusage struct,
+ * and they change names across o/s platforms, anyway. if you can figure
+ * out what the entries mean, you can somehow extract resident set size,
+ * shared text size, and unshared data and stack sizes.
*/
initStringInfo(&str);
appendStringInfo(&str, "! system usage stats:\n");
appendStringInfo(&str,
- "!\t%ld.%06ld elapsed %ld.%06ld user %ld.%06ld system sec\n",
+ "!\t%ld.%06ld elapsed %ld.%06ld user %ld.%06ld system sec\n",
(long) (elapse_t.tv_sec - Save_t.tv_sec),
(long) (elapse_t.tv_usec - Save_t.tv_usec),
(long) (r.ru_utime.tv_sec - Save_r.ru_utime.tv_sec),
- (long) (r.ru_utime.tv_usec - Save_r.ru_utime.tv_usec),
+ (long) (r.ru_utime.tv_usec - Save_r.ru_utime.tv_usec),
(long) (r.ru_stime.tv_sec - Save_r.ru_stime.tv_sec),
- (long) (r.ru_stime.tv_usec - Save_r.ru_stime.tv_usec));
+ (long) (r.ru_stime.tv_usec - Save_r.ru_stime.tv_usec));
appendStringInfo(&str,
"!\t[%ld.%06ld user %ld.%06ld sys total]\n",
(long) user.tv_sec,
r.ru_oublock - Save_r.ru_oublock,
r.ru_inblock, r.ru_oublock);
appendStringInfo(&str,
- "!\t%ld/%ld [%ld/%ld] page faults/reclaims, %ld [%ld] swaps\n",
+ "!\t%ld/%ld [%ld/%ld] page faults/reclaims, %ld [%ld] swaps\n",
r.ru_majflt - Save_r.ru_majflt,
r.ru_minflt - Save_r.ru_minflt,
r.ru_majflt, r.ru_minflt,
r.ru_nswap - Save_r.ru_nswap,
r.ru_nswap);
appendStringInfo(&str,
- "!\t%ld [%ld] signals rcvd, %ld/%ld [%ld/%ld] messages rcvd/sent\n",
+ "!\t%ld [%ld] signals rcvd, %ld/%ld [%ld/%ld] messages rcvd/sent\n",
r.ru_nsignals - Save_r.ru_nsignals,
r.ru_nsignals,
r.ru_msgrcv - Save_r.ru_msgrcv,
r.ru_msgsnd - Save_r.ru_msgsnd,
r.ru_msgrcv, r.ru_msgsnd);
appendStringInfo(&str,
- "!\t%ld/%ld [%ld/%ld] voluntary/involuntary context switches\n",
+ "!\t%ld/%ld [%ld/%ld] voluntary/involuntary context switches\n",
r.ru_nvcsw - Save_r.ru_nvcsw,
r.ru_nivcsw - Save_r.ru_nivcsw,
r.ru_nvcsw, r.ru_nivcsw);
static void
log_disconnections(int code, Datum arg)
{
- Port *port = MyProcPort;
- struct timeval end;
- int hours,
- minutes,
- seconds;
+ Port *port = MyProcPort;
+ struct timeval end;
+ int hours,
+ minutes,
+ seconds;
gettimeofday(&end, NULL);
if (end.tv_usec < port->session_start.tv_usec)
"user=%s database=%s host=%s%s%s",
hours, minutes, seconds, (int) (end.tv_usec / 10000),
port->user_name, port->database_name, port->remote_host,
- port->remote_port[0] ? " port=" : "", port->remote_port)));
+ port->remote_port[0] ? " port=" : "", port->remote_port)));
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.94 2005/06/22 17:45:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.95 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
qd->parsetree = parsetree; /* parse tree */
qd->plantree = plantree; /* plan */
qd->snapshot = snapshot; /* snapshot */
- qd->crosscheck_snapshot = crosscheck_snapshot; /* RI check snapshot */
+ qd->crosscheck_snapshot = crosscheck_snapshot; /* RI check snapshot */
qd->dest = dest; /* output dest */
qd->params = params; /* parameter values passed into query */
qd->doInstrument = doInstrument; /* instrumentation wanted? */
* SELECT INTO table (a/k/a CREATE AS ... SELECT).
*
* Override the normal communication destination; execMain.c
- * special-cases this case. (Perhaps would be cleaner to have
- * an additional destination type?)
+ * special-cases this case. (Perhaps would be cleaner to have an
+ * additional destination type?)
*/
dest = None_Receiver;
}
}
/*
- * Must always set snapshot for plannable queries. Note we assume
- * that caller will take care of restoring ActiveSnapshot on exit/error.
+ * Must always set snapshot for plannable queries. Note we assume that
+ * caller will take care of restoring ActiveSnapshot on exit/error.
*/
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
else
lastOid = InvalidOid;
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "INSERT %u %u", lastOid, queryDesc->estate->es_processed);
+ "INSERT %u %u", lastOid, queryDesc->estate->es_processed);
break;
case CMD_UPDATE:
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
return ((Query *) linitial(portal->parseTrees))->targetList;
if (portal->strategy == PORTAL_UTIL_SELECT)
{
- Node *utilityStmt;
+ Node *utilityStmt;
utilityStmt = ((Query *) linitial(portal->parseTrees))->utilityStmt;
switch (nodeTag(utilityStmt))
{
case T_FetchStmt:
- {
- FetchStmt *substmt = (FetchStmt *) utilityStmt;
- Portal subportal;
+ {
+ FetchStmt *substmt = (FetchStmt *) utilityStmt;
+ Portal subportal;
- Assert(!substmt->ismove);
- subportal = GetPortalByName(substmt->portalname);
- Assert(PortalIsValid(subportal));
- return FetchPortalTargetList(subportal);
- }
+ Assert(!substmt->ismove);
+ subportal = GetPortalByName(substmt->portalname);
+ Assert(PortalIsValid(subportal));
+ return FetchPortalTargetList(subportal);
+ }
case T_ExecuteStmt:
- {
- ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt;
- PreparedStatement *entry;
+ {
+ ExecuteStmt *substmt = (ExecuteStmt *) utilityStmt;
+ PreparedStatement *entry;
- Assert(!substmt->into);
- entry = FetchPreparedStatement(substmt->name, true);
- return FetchPreparedStatementTargetList(entry);
- }
+ Assert(!substmt->into);
+ entry = FetchPreparedStatement(substmt->name, true);
+ return FetchPreparedStatementTargetList(entry);
+ }
default:
break;
AssertState(portal->status == PORTAL_NEW); /* else extra PortalStart */
/*
- * Set up global portal context pointers. (Should we set
- * QueryContext?)
+ * Set up global portal context pointers. (Should we set QueryContext?)
*/
saveActivePortal = ActivePortal;
saveActiveSnapshot = ActiveSnapshot;
PG_TRY();
{
ActivePortal = portal;
- ActiveSnapshot = NULL; /* will be set later */
+ ActiveSnapshot = NULL; /* will be set later */
CurrentResourceOwner = portal->resowner;
PortalContext = PortalGetHeapMemory(portal);
case PORTAL_ONE_SELECT:
/*
- * Must set snapshot before starting executor. Be sure to
+ * Must set snapshot before starting executor. Be sure to
* copy it into the portal's context.
*/
if (snapshot)
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
/*
- * Create QueryDesc in portal's context; for the moment,
- * set the destination to None.
+ * Create QueryDesc in portal's context; for the moment, set
+ * the destination to None.
*/
queryDesc = CreateQueryDesc((Query *) linitial(portal->parseTrees),
- (Plan *) linitial(portal->planTrees),
+ (Plan *) linitial(portal->planTrees),
ActiveSnapshot,
InvalidSnapshot,
None_Receiver,
false);
/*
- * We do *not* call AfterTriggerBeginQuery() here. We
- * assume that a SELECT cannot queue any triggers. It
- * would be messy to support triggers since the execution
- * of the portal may be interleaved with other queries.
+ * We do *not* call AfterTriggerBeginQuery() here. We assume
+ * that a SELECT cannot queue any triggers. It would be messy
+ * to support triggers since the execution of the portal may
+ * be interleaved with other queries.
*/
/*
case PORTAL_UTIL_SELECT:
/*
- * We don't set snapshot here, because
- * PortalRunUtility will take care of it if needed.
+ * We don't set snapshot here, because PortalRunUtility will
+ * take care of it if needed.
*/
portal->tupDesc =
UtilityTupleDescriptor(((Query *) linitial(portal->parseTrees))->utilityStmt);
*
* We have to play a special game here to support utility commands like
* VACUUM and CLUSTER, which internally start and commit transactions.
- * When we are called to execute such a command, CurrentResourceOwner
- * will be pointing to the TopTransactionResourceOwner --- which will
- * be destroyed and replaced in the course of the internal commit and
- * restart. So we need to be prepared to restore it as pointing to
- * the exit-time TopTransactionResourceOwner. (Ain't that ugly? This
- * idea of internally starting whole new transactions is not good.)
- * CurrentMemoryContext has a similar problem, but the other pointers
- * we save here will be NULL or pointing to longer-lived objects.
+ * When we are called to execute such a command, CurrentResourceOwner will
+ * be pointing to the TopTransactionResourceOwner --- which will be
+ * destroyed and replaced in the course of the internal commit and
+ * restart. So we need to be prepared to restore it as pointing to the
+ * exit-time TopTransactionResourceOwner. (Ain't that ugly? This idea of
+ * internally starting whole new transactions is not good.)
+ * CurrentMemoryContext has a similar problem, but the other pointers we
+ * save here will be NULL or pointing to longer-lived objects.
*/
saveTopTransactionResourceOwner = TopTransactionResourceOwner;
saveTopTransactionContext = TopTransactionContext;
PG_TRY();
{
ActivePortal = portal;
- ActiveSnapshot = NULL; /* will be set later */
+ ActiveSnapshot = NULL; /* will be set later */
CurrentResourceOwner = portal->resowner;
PortalContext = PortalGetHeapMemory(portal);
QueryContext = portal->queryContext;
portal->status = PORTAL_READY;
/*
- * Since it's a forward fetch, say DONE iff atEnd is now
- * true.
+ * Since it's a forward fetch, say DONE iff atEnd is now true.
*/
result = portal->atEnd;
break;
portal->status = PORTAL_READY;
/*
- * Since it's a forward fetch, say DONE iff atEnd is now
- * true.
+ * Since it's a forward fetch, say DONE iff atEnd is now true.
*/
result = portal->atEnd;
break;
uint32 nprocessed;
/*
- * NB: queryDesc will be NULL if we are fetching from a held cursor or
- * a completed utility query; can't use it in that path.
+ * NB: queryDesc will be NULL if we are fetching from a held cursor or a
+ * completed utility query; can't use it in that path.
*/
queryDesc = PortalGetQueryDesc(portal);
queryDesc->dest = dest;
/*
- * Determine which direction to go in, and check to see if we're
- * already at the end of the available tuples in that direction. If
- * so, set the direction to NoMovement to avoid trying to fetch any
- * tuples. (This check exists because not all plan node types are
- * robust about being called again if they've already returned NULL
- * once.) Then call the executor (we must not skip this, because the
- * destination needs to see a setup and shutdown even if no tuples are
- * available). Finally, update the portal position state depending on
- * the number of tuples that were retrieved.
+ * Determine which direction to go in, and check to see if we're already
+ * at the end of the available tuples in that direction. If so, set the
+ * direction to NoMovement to avoid trying to fetch any tuples. (This
+ * check exists because not all plan node types are robust about being
+ * called again if they've already returned NULL once.) Then call the
+ * executor (we must not skip this, because the destination needs to see a
+ * setup and shutdown even if no tuples are available). Finally, update
+ * the portal position state depending on the number of tuples that were
+ * retrieved.
*/
if (forward)
{
ExecClearTuple(slot);
/*
- * check our tuple count.. if we've processed the proper
- * number then quit, else loop again and process more tuples.
- * Zero count means no limit.
+ * check our tuple count.. if we've processed the proper number
+ * then quit, else loop again and process more tuples. Zero count
+ * means no limit.
*/
current_tuple_count++;
if (count && count == current_tuple_count)
(errmsg_internal("ProcessUtility")));
/*
- * Set snapshot if utility stmt needs one. Most reliable way to do
- * this seems to be to enumerate those that do not need one; this is a
- * short list. Transaction control, LOCK, and SET must *not* set a
- * snapshot since they need to be executable at the start of a
- * serializable transaction without freezing a snapshot. By extension
- * we allow SHOW not to set a snapshot. The other stmts listed are
- * just efficiency hacks. Beware of listing anything that can modify
- * the database --- if, say, it has to update an index with
- * expressions that invoke user-defined functions, then it had better
- * have a snapshot.
+ * Set snapshot if utility stmt needs one. Most reliable way to do this
+ * seems to be to enumerate those that do not need one; this is a short
+ * list. Transaction control, LOCK, and SET must *not* set a snapshot
+ * since they need to be executable at the start of a serializable
+ * transaction without freezing a snapshot. By extension we allow SHOW
+ * not to set a snapshot. The other stmts listed are just efficiency
+ * hacks. Beware of listing anything that can modify the database --- if,
+ * say, it has to update an index with expressions that invoke
+ * user-defined functions, then it had better have a snapshot.
*
- * Note we assume that caller will take care of restoring ActiveSnapshot
- * on exit/error.
+ * Note we assume that caller will take care of restoring ActiveSnapshot on
+ * exit/error.
*/
if (!(IsA(utilityStmt, TransactionStmt) ||
IsA(utilityStmt, LockStmt) ||
/*
* If the destination is RemoteExecute, change to None. The reason is
- * that the client won't be expecting any tuples, and indeed has no
- * way to know what they are, since there is no provision for Describe
- * to send a RowDescription message when this portal execution
- * strategy is in effect. This presently will only affect SELECT
- * commands added to non-SELECT queries by rewrite rules: such
- * commands will be executed, but the results will be discarded unless
- * you use "simple Query" protocol.
+ * that the client won't be expecting any tuples, and indeed has no way to
+ * know what they are, since there is no provision for Describe to send a
+ * RowDescription message when this portal execution strategy is in
+ * effect. This presently will only affect SELECT commands added to
+ * non-SELECT queries by rewrite rules: such commands will be executed,
+ * but the results will be discarded unless you use "simple Query"
+ * protocol.
*/
if (dest->mydest == RemoteExecute)
dest = None_Receiver;
altdest = None_Receiver;
/*
- * Loop to handle the individual queries generated from a single
- * parsetree by analysis and rewrite.
+ * Loop to handle the individual queries generated from a single parsetree
+ * by analysis and rewrite.
*/
forboth(querylist_item, portal->parseTrees,
planlist_item, portal->planTrees)
}
/*
- * Increment command counter between queries, but not after the
- * last one.
+ * Increment command counter between queries, but not after the last
+ * one.
*/
if (lnext(planlist_item) != NULL)
CommandCounterIncrement();
}
/*
- * If a command completion tag was supplied, use it. Otherwise use
- * the portal's commandTag as the default completion tag.
+ * If a command completion tag was supplied, use it. Otherwise use the
+ * portal's commandTag as the default completion tag.
*
- * Exception: clients will expect INSERT/UPDATE/DELETE tags to have
- * counts, so fake something up if necessary. (This could happen if
- * the original query was replaced by a DO INSTEAD rule.)
+ * Exception: clients will expect INSERT/UPDATE/DELETE tags to have counts,
+ * so fake something up if necessary. (This could happen if the original
+ * query was replaced by a DO INSTEAD rule.)
*/
if (completionTag && completionTag[0] == '\0')
{
PG_TRY();
{
ActivePortal = portal;
- ActiveSnapshot = NULL; /* will be set later */
+ ActiveSnapshot = NULL; /* will be set later */
CurrentResourceOwner = portal->resowner;
PortalContext = PortalGetHeapMemory(portal);
QueryContext = portal->queryContext;
if (count > 0)
{
/*
- * Definition: Rewind to start, advance count-1 rows,
- * return next row (if any). In practice, if the goal is
- * less than halfway back to the start, it's better to
- * scan from where we are. In any case, we arrange to
- * fetch the target row going forwards.
+ * Definition: Rewind to start, advance count-1 rows, return
+ * next row (if any). In practice, if the goal is less than
+ * halfway back to the start, it's better to scan from where
+ * we are. In any case, we arrange to fetch the target row
+ * going forwards.
*/
if (portal->posOverflow || portal->portalPos == LONG_MAX ||
count - 1 <= portal->portalPos / 2)
{
/*
* Definition: Advance to end, back up abs(count)-1 rows,
- * return prior row (if any). We could optimize this if
- * we knew in advance where the end was, but typically we
- * won't. (Is it worth considering case where count > half
- * of size of query? We could rewind once we know the
- * size ...)
+ * return prior row (if any). We could optimize this if we
+ * knew in advance where the end was, but typically we won't.
+ * (Is it worth considering case where count > half of size of
+ * query? We could rewind once we know the size ...)
*/
PortalRunSelect(portal, true, FETCH_ALL, None_Receiver);
if (count < -1)
if (count > 0)
{
/*
- * Definition: advance count-1 rows, return next row (if
- * any).
+ * Definition: advance count-1 rows, return next row (if any).
*/
if (count > 1)
PortalRunSelect(portal, true, count - 1, None_Receiver);
else if (count < 0)
{
/*
- * Definition: back up abs(count)-1 rows, return prior row
- * (if any).
+ * Definition: back up abs(count)-1 rows, return prior row (if
+ * any).
*/
if (count < -1)
PortalRunSelect(portal, false, -count - 1, None_Receiver);
}
/*
- * Get here with fdirection == FETCH_FORWARD or FETCH_BACKWARD, and
- * count >= 0.
+ * Get here with fdirection == FETCH_FORWARD or FETCH_BACKWARD, and count
+ * >= 0.
*/
forward = (fdirection == FETCH_FORWARD);
else
{
/*
- * If we are sitting on a row, back up one so we can re-fetch
- * it. If we are not sitting on a row, we still have to start
- * up and shut down the executor so that the destination is
- * initialized and shut down correctly; so keep going. To
- * PortalRunSelect, count == 0 means we will retrieve no row.
+ * If we are sitting on a row, back up one so we can re-fetch it.
+ * If we are not sitting on a row, we still have to start up and
+ * shut down the executor so that the destination is initialized
+ * and shut down correctly; so keep going. To PortalRunSelect,
+ * count == 0 means we will retrieve no row.
*/
if (on_row)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.244 2005/10/06 21:30:36 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.245 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
IsSystemClass((Form_pg_class) GETSTRUCT(tuple)))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied: \"%s\" is a system catalog",
- rel->relname)));
+ errmsg("permission denied: \"%s\" is a system catalog",
+ rel->relname)));
}
ReleaseSysCache(tuple);
{
case CMD_SELECT:
if (parsetree->into != NULL)
- return false; /* SELECT INTO */
+ return false; /* SELECT INTO */
else if (parsetree->rowMarks != NIL)
- return false; /* SELECT FOR UPDATE/SHARE */
+ return false; /* SELECT FOR UPDATE/SHARE */
else
return true;
case CMD_UPDATE:
return;
/*
- * Note: Commands that need to do more complicated checking are
- * handled elsewhere.
+ * Note: Commands that need to do more complicated checking are handled
+ * elsewhere.
*/
switch (nodeTag(parsetree))
switch (stmt->kind)
{
/*
- * START TRANSACTION, as defined by SQL99:
- * Identical to BEGIN. Same code for both.
+ * START TRANSACTION, as defined by SQL99: Identical
+ * to BEGIN. Same code for both.
*/
case TRANS_STMT_BEGIN:
case TRANS_STMT_START:
RELKIND_RELATION);
/*
- * Let AlterTableCreateToastTable decide if this one needs
- * a secondary relation too.
+ * Let AlterTableCreateToastTable decide if this one needs a
+ * secondary relation too.
*/
CommandCounterIncrement();
AlterTableCreateToastTable(relOid, true);
case OBJECT_DOMAIN:
/*
- * RemoveDomain does its own permissions
- * checks
+ * RemoveDomain does its own permissions checks
*/
RemoveDomain(names, stmt->behavior);
break;
case OBJECT_SCHEMA:
/*
- * RemoveSchema does its own permissions
- * checks
+ * RemoveSchema does its own permissions checks
*/
RemoveSchema(names, stmt->behavior);
break;
}
/*
- * We used to need to do CommandCounterIncrement()
- * here, but now it's done inside performDeletion().
+ * We used to need to do CommandCounterIncrement() here,
+ * but now it's done inside performDeletion().
*/
}
}
case 'T': /* ALTER DOMAIN DEFAULT */
/*
- * Recursively alter column default for table and,
- * if requested, for descendants
+ * Recursively alter column default for table and, if
+ * requested, for descendants
*/
AlterDomainDefault(stmt->typename,
stmt->def);
break;
/*
- * ******************************** object creation /
- * destruction ********************************
+ * ******************************** object creation / destruction ********************************
*
*/
case T_DefineStmt:
CreateFunction((CreateFunctionStmt *) parsetree);
break;
- case T_AlterFunctionStmt: /* ALTER FUNCTION */
+ case T_AlterFunctionStmt: /* ALTER FUNCTION */
AlterFunction((AlterFunctionStmt *) parsetree);
break;
DefineIndex(stmt->relation, /* relation */
stmt->idxname, /* index name */
- InvalidOid, /* no predefined OID */
+ InvalidOid, /* no predefined OID */
stmt->accessMethod, /* am name */
stmt->tableSpace,
stmt->indexParams, /* parameters */
VariableSetStmt *n = (VariableSetStmt *) parsetree;
/*
- * Special cases for special SQL syntax that effectively
- * sets more than one variable per statement.
+ * Special cases for special SQL syntax that effectively sets
+ * more than one variable per statement.
*/
if (strcmp(n->name, "TRANSACTION") == 0)
{
if (strcmp(item->defname, "transaction_isolation") == 0)
SetPGVariable("transaction_isolation",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
else if (strcmp(item->defname, "transaction_read_only") == 0)
SetPGVariable("transaction_read_only",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
}
}
else if (strcmp(n->name, "SESSION CHARACTERISTICS") == 0)
if (strcmp(item->defname, "transaction_isolation") == 0)
SetPGVariable("default_transaction_isolation",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
else if (strcmp(item->defname, "transaction_read_only") == 0)
SetPGVariable("default_transaction_read_only",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
}
}
else
break;
case T_AlterObjectSchemaStmt:
- switch (((AlterObjectSchemaStmt *) parsetree)->objectType)
- {
+ switch (((AlterObjectSchemaStmt *) parsetree)->objectType)
+ {
case OBJECT_AGGREGATE:
- tag = "ALTER AGGREGATE";
- break;
+ tag = "ALTER AGGREGATE";
+ break;
case OBJECT_DOMAIN:
- tag = "ALTER DOMAIN";
- break;
+ tag = "ALTER DOMAIN";
+ break;
case OBJECT_FUNCTION:
- tag = "ALTER FUNCTION";
- break;
+ tag = "ALTER FUNCTION";
+ break;
case OBJECT_SEQUENCE:
- tag = "ALTER SEQUENCE";
- break;
+ tag = "ALTER SEQUENCE";
+ break;
case OBJECT_TABLE:
- tag = "ALTER TABLE";
- break;
+ tag = "ALTER TABLE";
+ break;
case OBJECT_TYPE:
- tag = "ALTER TYPE";
- break;
+ tag = "ALTER TYPE";
+ break;
default:
tag = "???";
break;
- }
+ }
break;
case T_AlterOwnerStmt:
case T_GrantRoleStmt:
{
- GrantRoleStmt *stmt = (GrantRoleStmt *) parsetree;
+ GrantRoleStmt *stmt = (GrantRoleStmt *) parsetree;
tag = (stmt->is_grant) ? "GRANT ROLE" : "REVOKE ROLE";
}
switch (parsetree->commandType)
{
case CMD_SELECT:
+
/*
- * We take a little extra care here so that the result will
- * be useful for complaints about read-only statements
+ * We take a little extra care here so that the result will be
+ * useful for complaints about read-only statements
*/
if (parsetree->into != NULL)
tag = "SELECT INTO";
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.125 2005/10/10 18:49:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.126 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* The cache is valid if cached_member_role is not InvalidOid.
*/
static Oid cached_privs_role = InvalidOid;
-static List *cached_privs_roles = NIL;
+static List *cached_privs_roles = NIL;
static Oid cached_member_role = InvalidOid;
-static List *cached_membership_roles = NIL;
+static List *cached_membership_roles = NIL;
static const char *getid(const char *s, char *n);
Oid ownerId);
static Acl *recursive_revoke(Acl *acl, Oid grantee, AclMode revoke_privs,
Oid ownerId, DropBehavior behavior);
-static int oidComparator(const void *arg1, const void *arg2);
+static int oidComparator(const void *arg1, const void *arg2);
static AclMode convert_priv_string(text *priv_type_text);
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("identifier too long"),
- errdetail("Identifier must be less than %d characters.",
- NAMEDATALEN)));
+ errdetail("Identifier must be less than %d characters.",
+ NAMEDATALEN)));
n[len++] = *s;
}
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("unrecognized key word: \"%s\"", name),
- errhint("ACL key word must be \"group\" or \"user\".")));
+ errhint("ACL key word must be \"group\" or \"user\".")));
s = getid(s, name); /* move s to the name beyond the keyword */
if (name[0] == '\0')
ereport(ERROR,
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid mode character: must be one of \"%s\"",
- ACL_ALL_RIGHTS_STR)));
+ errmsg("invalid mode character: must be one of \"%s\"",
+ ACL_ALL_RIGHTS_STR)));
}
privs |= read;
aip->ai_grantee = get_roleid_checked(name);
/*
- * XXX Allow a degree of backward compatibility by defaulting the
- * grantor to the superuser.
+ * XXX Allow a degree of backward compatibility by defaulting the grantor
+ * to the superuser.
*/
if (*s == '/')
{
if (*s)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("extra garbage at the end of the ACL specification")));
+ errmsg("extra garbage at the end of the ACL specification")));
PG_RETURN_ACLITEM_P(aip);
}
}
/*
- * Note that the owner's entry shows all ordinary privileges but no
- * grant options. This is because his grant options come "from the
- * system" and not from his own efforts. (The SQL spec says that the
- * owner's rights come from a "_SYSTEM" authid.) However, we do
- * consider that the owner's ordinary privileges are self-granted;
- * this lets him revoke them. We implement the owner's grant options
- * without any explicit "_SYSTEM"-like ACL entry, by internally
- * special-casing the owner whereever we are testing grant options.
+ * Note that the owner's entry shows all ordinary privileges but no grant
+ * options. This is because his grant options come "from the system" and
+ * not from his own efforts. (The SQL spec says that the owner's rights
+ * come from a "_SYSTEM" authid.) However, we do consider that the
+ * owner's ordinary privileges are self-granted; this lets him revoke
+ * them. We implement the owner's grant options without any explicit
+ * "_SYSTEM"-like ACL entry, by internally special-casing the owner
+ * whereever we are testing grant options.
*/
aip->ai_grantee = ownerId;
aip->ai_grantor = ownerId;
old_aip = ACL_DAT(old_acl);
/*
- * Search the ACL for an existing entry for this grantee and grantor.
- * If one exists, just modify the entry in-place (well, in the same
- * position, since we actually return a copy); otherwise, insert the
- * new entry at the end.
+ * Search the ACL for an existing entry for this grantee and grantor. If
+ * one exists, just modify the entry in-place (well, in the same position,
+ * since we actually return a copy); otherwise, insert the new entry at
+ * the end.
*/
for (dst = 0; dst < num; ++dst)
break;
case ACL_MODECHG_DEL:
ACLITEM_SET_RIGHTS(new_aip[dst],
- old_rights & ~ACLITEM_GET_RIGHTS(*mod_aip));
+ old_rights & ~ACLITEM_GET_RIGHTS(*mod_aip));
break;
case ACL_MODECHG_EQL:
ACLITEM_SET_RIGHTS(new_aip[dst],
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can
- * only handle this when the grantee is not PUBLIC.
+ * Remove abandoned privileges (cascading revoke). Currently we can only
+ * handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
{
/*
* Make a copy of the given ACL, substituting new owner ID for old
- * wherever it appears as either grantor or grantee. Also note if the
- * new owner ID is already present.
+ * wherever it appears as either grantor or grantee. Also note if the new
+ * owner ID is already present.
*/
num = ACL_NUM(old_acl);
old_aip = ACL_DAT(old_acl);
}
/*
- * If the old ACL contained any references to the new owner, then we
- * may now have generated an ACL containing duplicate entries. Find
- * them and merge them so that there are not duplicates. (This is
- * relatively expensive since we use a stupid O(N^2) algorithm, but
- * it's unlikely to be the normal case.)
+ * If the old ACL contained any references to the new owner, then we may
+ * now have generated an ACL containing duplicate entries. Find them and
+ * merge them so that there are not duplicates. (This is relatively
+ * expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
+ * be the normal case.)
*
- * To simplify deletion of duplicate entries, we temporarily leave them
- * in the array but set their privilege masks to zero; when we reach
- * such an entry it's just skipped. (Thus, a side effect of this code
- * will be to remove privilege-free entries, should there be any in
- * the input.) dst is the next output slot, targ is the currently
- * considered input slot (always >= dst), and src scans entries to the
- * right of targ looking for duplicates. Once an entry has been
- * emitted to dst it is known duplicate-free and need not be
- * considered anymore.
+ * To simplify deletion of duplicate entries, we temporarily leave them in
+ * the array but set their privilege masks to zero; when we reach such an
+ * entry it's just skipped. (Thus, a side effect of this code will be to
+ * remove privilege-free entries, should there be any in the input.) dst
+ * is the next output slot, targ is the currently considered input slot
+ * (always >= dst), and src scans entries to the right of targ looking for
+ * duplicates. Once an entry has been emitted to dst it is known
+ * duplicate-free and need not be considered anymore.
*/
if (newpresent)
{
own_privs = aclmask(acl,
mod_aip->ai_grantor,
ownerId,
- ACL_GRANT_OPTION_FOR(ACLITEM_GET_GOPTIONS(*mod_aip)),
+ ACL_GRANT_OPTION_FOR(ACLITEM_GET_GOPTIONS(*mod_aip)),
ACLMASK_ALL);
own_privs = ACL_OPTION_TO_PRIVS(own_privs);
if ((ACLITEM_GET_GOPTIONS(*mod_aip) & ~own_privs) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- errmsg("grant options cannot be granted back to your own grantor")));
+ errmsg("grant options cannot be granted back to your own grantor")));
pfree(acl);
}
}
/*
- * Check privileges granted indirectly via role memberships.
- * We do this in a separate pass to minimize expensive indirect
- * membership tests. In particular, it's worth testing whether
- * a given ACL entry grants any privileges still of interest before
- * we perform the has_privs_of_role test.
+ * Check privileges granted indirectly via role memberships. We do this in
+ * a separate pass to minimize expensive indirect membership tests. In
+ * particular, it's worth testing whether a given ACL entry grants any
+ * privileges still of interest before we perform the has_privs_of_role
+ * test.
*/
remaining = mask & ~result;
for (i = 0; i < num; i++)
int
aclmembers(const Acl *acl, Oid **roleids)
{
- Oid *list;
+ Oid *list;
const AclItem *acldat;
- int i,
- j,
- k;
+ int i,
+ j,
+ k;
if (acl == NULL || ACL_NUM(acl) == 0)
{
}
/*
- * We could repalloc the array down to minimum size, but it's hardly
- * worth it since it's only transient memory.
+ * We could repalloc the array down to minimum size, but it's hardly worth
+ * it since it's only transient memory.
*/
*roleids = list;
static int
oidComparator(const void *arg1, const void *arg2)
{
- Oid oid1 = * (const Oid *) arg1;
- Oid oid2 = * (const Oid *) arg2;
+ Oid oid1 = *(const Oid *) arg1;
+ Oid oid2 = *(const Oid *) arg2;
if (oid1 > oid2)
return 1;
makeaclitem(PG_FUNCTION_ARGS)
{
Oid grantee = PG_GETARG_OID(0);
- Oid grantor = PG_GETARG_OID(1);
+ Oid grantor = PG_GETARG_OID(1);
text *privtext = PG_GETARG_TEXT_P(2);
bool goption = PG_GETARG_BOOL(3);
AclItem *result;
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
if (pg_strcasecmp(priv_type, "SELECT") == 0)
return ACL_SELECT;
{
Oid tableoid = PG_GETARG_OID(0);
text *priv_type_text = PG_GETARG_TEXT_P(1);
- Oid roleid;
+ Oid roleid;
AclMode mode;
AclResult aclresult;
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
Oid oid;
dbname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(databasename)));
+ PointerGetDatum(databasename)));
oid = get_database_oid(dbname);
if (!OidIsValid(oid))
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
Oid oid;
funcname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(functionname)));
+ PointerGetDatum(functionname)));
oid = DatumGetObjectId(DirectFunctionCall1(regprocedurein,
- CStringGetDatum(funcname)));
+ CStringGetDatum(funcname)));
if (!OidIsValid(oid))
ereport(ERROR,
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
Oid oid;
langname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(languagename)));
+ PointerGetDatum(languagename)));
oid = GetSysCacheOid(LANGNAME,
CStringGetDatum(langname),
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
Oid oid;
nspname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(schemaname)));
+ PointerGetDatum(schemaname)));
oid = GetSysCacheOid(NAMESPACENAME,
CStringGetDatum(nspname),
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
{
Oid tablespaceoid = PG_GETARG_OID(0);
text *priv_type_text = PG_GETARG_TEXT_P(1);
- Oid roleid;
+ Oid roleid;
AclMode mode;
AclResult aclresult;
Oid oid;
spcname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(tablespacename)));
+ PointerGetDatum(tablespacename)));
oid = get_tablespace_oid(spcname);
if (!OidIsValid(oid))
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
{
Oid roleoid = PG_GETARG_OID(0);
text *priv_type_text = PG_GETARG_TEXT_P(1);
- Oid roleid;
+ Oid roleid;
AclMode mode;
AclResult aclresult;
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
if (!IsBootstrapProcessingMode())
{
/*
- * In normal mode, set a callback on any syscache
- * invalidation of pg_auth_members rows
+ * In normal mode, set a callback on any syscache invalidation of
+ * pg_auth_members rows
*/
CacheRegisterSyscacheCallback(AUTHMEMROLEMEM,
RoleMembershipCacheCallback,
/*
* RoleMembershipCacheCallback
- * Syscache inval callback function
+ * Syscache inval callback function
*/
static void
RoleMembershipCacheCallback(Datum arg, Oid relid)
static List *
roles_has_privs_of(Oid roleid)
{
- List *roles_list;
- ListCell *l;
- List *new_cached_privs_roles;
- MemoryContext oldctx;
+ List *roles_list;
+ ListCell *l;
+ List *new_cached_privs_roles;
+ MemoryContext oldctx;
/* If cache is already valid, just return the list */
if (OidIsValid(cached_privs_role) && cached_privs_role == roleid)
return cached_privs_roles;
- /*
- * Find all the roles that roleid is a member of,
- * including multi-level recursion. The role itself will always
- * be the first element of the resulting list.
+ /*
+ * Find all the roles that roleid is a member of, including multi-level
+ * recursion. The role itself will always be the first element of the
+ * resulting list.
*
* Each element of the list is scanned to see if it adds any indirect
* memberships. We can use a single list as both the record of
foreach(l, roles_list)
{
- Oid memberid = lfirst_oid(l);
- CatCList *memlist;
- int i;
+ Oid memberid = lfirst_oid(l);
+ CatCList *memlist;
+ int i;
/* Ignore non-inheriting roles */
if (!has_rolinherit(memberid))
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
- Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
+ Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
/*
* Even though there shouldn't be any loops in the membership
- * graph, we must test for having already seen this role.
- * It is legal for instance to have both A->B and A->C->B.
+ * graph, we must test for having already seen this role. It is
+ * legal for instance to have both A->B and A->C->B.
*/
roles_list = list_append_unique_oid(roles_list, otherid);
}
/*
* Now safe to assign to state variable
*/
- cached_privs_role = InvalidOid; /* just paranoia */
+ cached_privs_role = InvalidOid; /* just paranoia */
list_free(cached_privs_roles);
cached_privs_roles = new_cached_privs_roles;
cached_privs_role = roleid;
static List *
roles_is_member_of(Oid roleid)
{
- List *roles_list;
- ListCell *l;
- List *new_cached_membership_roles;
- MemoryContext oldctx;
+ List *roles_list;
+ ListCell *l;
+ List *new_cached_membership_roles;
+ MemoryContext oldctx;
/* If cache is already valid, just return the list */
if (OidIsValid(cached_member_role) && cached_member_role == roleid)
return cached_membership_roles;
- /*
- * Find all the roles that roleid is a member of,
- * including multi-level recursion. The role itself will always
- * be the first element of the resulting list.
+ /*
+ * Find all the roles that roleid is a member of, including multi-level
+ * recursion. The role itself will always be the first element of the
+ * resulting list.
*
* Each element of the list is scanned to see if it adds any indirect
* memberships. We can use a single list as both the record of
foreach(l, roles_list)
{
- Oid memberid = lfirst_oid(l);
- CatCList *memlist;
- int i;
+ Oid memberid = lfirst_oid(l);
+ CatCList *memlist;
+ int i;
/* Find roles that memberid is directly a member of */
memlist = SearchSysCacheList(AUTHMEMMEMROLE, 1,
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
- Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
+ Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
/*
* Even though there shouldn't be any loops in the membership
- * graph, we must test for having already seen this role.
- * It is legal for instance to have both A->B and A->C->B.
+ * graph, we must test for having already seen this role. It is
+ * legal for instance to have both A->B and A->C->B.
*/
roles_list = list_append_unique_oid(roles_list, otherid);
}
if (superuser_arg(member))
return true;
- /*
+ /*
* Find all the roles that member has the privileges of, including
* multi-level recursion, then see if target role is any one of them.
*/
if (superuser_arg(member))
return true;
- /*
+ /*
* Find all the roles that member is a member of, including multi-level
* recursion, then see if target role is any one of them.
*/
is_admin_of_role(Oid member, Oid role)
{
bool result = false;
- List *roles_list;
- ListCell *l;
+ List *roles_list;
+ ListCell *l;
/* Fast path for simple case */
if (member == role)
if (superuser_arg(member))
return true;
- /*
- * Find all the roles that member is a member of,
- * including multi-level recursion. We build a list in the same way
- * that is_member_of_role does to track visited and unvisited roles.
+ /*
+ * Find all the roles that member is a member of, including multi-level
+ * recursion. We build a list in the same way that is_member_of_role does
+ * to track visited and unvisited roles.
*/
roles_list = list_make1_oid(member);
foreach(l, roles_list)
{
- Oid memberid = lfirst_oid(l);
- CatCList *memlist;
- int i;
+ Oid memberid = lfirst_oid(l);
+ CatCList *memlist;
+ int i;
/* Find roles that memberid is directly a member of */
memlist = SearchSysCacheList(AUTHMEMMEMROLE, 1,
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
- Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
+ Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
if (otherid == role &&
((Form_pg_auth_members) GETSTRUCT(tup))->admin_option)
static int
count_one_bits(AclMode mask)
{
- int nbits = 0;
+ int nbits = 0;
/* this code relies on AclMode being an unsigned type */
while (mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
Oid *grantorId, AclMode *grantOptions)
{
AclMode needed_goptions = ACL_GRANT_OPTION_FOR(privileges);
- List *roles_list;
+ List *roles_list;
int nrights;
ListCell *l;
/*
- * The object owner is always treated as having all grant options,
- * so if roleId is the owner it's easy. Also, if roleId is a superuser
- * it's easy: superusers are implicitly members of every role, so they
- * act as the object owner.
+ * The object owner is always treated as having all grant options, so if
+ * roleId is the owner it's easy. Also, if roleId is a superuser it's
+ * easy: superusers are implicitly members of every role, so they act as
+ * the object owner.
*/
if (roleId == ownerId || superuser_arg(roleId))
{
/*
* Otherwise we have to do a careful search to see if roleId has the
- * privileges of any suitable role. Note: we can hang onto the result
- * of roles_has_privs_of() throughout this loop, because aclmask_direct()
+ * privileges of any suitable role. Note: we can hang onto the result of
+ * roles_has_privs_of() throughout this loop, because aclmask_direct()
* doesn't query any role memberships.
*/
roles_list = roles_has_privs_of(roleId);
foreach(l, roles_list)
{
- Oid otherrole = lfirst_oid(l);
- AclMode otherprivs;
+ Oid otherrole = lfirst_oid(l);
+ AclMode otherprivs;
otherprivs = aclmask_direct(acl, otherrole, ownerId,
needed_goptions, ACLMASK_ALL);
*grantOptions = otherprivs;
return;
}
+
/*
* If it has just some of the needed privileges, remember best
* candidate.
*/
if (otherprivs != ACL_NO_RIGHTS)
{
- int nnewrights = count_one_bits(otherprivs);
+ int nnewrights = count_one_bits(otherprivs);
if (nnewrights > nrights)
{
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.15 2005/01/01 20:44:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.16 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
else
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("argument must be empty or one-dimensional array")));
+ errmsg("argument must be empty or one-dimensional array")));
/*
- * We arrange to look up info about element type only once per series
- * of calls, assuming the element type doesn't change underneath us.
+ * We arrange to look up info about element type only once per series of
+ * calls, assuming the element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
ndims2 = ARR_NDIM(v2);
/*
- * short circuit - if one input array is empty, and the other is not,
- * we return the non-empty one as the result
+ * short circuit - if one input array is empty, and the other is not, we
+ * return the non-empty one as the result
*
* if both are empty, return the first one
*/
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing element dimensions are "
- "not compatible for concatenation.")));
+ errdetail("Arrays with differing element dimensions are "
+ "not compatible for concatenation.")));
dims[i] = dims1[i];
lbs[i] = lbs1[i];
else if (ndims1 == ndims2 - 1)
{
/*
- * resulting array has the second argument as the outer array,
- * with the first argument appended to the front of the outer
- * dimension
+ * resulting array has the second argument as the outer array, with
+ * the first argument appended to the front of the outer dimension
*/
ndims = ndims2;
dims = (int *) palloc(ndims * sizeof(int));
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing dimensions are not "
- "compatible for concatenation.")));
+ errdetail("Arrays with differing dimensions are not "
+ "compatible for concatenation.")));
}
}
else
/*
* (ndims1 == ndims2 + 1)
*
- * resulting array has the first argument as the outer array, with
- * the second argument appended to the end of the outer dimension
+ * resulting array has the first argument as the outer array, with the
+ * second argument appended to the end of the outer dimension
*/
ndims = ndims1;
dims = (int *) palloc(ndims * sizeof(int));
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing dimensions are not "
- "compatible for concatenation.")));
+ errdetail("Arrays with differing dimensions are not "
+ "compatible for concatenation.")));
}
}
if (element_type == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid array element type OID: %u", element_type)));
+ errmsg("invalid array element type OID: %u", element_type)));
if (ndims < 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
}
/*
- * We arrange to look up info about element type only once per series
- * of calls, assuming the element type doesn't change underneath us.
+ * We arrange to look up info about element type only once per series of
+ * calls, assuming the element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.122 2005/08/15 19:40:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.123 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *string = PG_GETARG_CSTRING(0); /* external form */
Oid element_type = PG_GETARG_OID(1); /* type of an array
* element */
- int32 typmod = PG_GETARG_INT32(2); /* typmod for array
- * elements */
+ int32 typmod = PG_GETARG_INT32(2); /* typmod for array elements */
int typlen;
bool typbyval;
char typalign;
/*
* We arrange to look up info about element type, including its input
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = ~element_type;
}
if (my_extra->element_type != element_type)
{
/*
- * Get info about element type, including its input conversion
- * proc
+ * Get info about element type, including its input conversion proc
*/
get_type_io_data(element_type, IOFunc_input,
&my_extra->typlen, &my_extra->typbyval,
* Otherwise, we require the input to be in curly-brace style, and we
* prescan the input to determine dimensions.
*
- * Dimension info takes the form of one or more [n] or [m:n] items. The
- * outer loop iterates once per dimension item.
+ * Dimension info takes the form of one or more [n] or [m:n] items. The outer
+ * loop iterates once per dimension item.
*/
p = string_save;
ndim = 0;
if (ub < lBound[ndim])
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("upper bound cannot be less than lower bound")));
+ errmsg("upper bound cannot be less than lower bound")));
dim[ndim] = ub - lBound[ndim] + 1;
ndim++;
p++;
/*
- * intuit dimensions from brace structure -- it better match what
- * we were given
+ * intuit dimensions from brace structure -- it better match what we
+ * were given
*/
if (*p != '{')
ereport(ERROR,
if (ndim_braces != ndim)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("array dimensions incompatible with array literal")));
+ errmsg("array dimensions incompatible with array literal")));
for (i = 0; i < ndim; ++i)
{
if (dim[i] != dim_braces[i])
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("array dimensions incompatible with array literal")));
+ errmsg("array dimensions incompatible with array literal")));
}
}
/* Signal a premature end of the string */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ errmsg("malformed array literal: \"%s\"", str)));
break;
case '\\':
/*
- * An escape must be after a level start, after an
- * element start, or after an element delimiter. In
- * any case we now must be past an element start.
+ * An escape must be after a level start, after an element
+ * start, or after an element delimiter. In any case we
+ * now must be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_QUOTED_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
if (parse_state != ARRAY_QUOTED_ELEM_STARTED)
parse_state = ARRAY_ELEM_STARTED;
/* skip the escaped character */
ptr++;
else
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
break;
case '\"':
/*
* A quote must be after a level start, after a quoted
- * element start, or after an element delimiter. In
- * any case we now must be past an element start.
+ * element start, or after an element delimiter. In any
+ * case we now must be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_QUOTED_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
in_quotes = !in_quotes;
if (in_quotes)
parse_state = ARRAY_QUOTED_ELEM_STARTED;
if (!in_quotes)
{
/*
- * A left brace can occur if no nesting has
- * occurred yet, after a level start, or after a
- * level delimiter.
+ * A left brace can occur if no nesting has occurred
+ * yet, after a level start, or after a level
+ * delimiter.
*/
if (parse_state != ARRAY_NO_LEVEL &&
parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_LEVEL_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_LEVEL_STARTED;
if (nest_level >= MAXDIM)
ereport(ERROR,
- (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)",
- nest_level, MAXDIM)));
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)",
+ nest_level, MAXDIM)));
temp[nest_level] = 0;
nest_level++;
if (ndim < nest_level)
if (!in_quotes)
{
/*
- * A right brace can occur after an element start,
- * an element completion, a quoted element
- * completion, or a level completion.
+ * A right brace can occur after an element start, an
+ * element completion, a quoted element completion, or
+ * a level completion.
*/
if (parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_COMPLETED &&
parse_state != ARRAY_LEVEL_COMPLETED &&
!(nest_level == 1 && parse_state == ARRAY_LEVEL_STARTED))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_LEVEL_COMPLETED;
if (nest_level == 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
nest_level--;
if ((nelems_last[nest_level] != 1) &&
- (nelems[nest_level] != nelems_last[nest_level]))
+ (nelems[nest_level] != nelems_last[nest_level]))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("multidimensional arrays must have "
- "array expressions with matching "
- "dimensions")));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("multidimensional arrays must have "
+ "array expressions with matching "
+ "dimensions")));
nelems_last[nest_level] = nelems[nest_level];
nelems[nest_level] = 1;
if (nest_level == 0)
if (*ptr == typdelim)
{
/*
- * Delimiters can occur after an element
- * start, an element completion, a quoted
- * element completion, or a level completion.
+ * Delimiters can occur after an element start, an
+ * element completion, a quoted element
+ * completion, or a level completion.
*/
if (parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_COMPLETED &&
- parse_state != ARRAY_QUOTED_ELEM_COMPLETED &&
+ parse_state != ARRAY_QUOTED_ELEM_COMPLETED &&
parse_state != ARRAY_LEVEL_COMPLETED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
if (parse_state == ARRAY_LEVEL_COMPLETED)
parse_state = ARRAY_LEVEL_DELIMITED;
else
{
/*
* Other non-space characters must be after a
- * level start, after an element start, or
- * after an element delimiter. In any case we
- * now must be past an element start.
+ * level start, after an element start, or after
+ * an element delimiter. In any case we now must
+ * be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_ELEM_STARTED;
}
}
MemSet(indx, 0, sizeof(indx));
/*
- * We have to remove " and \ characters to create a clean item value
- * to pass to the datatype input routine. We overwrite each item
- * value in-place within arrayStr to do this. srcptr is the current
- * scan point, and dstptr is where we are copying to.
+ * We have to remove " and \ characters to create a clean item value to
+ * pass to the datatype input routine. We overwrite each item value
+ * in-place within arrayStr to do this. srcptr is the current scan point,
+ * and dstptr is where we are copying to.
*
- * We also want to suppress leading and trailing unquoted whitespace.
- * We use the leadingspace flag to suppress leading space. Trailing
- * space is tracked by using dstendptr to point to the last significant
- * output character.
+ * We also want to suppress leading and trailing unquoted whitespace. We use
+ * the leadingspace flag to suppress leading space. Trailing space is
+ * tracked by using dstendptr to point to the last significant output
+ * character.
*
- * The error checking in this routine is mostly pro-forma, since we
- * expect that ArrayCount() already validated the string.
+ * The error checking in this routine is mostly pro-forma, since we expect
+ * that ArrayCount() already validated the string.
*/
srcptr = arrayStr;
while (!eoArray)
{
if (nest_level >= ndim)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"",
- origStr)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"",
+ origStr)));
nest_level++;
indx[nest_level - 1] = 0;
srcptr++;
{
if (nest_level == 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"",
- origStr)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"",
+ origStr)));
if (i == -1)
i = ArrayGetOffset0(ndim, indx, prod);
indx[nest_level - 1] = 0;
else if (isspace((unsigned char) *srcptr))
{
/*
- * If leading space, drop it immediately. Else,
- * copy but don't advance dstendptr.
+ * If leading space, drop it immediately. Else, copy
+ * but don't advance dstendptr.
*/
if (leadingspace)
srcptr++;
/*
* We arrange to look up info about element type, including its output
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
if (my_extra->element_type != element_type)
{
/*
- * Get info about element type, including its output conversion
- * proc
+ * Get info about element type, including its output conversion proc
*/
get_type_io_data(element_type, IOFunc_output,
&my_extra->typlen, &my_extra->typbyval,
}
/*
- * we will need to add explicit dimensions if any dimension has a
- * lower bound other than one
+ * we will need to add explicit dimensions if any dimension has a lower
+ * bound other than one
*/
for (i = 0; i < ndim; i++)
{
}
/*
- * Convert all values to string form, count total space needed
- * (including any overhead such as escaping backslashes), and detect
- * whether each item needs double quotes.
+ * Convert all values to string form, count total space needed (including
+ * any overhead such as escaping backslashes), and detect whether each
+ * item needs double quotes.
*/
values = (char **) palloc(nitems * sizeof(char *));
needquotes = (bool *) palloc(nitems * sizeof(bool));
/* count data plus backslashes; detect chars needing quotes */
if (values[i][0] == '\0')
- needquote = true; /* force quotes for empty string */
+ needquote = true; /* force quotes for empty string */
else
needquote = false;
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
Oid spec_element_type = PG_GETARG_OID(1); /* type of an array
* element */
- int32 typmod = PG_GETARG_INT32(2); /* typmod for array
- * elements */
+ int32 typmod = PG_GETARG_INT32(2); /* typmod for array elements */
Oid element_type;
int typlen;
bool typbyval;
nitems = ArrayGetNItems(ndim, dim);
/*
- * We arrange to look up info about element type, including its
- * receive conversion proc, only once per series of calls, assuming
- * the element type doesn't change underneath us.
+ * We arrange to look up info about element type, including its receive
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = ~element_type;
}
if (!OidIsValid(my_extra->typiofunc))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary input function available for type %s",
- format_type_be(element_type))));
+ errmsg("no binary input function available for type %s",
+ format_type_be(element_type))));
fmgr_info_cxt(my_extra->typiofunc, &my_extra->proc,
fcinfo->flinfo->fn_mcxt);
my_extra->element_type = element_type;
errmsg("insufficient data left in message")));
/*
- * Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the input buffer.
- * We assume we can scribble on the input buffer so as to maintain
- * the convention that StringInfos have a trailing null.
+ * Rather than copying data around, we just set up a phony StringInfo
+ * pointing to the correct portion of the input buffer. We assume we
+ * can scribble on the input buffer so as to maintain the convention
+ * that StringInfos have a trailing null.
*/
elem_buf.data = &buf->data[buf->cursor];
elem_buf.maxlen = itemlen + 1;
/*
* We arrange to look up info about element type, including its send
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
if (!OidIsValid(my_extra->typiofunc))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary output function available for type %s",
- format_type_be(element_type))));
+ errmsg("no binary output function available for type %s",
+ format_type_be(element_type))));
fmgr_info_cxt(my_extra->typiofunc, &my_extra->proc,
fcinfo->flinfo->fn_mcxt);
my_extra->element_type = element_type;
if (arraylen > 0)
{
/*
- * fixed-length arrays -- currently, cannot slice these because
- * parser labels output as being of the fixed-length array type!
- * Code below shows how we could support it if the parser were
- * changed to label output as a suitable varlena array type.
+ * fixed-length arrays -- currently, cannot slice these because parser
+ * labels output as being of the fixed-length array type! Code below
+ * shows how we could support it if the parser were changed to label
+ * output as a suitable varlena array type.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("slices of fixed-length arrays not implemented")));
+ errmsg("slices of fixed-length arrays not implemented")));
/*
* fixed-length arrays -- these are assumed to be 1-d, 0-based XXX
}
/*
- * Check provided subscripts. A slice exceeding the current array
- * limits is silently truncated to the array limits. If we end up
- * with an empty slice, return NULL (should it be an empty array
- * instead?)
+ * Check provided subscripts. A slice exceeding the current array limits
+ * is silently truncated to the array limits. If we end up with an empty
+ * slice, return NULL (should it be an empty array instead?)
*/
if (ndim < nSubscripts || ndim <= 0 || ndim > MAXDIM)
RETURN_NULL(ArrayType *);
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3)
- * we copied the given lowerIndx values ... but that seems confusing.
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
for (i = 0; i < ndim; i++)
ndim = ARR_NDIM(array);
/*
- * if number of dims is zero, i.e. an empty array, create an array
- * with nSubscripts dimensions, and set the lower bounds to the
- * supplied subscripts
+ * if number of dims is zero, i.e. an empty array, create an array with
+ * nSubscripts dimensions, and set the lower bounds to the supplied
+ * subscripts
*/
if (ndim == 0)
{
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("updates on slices of fixed-length arrays not implemented")));
+ errmsg("updates on slices of fixed-length arrays not implemented")));
}
/* detoast arrays if necessary */
ndim = ARR_NDIM(array);
/*
- * if number of dims is zero, i.e. an empty array, create an array
- * with nSubscripts dimensions, and set the upper and lower bounds to
- * the supplied subscripts
+ * if number of dims is zero, i.e. an empty array, create an array with
+ * nSubscripts dimensions, and set the upper and lower bounds to the
+ * supplied subscripts
*/
if (ndim == 0)
{
memcpy(lb, ARR_LBOUND(array), ndim * sizeof(int));
/*
- * Check provided subscripts. A slice exceeding the current array
- * limits throws an error, *except* in the 1-D case where we will
- * extend the array as long as no hole is created. An empty slice is
- * an error, too.
+ * Check provided subscripts. A slice exceeding the current array limits
+ * throws an error, *except* in the 1-D case where we will extend the
+ * array as long as no hole is created. An empty slice is an error, too.
*/
for (i = 0; i < nSubscripts; i++)
{
}
/*
- * Make sure source array has enough entries. Note we ignore the
- * shape of the source array and just read entries serially.
+ * Make sure source array has enough entries. Note we ignore the shape of
+ * the source array and just read entries serially.
*/
mda_get_range(ndim, span, lowerIndx, upperIndx);
nsrcitems = ArrayGetNItems(ndim, span);
if (ndim > 1)
{
/*
- * here we do not need to cope with extension of the array; it
- * would be a lot more complicated if we had to do so...
+ * here we do not need to cope with extension of the array; it would
+ * be a lot more complicated if we had to do so...
*/
olditemsize = array_slice_size(ndim, dim, lb, ARR_DATA_PTR(array),
lowerIndx, upperIndx,
else
{
/*
- * here we must allow for possibility of slice larger than orig
- * array
+ * here we must allow for possibility of slice larger than orig array
*/
int oldlb = ARR_LBOUND(array)[0];
int oldub = oldlb + ARR_DIMS(array)[0] - 1;
if (ndim > 1)
{
/*
- * here we do not need to cope with extension of the array; it
- * would be a lot more complicated if we had to do so...
+ * here we do not need to cope with extension of the array; it would
+ * be a lot more complicated if we had to do so...
*/
array_insert_slice(ndim, dim, lb, ARR_DATA_PTR(array), olddatasize,
ARR_DATA_PTR(newarray),
* or binary-compatible with, the first argument type of fn().
* * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
}
/*
- * We arrange to look up info about input and return element types
- * only once per series of calls, assuming the element type doesn't
- * change underneath us.
+ * We arrange to look up info about input and return element types only
+ * once per series of calls, assuming the element type doesn't change
+ * underneath us.
*/
inp_extra = &amstate->inp_extra;
ret_extra = &amstate->ret_extra;
/*
* Apply the given function to source elt and extra args.
*
- * We assume the extra args are non-NULL, so need not check whether
- * fn() is strict. Would need to do more work here to support
- * arrays containing nulls, too.
+ * We assume the extra args are non-NULL, so need not check whether fn()
+ * is strict. Would need to do more work here to support arrays
+ * containing nulls, too.
*/
fcinfo->arg[0] = elt;
fcinfo->argnull[0] = false;
memcpy(ARR_DIMS(result), ARR_DIMS(v), 2 * ndim * sizeof(int));
/*
- * Note: do not risk trying to pfree the results of the called
- * function
+ * Note: do not risk trying to pfree the results of the called function
*/
CopyArrayEls(ARR_DATA_PTR(result), values, nitems,
typlen, typbyval, typalign, false);
if (element_type != ARR_ELEMTYPE(array2))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot compare arrays of different element types")));
+ errmsg("cannot compare arrays of different element types")));
/* fast path if the arrays do not have the same number of elements */
if (nitems1 != nitems2)
else
{
/*
- * We arrange to look up the equality function only once per
- * series of calls, assuming the element type doesn't change
- * underneath us. The typcache is used so that we have no memory
- * leakage when being used as an index support function.
+ * We arrange to look up the equality function only once per series of
+ * calls, assuming the element type doesn't change underneath us. The
+ * typcache is used so that we have no memory leakage when being used
+ * as an index support function.
*/
typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
if (typentry == NULL ||
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
fcinfo->flinfo->fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
if (element_type != ARR_ELEMTYPE(array2))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot compare arrays of different element types")));
+ errmsg("cannot compare arrays of different element types")));
/*
- * We arrange to look up the comparison function only once per series
- * of calls, assuming the element type doesn't change underneath us.
- * The typcache is used so that we have no memory leakage when being
- * used as an index support function.
+ * We arrange to look up the comparison function only once per series of
+ * calls, assuming the element type doesn't change underneath us. The
+ * typcache is used so that we have no memory leakage when being used as
+ * an index support function.
*/
typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
if (typentry == NULL ||
if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify a comparison function for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify a comparison function for type %s",
+ format_type_be(element_type))));
fcinfo->flinfo->fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
errmsg("target type is not an array")));
/*
- * We don't deal with domain constraints yet, so bail out. This
- * isn't currently a problem, because we also don't support arrays
- * of domain type elements either. But in the future we might. At
- * that point consideration should be given to removing the check
- * below and adding a domain constraints check to the coercion.
+ * We don't deal with domain constraints yet, so bail out. This isn't
+ * currently a problem, because we also don't support arrays of domain
+ * type elements either. But in the future we might. At that point
+ * consideration should be given to removing the check below and
+ * adding a domain constraints check to the coercion.
*/
if (getBaseType(tgt_elem_type) != tgt_elem_type)
ereport(ERROR,
}
/*
- * If it's binary-compatible, modify the element type in the array
- * header, but otherwise leave the array as we received it.
+ * If it's binary-compatible, modify the element type in the array header,
+ * but otherwise leave the array as we received it.
*/
if (my_extra->coerce_finfo.fn_oid == InvalidOid)
{
/*
* Use array_map to apply the function to each array element.
*
- * We pass on the desttypmod and isExplicit flags whether or not the
- * function wants them.
+ * We pass on the desttypmod and isExplicit flags whether or not the function
+ * wants them.
*/
InitFunctionCallInfoData(locfcinfo, &my_extra->coerce_finfo, 3,
NULL, NULL);
PG_RETURN_ARRAYTYPE_P(v);
/*
- * We arrange to look up the element type's coercion function only
- * once per series of calls, assuming the element type doesn't change
+ * We arrange to look up the element type's coercion function only once
+ * per series of calls, assuming the element type doesn't change
* underneath us.
*/
my_extra = (alc_extra *) fmgr_info->fn_extra;
if ((astate->nelems % ARRAY_ELEMS_CHUNKSIZE) == 0)
astate->dvalues = (Datum *)
repalloc(astate->dvalues,
- (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(Datum));
+ (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(Datum));
}
if (disnull)
Datum
array_larger(PG_FUNCTION_ARGS)
{
- ArrayType *v1,
- *v2,
- *result;
+ ArrayType *v1,
+ *v2,
+ *result;
v1 = PG_GETARG_ARRAYTYPE_P(0);
v2 = PG_GETARG_ARRAYTYPE_P(1);
Datum
array_smaller(PG_FUNCTION_ARGS)
{
- ArrayType *v1,
- *v2,
- *result;
+ ArrayType *v1,
+ *v2,
+ *result;
v1 = PG_GETARG_ARRAYTYPE_P(0);
v2 = PG_GETARG_ARRAYTYPE_P(1);
* Portions Copyright (c) 1999-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ascii.c,v 1.25 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ascii.c,v 1.26 2005/10/15 02:49:28 momjian Exp $
*
*-----------------------------------------------------------------------
*/
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("encoding conversion from %s to ASCII not supported",
- pg_encoding_to_char(enc))));
+ errmsg("encoding conversion from %s to ASCII not supported",
+ pg_encoding_to_char(enc))));
return; /* keep compiler quiet */
}
* workings can be found in the book "Software Solutions in C" by
* Dale Schumacher, Academic Press, ISBN: 0-12-632360-7.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.65 2005/07/21 04:41:43 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.66 2005/10/15 02:49:28 momjian Exp $
*/
#include "postgres.h"
struct lconv *lconvert = PGLC_localeconv();
/*
- * frac_digits will be CHAR_MAX in some locales, notably C. However,
- * just testing for == CHAR_MAX is risky, because of compilers like
- * gcc that "helpfully" let you alter the platform-standard definition
- * of whether char is signed or not. If we are so unfortunate as to
- * get compiled with a nonstandard -fsigned-char or -funsigned-char
- * switch, then our idea of CHAR_MAX will not agree with libc's. The
- * safest course is not to test for CHAR_MAX at all, but to impose a
- * range check for plausible frac_digits values.
+ * frac_digits will be CHAR_MAX in some locales, notably C. However, just
+ * testing for == CHAR_MAX is risky, because of compilers like gcc that
+ * "helpfully" let you alter the platform-standard definition of whether
+ * char is signed or not. If we are so unfortunate as to get compiled
+ * with a nonstandard -fsigned-char or -funsigned-char switch, then our
+ * idea of CHAR_MAX will not agree with libc's. The safest course is not
+ * to test for CHAR_MAX at all, but to impose a range check for plausible
+ * frac_digits values.
*/
fpoint = lconvert->frac_digits;
if (fpoint < 0 || fpoint > 10)
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type money: \"%s\"", str)));
+ errmsg("invalid input syntax for type money: \"%s\"", str)));
result = value * sgn;
points = 2; /* best guess in this case, I think */
/*
- * As with frac_digits, must apply a range check to mon_grouping to
- * avoid being fooled by variant CHAR_MAX values.
+ * As with frac_digits, must apply a range check to mon_grouping to avoid
+ * being fooled by variant CHAR_MAX values.
*/
mon_group = *lconvert->mon_grouping;
if (mon_group <= 0 || mon_group > 6)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/char.c,v 1.42 2004/12/31 22:01:21 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/char.c,v 1.43 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char result;
/*
- * An empty input string is converted to \0 (for consistency with
- * charin). If the input is longer than one character, the excess data
- * is silently discarded.
+ * An empty input string is converted to \0 (for consistency with charin).
+ * If the input is longer than one character, the excess data is silently
+ * discarded.
*/
if (VARSIZE(arg1) > VARHDRSZ)
result = *(VARDATA(arg1));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.121 2005/10/09 17:21:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.122 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include
#include
#include
-#include
+#include
#include "access/hash.h"
#include "libpq/pqformat.h"
#endif
-static int time2tm(TimeADT time, struct pg_tm *tm, fsec_t *fsec);
-static int timetz2tm(TimeTzADT *time, struct pg_tm *tm, fsec_t *fsec, int *tzp);
-static int tm2time(struct pg_tm *tm, fsec_t fsec, TimeADT *result);
-static int tm2timetz(struct pg_tm *tm, fsec_t fsec, int tz, TimeTzADT *result);
+static int time2tm(TimeADT time, struct pg_tm * tm, fsec_t *fsec);
+static int timetz2tm(TimeTzADT *time, struct pg_tm * tm, fsec_t *fsec, int *tzp);
+static int tm2time(struct pg_tm * tm, fsec_t fsec, TimeADT *result);
+static int tm2timetz(struct pg_tm * tm, fsec_t fsec, int tz, TimeTzADT *result);
static void AdjustTimeForTypmod(TimeADT *time, int32 typmod);
/*****************************************************************************
date_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
- DateADT date;
+ DateADT date;
fsec_t fsec;
struct pg_tm tt,
*tm = &tt;
case DTK_CURRENT:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"current\" is no longer supported")));
+ errmsg("date/time value \"current\" is no longer supported")));
GetCurrentDateTime(tm);
break;
Datum
date_out(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
char *result;
struct pg_tm tt,
*tm = &tt;
char buf[MAXDATELEN + 1];
- j2date(date +POSTGRES_EPOCH_JDATE,
+ j2date(date + POSTGRES_EPOCH_JDATE,
&(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
EncodeDateOnly(tm, DateStyle, buf);
Datum
date_send(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
StringInfoData buf;
pq_begintypsend(&buf);
#ifdef HAVE_INT64_TIMESTAMP
result = dateVal * USECS_PER_DAY + tz * USECS_PER_SEC;
#else
- result = dateVal * (double)SECS_PER_DAY + tz;
+ result = dateVal * (double) SECS_PER_DAY + tz;
#endif
return result;
Datum
timestamp_date(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
DateADT result;
struct pg_tm tt,
*tm = &tt;
case NOEND_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert reserved abstime value to date")));
+ errmsg("cannot convert reserved abstime value to date")));
/*
- * pretend to drop through to make compiler think that result
- * will be set
+ * pretend to drop through to make compiler think that result will
+ * be set
*/
default:
date_text(PG_FUNCTION_ARGS)
{
/* Input is a Date, but may as well leave it in Datum form */
- Datum date = PG_GETARG_DATUM(0);
+ Datum date = PG_GETARG_DATUM(0);
text *result;
char *str;
int len;
* Convert a tm structure to a time data type.
*/
static int
-tm2time(struct pg_tm *tm, fsec_t fsec, TimeADT *result)
+tm2time(struct pg_tm * tm, fsec_t fsec, TimeADT *result)
{
#ifdef HAVE_INT64_TIMESTAMP
*result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec)
- * USECS_PER_SEC) + fsec;
+ * USECS_PER_SEC) + fsec;
#else
*result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
* local time zone. If out of this range, leave as GMT. - tgl 97/05/27
*/
static int
-time2tm(TimeADT time, struct pg_tm *tm, fsec_t *fsec)
+time2tm(TimeADT time, struct pg_tm * tm, fsec_t *fsec)
{
#ifdef HAVE_INT64_TIMESTAMP
tm->tm_hour = time / USECS_PER_HOUR;
recalc:
trem = time;
- TMODULO(trem, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(trem, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(trem, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(trem, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(trem, tm->tm_sec, 1.0);
trem = TIMEROUND(trem);
/* roundoff may need to propagate to higher-order fields */
time_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
INT64CONST(5),
INT64CONST(0)
};
-
#else
/* note MAX_TIME_PRECISION differs in this case */
static const double TimeScales[MAX_TIME_PRECISION + 1] = {
if (typmod >= 0 && typmod <= MAX_TIME_PRECISION)
{
/*
- * Note: this round-to-nearest code is not completely consistent
- * about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
- * round-to-nearest-even, but the integer code always rounds up
- * (away from zero). Is it worth trying to be consistent?
+ * Note: this round-to-nearest code is not completely consistent about
+ * rounding values that are exactly halfway between integral values.
+ * On most platforms, rint() will implement round-to-nearest-even, but
+ * the integer code always rounds up (away from zero). Is it worth
+ * trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (*time >= INT64CONST(0))
*time = ((*time + TimeOffsets[typmod]) / TimeScales[typmod]) *
- TimeScales[typmod];
+ TimeScales[typmod];
else
*time = -((((-*time) + TimeOffsets[typmod]) / TimeScales[typmod]) *
- TimeScales[typmod]);
+ TimeScales[typmod]);
#else
- *time = rint((double) * time * TimeScales[typmod]) / TimeScales[typmod];
+ *time = rint((double) *time * TimeScales[typmod]) / TimeScales[typmod];
#endif
}
}
overlaps_time(PG_FUNCTION_ARGS)
{
/*
- * The arguments are TimeADT, but we leave them as generic Datums to
- * avoid dereferencing nulls (TimeADT is pass-by-reference!)
+ * The arguments are TimeADT, but we leave them as generic Datums to avoid
+ * dereferencing nulls (TimeADT is pass-by-reference!)
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
(DatumGetTimeADT(t1) < DatumGetTimeADT(t2))
/*
- * If both endpoints of interval 1 are null, the result is null
- * (unknown). If just one endpoint is null, take ts1 as the non-null
- * one. Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null (unknown).
+ * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
+ * take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
if (TIMEADT_GT(ts1, ts2))
{
/*
- * This case is ts1 < te2 OR te1 < te2, which may look redundant
- * but in the presence of nulls it's not quite completely so.
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant but
+ * in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
PG_RETURN_NULL();
PG_RETURN_NULL();
/*
- * If te1 is not null then we had ts1 <= te1 above, and we just
- * found ts1 >= te2, hence te1 >= te2.
+ * If te1 is not null then we had ts1 <= te1 above, and we just found
+ * ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
}
PG_RETURN_NULL();
/*
- * If te2 is not null then we had ts2 <= te2 above, and we just
- * found ts2 >= te1, hence te2 >= te1.
+ * If te2 is not null then we had ts2 <= te2 above, and we just found
+ * ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
{
/*
* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else
- * null".
+ * rather silly way of saying "true if both are nonnull, else null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
Datum
timestamp_time(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
TimeADT result;
struct pg_tm tt,
*tm = &tt;
* USECS_PER_DAY) - timestamp;
*/
result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) *
- USECS_PER_SEC) + fsec;
+ USECS_PER_SEC) + fsec;
#else
result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
* USECS_PER_DAY) - timestamp;
*/
result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) *
- USECS_PER_SEC) + fsec;
+ USECS_PER_SEC) + fsec;
#else
result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
Datum
datetime_timestamp(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
TimeADT time = PG_GETARG_TIMEADT(1);
Timestamp result;
result = DatumGetTimestamp(DirectFunctionCall1(date_timestamp,
- DateADTGetDatum(date)));
+ DateADTGetDatum(date)));
result += time;
PG_RETURN_TIMESTAMP(result);
}
#else
result = span->time;
- if (result >= (double)SECS_PER_DAY || result < 0)
- result -= floor(result / (double)SECS_PER_DAY) * (double)SECS_PER_DAY;
+ if (result >= (double) SECS_PER_DAY || result < 0)
+ result -= floor(result / (double) SECS_PER_DAY) * (double) SECS_PER_DAY;
#endif
PG_RETURN_TIMEADT(result);
TimeADT time1;
result = time + span->time;
- TMODULO(result, time1, (double)SECS_PER_DAY);
+ TMODULO(result, time1, (double) SECS_PER_DAY);
if (result < 0)
result += SECS_PER_DAY;
#endif
TimeADT time1;
result = time - span->time;
- TMODULO(result, time1, (double)SECS_PER_DAY);
+ TMODULO(result, time1, (double) SECS_PER_DAY);
if (result < 0)
result += SECS_PER_DAY;
#endif
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"time\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"time\" units \"%s\" not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ PointerGetDatum(units))))));
result = 0;
}
* Convert a tm structure to a time data type.
*/
static int
-tm2timetz(struct pg_tm *tm, fsec_t fsec, int tz, TimeTzADT *result)
+tm2timetz(struct pg_tm * tm, fsec_t fsec, int tz, TimeTzADT *result)
{
#ifdef HAVE_INT64_TIMESTAMP
result->time = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) *
timetz_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
* Convert TIME WITH TIME ZONE data type to POSIX time structure.
*/
static int
-timetz2tm(TimeTzADT *time, struct pg_tm *tm, fsec_t *fsec, int *tzp)
+timetz2tm(TimeTzADT *time, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 trem = time->time;
double trem = time->time;
recalc:
- TMODULO(trem, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(trem, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(trem, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(trem, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(trem, tm->tm_sec, 1.0);
trem = TIMEROUND(trem);
/* roundoff may need to propagate to higher-order fields */
/*
* Specify hash length as sizeof(double) + sizeof(int4), not as
- * sizeof(TimeTzADT), so that any garbage pad bytes in the structure
- * won't be included in the hash!
+ * sizeof(TimeTzADT), so that any garbage pad bytes in the structure won't
+ * be included in the hash!
*/
return hash_any((unsigned char *) key, sizeof(key->time) + sizeof(key->zone));
}
result->time += USECS_PER_DAY;
#else
result->time = time->time + span->time;
- TMODULO(result->time, time1.time, (double)SECS_PER_DAY);
+ TMODULO(result->time, time1.time, (double) SECS_PER_DAY);
if (result->time < 0)
result->time += SECS_PER_DAY;
#endif
result->time += USECS_PER_DAY;
#else
result->time = time->time - span->time;
- TMODULO(result->time, time1.time, (double)SECS_PER_DAY);
+ TMODULO(result->time, time1.time, (double) SECS_PER_DAY);
if (result->time < 0)
result->time += SECS_PER_DAY;
#endif
overlaps_timetz(PG_FUNCTION_ARGS)
{
/*
- * The arguments are TimeTzADT *, but we leave them as generic Datums
- * for convenience of notation --- and to avoid dereferencing nulls.
+ * The arguments are TimeTzADT *, but we leave them as generic Datums for
+ * convenience of notation --- and to avoid dereferencing nulls.
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
DatumGetBool(DirectFunctionCall2(timetz_lt,t1,t2))
/*
- * If both endpoints of interval 1 are null, the result is null
- * (unknown). If just one endpoint is null, take ts1 as the non-null
- * one. Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null (unknown).
+ * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
+ * take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
if (TIMETZ_GT(ts1, ts2))
{
/*
- * This case is ts1 < te2 OR te1 < te2, which may look redundant
- * but in the presence of nulls it's not quite completely so.
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant but
+ * in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
PG_RETURN_NULL();
PG_RETURN_NULL();
/*
- * If te1 is not null then we had ts1 <= te1 above, and we just
- * found ts1 >= te2, hence te1 >= te2.
+ * If te1 is not null then we had ts1 <= te1 above, and we just found
+ * ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
}
PG_RETURN_NULL();
/*
- * If te2 is not null then we had ts2 <= te2 above, and we just
- * found ts2 >= te1, hence te2 >= te1.
+ * If te2 is not null then we had ts2 <= te2 above, and we just found
+ * ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
{
/*
* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else
- * null".
+ * rather silly way of saying "true if both are nonnull, else null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
Datum
datetimetz_timestamptz(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
TimeTzADT *time = PG_GETARG_TIMETZADT_P(1);
TimestampTz result;
#ifdef HAVE_INT64_TIMESTAMP
result = date * USECS_PER_DAY + time->time + time->zone * USECS_PER_SEC;
#else
- result = date * (double)SECS_PER_DAY + time->time + time->zone;
+ result = date * (double) SECS_PER_DAY + time->time + time->zone;
#endif
PG_RETURN_TIMESTAMP(result);
if (VARSIZE(str) - VARHDRSZ > MAXDATELEN)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for type time with time zone: \"%s\"",
- VARDATA(str))));
+ errmsg("invalid input syntax for type time with time zone: \"%s\"",
+ VARDATA(str))));
sp = VARDATA(str);
dp = dstr;
case DTK_TZ_MINUTE:
result = -tz;
result /= SECS_PER_MINUTE;
- FMODULO(result, dummy, (double)SECS_PER_MINUTE);
+ FMODULO(result, dummy, (double) SECS_PER_MINUTE);
break;
case DTK_TZ_HOUR:
dummy = -tz;
- FMODULO(dummy, result, (double)SECS_PER_HOUR);
+ FMODULO(dummy, result, (double) SECS_PER_HOUR);
break;
case DTK_MICROSEC:
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("\"time with time zone\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ errmsg("\"time with time zone\" units \"%s\" not recognized",
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("\"time with time zone\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ errmsg("\"time with time zone\" units \"%s\" not recognized",
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
TimeTzADT *t = PG_GETARG_TIMETZADT_P(1);
TimeTzADT *result;
int tz;
- char tzname[TZ_STRLEN_MAX + 1];
- int len;
+ char tzname[TZ_STRLEN_MAX + 1];
+ int len;
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the timezone
- * database (to handle cases like "America/New_York"), and if that
- * fails, we look in the date token table (to handle cases like "EST").
- */
+ * Look up the requested timezone. First we look in the timezone database
+ * (to handle cases like "America/New_York"), and if that fails, we look
+ * in the date token table (to handle cases like "EST").
+ */
len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
memcpy(tzname, VARDATA(zone), len);
tzname[len] = '\0';
if (tzp)
{
/* Get the offset-from-GMT that is valid today for the selected zone */
- pg_time_t now;
+ pg_time_t now;
struct pg_tm *tm;
now = time(NULL);
}
result = (TimeTzADT *) palloc(sizeof(TimeTzADT));
-
+
#ifdef HAVE_INT64_TIMESTAMP
result->time = t->time + (t->zone - tz) * USECS_PER_SEC;
while (result->time < INT64CONST(0))
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"interval\" time zone \"%s\" not valid",
DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = -(zone->time / USECS_PER_SEC);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.159 2005/10/14 11:47:57 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.160 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int DecodeNumber(int flen, char *field, bool haveTextMonth,
int fmask, int *tmask,
- struct pg_tm *tm, fsec_t *fsec, int *is2digits);
+ struct pg_tm * tm, fsec_t *fsec, int *is2digits);
static int DecodeNumberField(int len, char *str,
int fmask, int *tmask,
- struct pg_tm *tm, fsec_t *fsec, int *is2digits);
+ struct pg_tm * tm, fsec_t *fsec, int *is2digits);
static int DecodeTime(char *str, int fmask, int *tmask,
- struct pg_tm *tm, fsec_t *fsec);
+ struct pg_tm * tm, fsec_t *fsec);
static int DecodeTimezone(char *str, int *tzp);
static int DecodePosixTimezone(char *str, int *tzp);
static datetkn *datebsearch(char *key, datetkn *base, unsigned int nel);
-static int DecodeDate(char *str, int fmask, int *tmask, struct pg_tm *tm);
+static int DecodeDate(char *str, int fmask, int *tmask, struct pg_tm * tm);
static void TrimTrailingZeros(char *str);
{"lhdt", DTZ, POS(44)}, /* Lord Howe Daylight Time, Australia */
{"lhst", TZ, POS(42)}, /* Lord Howe Standard Time, Australia */
{"ligt", TZ, POS(40)}, /* From Melbourne, Australia */
- {"lint", TZ, POS(56)}, /* Line Islands Time (Kiribati; +14
- * hours!) */
+ {"lint", TZ, POS(56)}, /* Line Islands Time (Kiribati; +14 hours!) */
{"lkt", TZ, POS(24)}, /* Lanka Time */
{"m", UNITS, DTK_MONTH}, /* "month" for ISO input */
{"magst", DTZ, POS(48)}, /* Magadan Summer Time */
* Get the transaction start time ("now()") broken down as a struct pg_tm.
*/
void
-GetCurrentDateTime(struct pg_tm *tm)
+GetCurrentDateTime(struct pg_tm * tm)
{
int tz;
fsec_t fsec;
* including fractional seconds and timezone offset.
*/
void
-GetCurrentTimeUsec(struct pg_tm *tm, fsec_t *fsec, int *tzp)
+GetCurrentTimeUsec(struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
int tz;
*
* timestr - the input string
* workbuf - workspace for field string storage. This must be
- * larger than the largest legal input for this datetime type --
- * some additional space will be needed to NUL terminate fields.
+ * larger than the largest legal input for this datetime type --
+ * some additional space will be needed to NUL terminate fields.
* buflen - the size of workbuf
* field[] - pointers to field strings are returned in this array
* ftype[] - field type indicators are returned in this array
const char *bufend = workbuf + buflen;
/*
- * Set the character pointed-to by "bufptr" to "newchar", and
- * increment "bufptr". "end" gives the end of the buffer -- we
- * return an error if there is no space left to append a character
- * to the buffer. Note that "bufptr" is evaluated twice.
+ * Set the character pointed-to by "bufptr" to "newchar", and increment
+ * "bufptr". "end" gives the end of the buffer -- we return an error if
+ * there is no space left to append a character to the buffer. Note that
+ * "bufptr" is evaluated twice.
*/
#define APPEND_CHAR(bufptr, end, newchar) \
do \
APPEND_CHAR(bufp, bufend, *cp++);
/*
- * insist that the delimiters match to get a
- * three-field date.
+ * insist that the delimiters match to get a three-field
+ * date.
*/
if (*cp == delim)
{
}
/*
- * otherwise, number only and will determine year, month, day,
- * or concatenated fields later...
+ * otherwise, number only and will determine year, month, day, or
+ * concatenated fields later...
*/
else
ftype[nf] = DTK_NUMBER;
}
/*
- * text? then date string, month, day of week, special, or
- * timezone
+ * text? then date string, month, day of week, special, or timezone
*/
else if (isalpha((unsigned char) *cp))
{
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
/*
- * Full date string with leading text month? Could also be a
- * POSIX time zone...
+ * Full date string with leading text month? Could also be a POSIX
+ * time zone...
*/
if (*cp == '-' || *cp == '/' || *cp == '.')
{
*/
int
DecodeDateTime(char **field, int *ftype, int nf,
- int *dtype, struct pg_tm *tm, fsec_t *fsec, int *tzp)
+ int *dtype, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
int fmask = 0,
tmask,
type;
- int ptype = 0; /* "prefix type" for ISO y2001m02d04
- * format */
+ int ptype = 0; /* "prefix type" for ISO y2001m02d04 format */
int i;
int val;
int dterr;
/*
* Starts with a digit but we already have a time
- * field? Then we are in trouble with a date and
- * time already...
+ * field? Then we are in trouble with a date and time
+ * already...
*/
if ((fmask & DTK_TIME_M) == DTK_TIME_M)
return DTERR_BAD_FORMAT;
*cp = '\0';
/*
- * Then read the rest of the field as a
- * concatenated time
+ * Then read the rest of the field as a concatenated
+ * time
*/
dterr = DecodeNumberField(strlen(field[i]), field[i],
fmask,
* DecodeTime()
*/
/* test for > 24:00:00 */
- if (tm->tm_hour > 24 ||
- (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)))
+ if (tm->tm_hour > 24 ||
+ (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)))
return DTERR_FIELD_OVERFLOW;
break;
return dterr;
/*
- * Already have a time zone? Then maybe this is the
- * second field of a POSIX time: EST+3 (equivalent to
- * PST)
+ * Already have a time zone? Then maybe this is the second
+ * field of a POSIX time: EST+3 (equivalent to PST)
*/
if (i > 0 && (fmask & DTK_M(TZ)) != 0 &&
ftype[i - 1] == DTK_TZ &&
case DTK_TIME:
/* previous field was "t" for ISO time */
dterr = DecodeNumberField(strlen(field[i]), field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
else if (cp != NULL && flen - strlen(cp) > 2)
{
/*
- * Interpret as a concatenated date or time Set
- * the type field to allow decoding other fields
- * later. Example: 20011223 or 040506
+ * Interpret as a concatenated date or time Set the
+ * type field to allow decoding other fields later.
+ * Example: 20011223 or 040506
*/
dterr = DecodeNumberField(flen, field[i], fmask,
&tmask, tm,
{
case DTK_CURRENT:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"current\" is no longer supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("date/time value \"current\" is no longer supported")));
return DTERR_BAD_FORMAT;
break;
*dtype = DTK_DATE;
GetCurrentDateTime(tm);
j2date(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - 1,
- &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
+ &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
*dtype = DTK_DATE;
GetCurrentDateTime(tm);
j2date(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + 1,
- &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
+ &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
case MONTH:
/*
- * already have a (numeric) month? then see if we
- * can substitute...
+ * already have a (numeric) month? then see if we can
+ * substitute...
*/
if ((fmask & DTK_M(MONTH)) && !haveTextMonth &&
!(fmask & DTK_M(DAY)) && tm->tm_mon >= 1 &&
case DTZMOD:
/*
- * daylight savings time modifier (solves "MET
- * DST" syntax)
+ * daylight savings time modifier (solves "MET DST"
+ * syntax)
*/
tmask |= DTK_M(DTZ);
tm->tm_isdst = 1;
case DTZ:
/*
- * set mask for TZ here _or_ check for DTZ later
- * when getting default timezone
+ * set mask for TZ here _or_ check for DTZ later when
+ * getting default timezone
*/
tmask |= DTK_M(TZ);
tm->tm_isdst = 1;
case ISOTIME:
/*
- * This is a filler field "t" indicating that the
- * next field is time. Try to verify that this is
- * sensible.
+ * This is a filler field "t" indicating that the next
+ * field is time. Try to verify that this is sensible.
*/
tmask = 0;
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("inconsistent use of year %04d and \"BC\"",
- tm->tm_year)));
+ errmsg("inconsistent use of year %04d and \"BC\"",
+ tm->tm_year)));
}
else if (is2digits)
{
}
/*
- * Check for valid day of month, now that we know for sure the
- * month and year. Note we don't use MD_FIELD_OVERFLOW here,
- * since it seems unlikely that "Feb 29" is a YMD-order error.
+ * Check for valid day of month, now that we know for sure the month
+ * and year. Note we don't use MD_FIELD_OVERFLOW here, since it seems
+ * unlikely that "Feb 29" is a YMD-order error.
*/
if (tm->tm_mday > day_tab[isleap(tm->tm_year)][tm->tm_mon - 1])
return DTERR_FIELD_OVERFLOW;
if (tzp != NULL && !(fmask & DTK_M(TZ)))
{
/*
- * daylight savings time modifier but no standard timezone?
- * then error
+ * daylight savings time modifier but no standard timezone? then
+ * error
*/
if (fmask & DTK_M(DTZMOD))
return DTERR_BAD_FORMAT;
* of mktime(), anyway.
*/
int
-DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
+DetermineTimeZoneOffset(struct pg_tm * tm, pg_tz *tzp)
{
int date,
sec;
/*
* First, generate the pg_time_t value corresponding to the given
- * y/m/d/h/m/s taken as GMT time. If this overflows, punt and decide
- * the timezone is GMT. (We only need to worry about overflow on
- * machines where pg_time_t is 32 bits.)
+ * y/m/d/h/m/s taken as GMT time. If this overflows, punt and decide the
+ * timezone is GMT. (We only need to worry about overflow on machines
+ * where pg_time_t is 32 bits.)
*/
if (!IS_VALID_JULIAN(tm->tm_year, tm->tm_mon, tm->tm_mday))
goto overflow;
date = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - UNIX_EPOCH_JDATE;
- day = ((pg_time_t) date) *SECS_PER_DAY;
+ day = ((pg_time_t) date) * SECS_PER_DAY;
if (day / SECS_PER_DAY != date)
goto overflow;
sec = tm->tm_sec + (tm->tm_min + tm->tm_hour * MINS_PER_HOUR) * SECS_PER_MINUTE;
goto overflow;
/*
- * Find the DST time boundary just before or following the target time.
- * We assume that all zones have GMT offsets less than 24 hours, and
- * that DST boundaries can't be closer together than 48 hours, so
- * backing up 24 hours and finding the "next" boundary will work.
+ * Find the DST time boundary just before or following the target time. We
+ * assume that all zones have GMT offsets less than 24 hours, and that DST
+ * boundaries can't be closer together than 48 hours, so backing up 24
+ * hours and finding the "next" boundary will work.
*/
prevtime = mytime - SECS_PER_DAY;
if (mytime < 0 && prevtime > 0)
&before_gmtoff, &before_isdst,
&boundary,
&after_gmtoff, &after_isdst,
- tzp);
+ tzp);
if (res < 0)
goto overflow; /* failure? */
{
/* Non-DST zone, life is simple */
tm->tm_isdst = before_isdst;
- return - (int) before_gmtoff;
+ return -(int) before_gmtoff;
}
/*
if (beforetime <= boundary && aftertime < boundary)
{
tm->tm_isdst = before_isdst;
- return - (int) before_gmtoff;
+ return -(int) before_gmtoff;
}
if (beforetime > boundary && aftertime >= boundary)
{
tm->tm_isdst = after_isdst;
- return - (int) after_gmtoff;
+ return -(int) after_gmtoff;
}
+
/*
- * It's an invalid or ambiguous time due to timezone transition.
- * Prefer the standard-time interpretation.
+ * It's an invalid or ambiguous time due to timezone transition. Prefer
+ * the standard-time interpretation.
*/
if (after_isdst == 0)
{
tm->tm_isdst = after_isdst;
- return - (int) after_gmtoff;
+ return -(int) after_gmtoff;
}
tm->tm_isdst = before_isdst;
- return - (int) before_gmtoff;
+ return -(int) before_gmtoff;
overflow:
/* Given date is out of range, so assume UTC */
*/
int
DecodeTimeOnly(char **field, int *ftype, int nf,
- int *dtype, struct pg_tm *tm, fsec_t *fsec, int *tzp)
+ int *dtype, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
int fmask = 0,
tmask,
case DTK_DATE:
/*
- * Time zone not allowed? Then should not accept dates or
- * time zones no matter what else!
+ * Time zone not allowed? Then should not accept dates or time
+ * zones no matter what else!
*/
if (tzp == NULL)
return DTERR_BAD_FORMAT;
/*
* Starts with a digit but we already have a time
- * field? Then we are in trouble with time
- * already...
+ * field? Then we are in trouble with time already...
*/
if ((fmask & DTK_TIME_M) == DTK_TIME_M)
return DTERR_BAD_FORMAT;
/*
- * Should not get here and fail. Sanity check
- * only...
+ * Should not get here and fail. Sanity check only...
*/
if ((cp = strchr(field[i], '-')) == NULL)
return DTERR_BAD_FORMAT;
*cp = '\0';
/*
- * Then read the rest of the field as a
- * concatenated time
+ * Then read the rest of the field as a concatenated
+ * time
*/
dterr = DecodeNumberField(strlen(field[i]), field[i],
(fmask | DTK_DATE_M),
return dterr;
/*
- * Already have a time zone? Then maybe this is the
- * second field of a POSIX time: EST+3 (equivalent to
- * PST)
+ * Already have a time zone? Then maybe this is the second
+ * field of a POSIX time: EST+3 (equivalent to PST)
*/
if (i > 0 && (fmask & DTK_M(TZ)) != 0 &&
ftype[i - 1] == DTK_TZ &&
tmask |= DTK_TIME_M;
#ifdef HAVE_INT64_TIMESTAMP
dt2time(time * USECS_PER_DAY,
- &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
+ &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#else
dt2time(time * SECS_PER_DAY,
- &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
+ &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#endif
}
break;
case DTK_TIME:
/* previous field was "t" for ISO time */
dterr = DecodeNumberField(strlen(field[i]), field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
else if (flen - strlen(cp) > 2)
{
/*
- * Interpret as a concatenated date or time
- * Set the type field to allow decoding other
- * fields later. Example: 20011223 or 040506
+ * Interpret as a concatenated date or time Set
+ * the type field to allow decoding other fields
+ * later. Example: 20011223 or 040506
*/
dterr = DecodeNumberField(flen, field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
{
case DTK_CURRENT:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"current\" is no longer supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("date/time value \"current\" is no longer supported")));
return DTERR_BAD_FORMAT;
break;
case DTZMOD:
/*
- * daylight savings time modifier (solves "MET
- * DST" syntax)
+ * daylight savings time modifier (solves "MET DST"
+ * syntax)
*/
tmask |= DTK_M(DTZ);
tm->tm_isdst = 1;
case DTZ:
/*
- * set mask for TZ here _or_ check for DTZ later
- * when getting default timezone
+ * set mask for TZ here _or_ check for DTZ later when
+ * getting default timezone
*/
tmask |= DTK_M(TZ);
tm->tm_isdst = 1;
if (tm->tm_hour < 0 || tm->tm_min < 0 || tm->tm_min > 59 ||
tm->tm_sec < 0 || tm->tm_sec > 60 || tm->tm_hour > 24 ||
- /* test for > 24:00:00 */
- (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0 ||
+ /* test for > 24:00:00 */
+ (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0 ||
#ifdef HAVE_INT64_TIMESTAMP
- *fsec > INT64CONST(0))) ||
+ *fsec > INT64CONST(0))) ||
*fsec < INT64CONST(0) || *fsec >= USECS_PER_SEC)
return DTERR_FIELD_OVERFLOW;
#else
- *fsec > 0)) ||
+ *fsec > 0)) ||
*fsec < 0 || *fsec >= 1)
return DTERR_FIELD_OVERFLOW;
#endif
*tmp = &tt;
/*
- * daylight savings time modifier but no standard timezone? then
- * error
+ * daylight savings time modifier but no standard timezone? then error
*/
if (fmask & DTK_M(DTZMOD))
return DTERR_BAD_FORMAT;
* Insist on a complete set of fields.
*/
static int
-DecodeDate(char *str, int fmask, int *tmask, struct pg_tm *tm)
+DecodeDate(char *str, int fmask, int *tmask, struct pg_tm * tm)
{
fsec_t fsec;
int nf = 0;
* can be used to represent time spans.
*/
static int
-DecodeTime(char *str, int fmask, int *tmask, struct pg_tm *tm, fsec_t *fsec)
+DecodeTime(char *str, int fmask, int *tmask, struct pg_tm * tm, fsec_t *fsec)
{
char *cp;
*/
static int
DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
- int *tmask, struct pg_tm *tm, fsec_t *fsec, int *is2digits)
+ int *tmask, struct pg_tm * tm, fsec_t *fsec, int *is2digits)
{
int val;
char *cp;
double frac;
/*
- * More than two digits before decimal point? Then could be a date
- * or a run-together time: 2001.360 20011225 040506.789
+ * More than two digits before decimal point? Then could be a date or
+ * a run-together time: 2001.360 20011225 040506.789
*/
if (cp - str > 2)
{
case 0:
/*
- * Nothing so far; make a decision about what we think the
- * input is. There used to be lots of heuristics here, but
- * the consensus now is to be paranoid. It *must* be either
+ * Nothing so far; make a decision about what we think the input
+ * is. There used to be lots of heuristics here, but the
+ * consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
*/
if (haveTextMonth)
{
/*
- * We are at the first numeric field of a date that
- * included a textual month name. We want to support the
- * variants MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as
- * unambiguous inputs. We will also accept MON-DD-YY or
- * DD-MON-YY in either DMY or MDY modes, as well as
- * YY-MON-DD in YMD mode.
+ * We are at the first numeric field of a date that included a
+ * textual month name. We want to support the variants
+ * MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
{
}
/*
- * When processing a year field, mark it for adjustment if it's only
- * one or two digits.
+ * When processing a year field, mark it for adjustment if it's only one
+ * or two digits.
*/
if (*tmask == DTK_M(YEAR))
*is2digits = (flen <= 2);
*/
static int
DecodeNumberField(int len, char *str, int fmask,
- int *tmask, struct pg_tm *tm, fsec_t *fsec, int *is2digits)
+ int *tmask, struct pg_tm * tm, fsec_t *fsec, int *is2digits)
{
char *cp;
/*
- * Have a decimal point? Then this is a date or something with a
- * seconds field...
+ * Have a decimal point? Then this is a date or something with a seconds
+ * field...
*/
if ((cp = strchr(str, '.')) != NULL)
{
* preceding an hh:mm:ss field. - thomas 1998-04-30
*/
int
-DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm *tm, fsec_t *fsec)
+DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm * tm, fsec_t *fsec)
{
int is_before = FALSE;
char *cp;
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * A single signed number ends up here, but will be
- * rejected by DecodeTime(). So, work this out to drop
- * through to DTK_NUMBER, which *can* tolerate this.
+ * A single signed number ends up here, but will be rejected
+ * by DecodeTime(). So, work this out to drop through to
+ * DTK_NUMBER, which *can* tolerate this.
*/
cp = field[i] + 1;
while (*cp != '\0' && *cp != ':' && *cp != '.')
/*
* Set the next type to be a day, if units are not
- * specified. This handles the case of '1 +02:03'
- * since we are reading right to left.
+ * specified. This handles the case of '1 +02:03' since we
+ * are reading right to left.
*/
type = DTK_DAY;
tmask = DTK_M(TZ);
(errcode(ERRCODE_DATETIME_FIELD_OVERFLOW),
errmsg("date/time field value out of range: \"%s\"",
str),
- errhint("Perhaps you need a different \"datestyle\" setting.")));
+ errhint("Perhaps you need a different \"datestyle\" setting.")));
break;
case DTERR_INTERVAL_OVERFLOW:
ereport(ERROR,
break;
case DTERR_TZDISP_OVERFLOW:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE),
- errmsg("time zone displacement out of range: \"%s\"",
- str)));
+ (errcode(ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE),
+ errmsg("time zone displacement out of range: \"%s\"",
+ str)));
break;
case DTERR_BAD_FORMAT:
default:
* Encode date as local time.
*/
int
-EncodeDateOnly(struct pg_tm *tm, int style, char *str)
+EncodeDateOnly(struct pg_tm * tm, int style, char *str)
{
if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR)
return -1;
tm->tm_year, tm->tm_mon, tm->tm_mday);
else
sprintf(str, "%04d-%02d-%02d %s",
- -(tm->tm_year - 1), tm->tm_mon, tm->tm_mday, "BC");
+ -(tm->tm_year - 1), tm->tm_mon, tm->tm_mday, "BC");
break;
case USE_SQL_DATES:
* Encode time fields only.
*/
int
-EncodeTimeOnly(struct pg_tm *tm, fsec_t fsec, int *tzp, int style, char *str)
+EncodeTimeOnly(struct pg_tm * tm, fsec_t fsec, int *tzp, int style, char *str)
{
if (tm->tm_hour < 0 || tm->tm_hour > HOURS_PER_DAY)
return -1;
sprintf(str, "%02d:%02d", tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The fractional field widths
- * here should be equal to the larger of MAX_TIME_PRECISION and
+ * Print fractional seconds if any. The fractional field widths here
+ * should be equal to the larger of MAX_TIME_PRECISION and
* MAX_TIMESTAMP_PRECISION.
*/
if (fsec != 0)
* European - dd/mm/yyyy
*/
int
-EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str)
+EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str)
{
int day,
hour,
min;
/*
- * Why are we checking only the month field? Change this to an
- * assert... if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR) return -1;
+ * Why are we checking only the month field? Change this to an assert...
+ * if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR) return -1;
*/
Assert(tm->tm_mon >= 1 && tm->tm_mon <= MONTHS_PER_YEAR);
tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
sprintf(str + strlen(str), ":%02d", tm->tm_sec);
/*
- * tzp == NULL indicates that we don't want *any* time zone
- * info in the output string. *tzn != NULL indicates that we
- * have alpha time zone info available. tm_isdst != -1
- * indicates that we have a valid time zone translation.
+ * tzp == NULL indicates that we don't want *any* time zone info
+ * in the output string. *tzn != NULL indicates that we have alpha
+ * time zone info available. tm_isdst != -1 indicates that we have
+ * a valid time zone translation.
*/
if (tzp != NULL && tm->tm_isdst >= 0)
{
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
strncpy(str, days[tm->tm_wday], 3);
strcpy(str + 3, " ");
-
+
if (DateOrder == DATEORDER_DMY)
sprintf(str + 4, "%02d %3s", tm->tm_mday, months[tm->tm_mon - 1]);
else
sprintf(str + 10, " %02d:%02d", tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
sprintf(str + strlen(str), ":%02d", tm->tm_sec);
sprintf(str + strlen(str), " %04d",
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1));
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1));
if (tzp != NULL && tm->tm_isdst >= 0)
{
{
/*
* We have a time zone, but no string version. Use the
- * numeric form, but be sure to include a leading
- * space to avoid formatting something which would be
- * rejected by the date/time parser later. - thomas
- * 2001-10-19
+ * numeric form, but be sure to include a leading space to
+ * avoid formatting something which would be rejected by
+ * the date/time parser later. - thomas 2001-10-19
*/
hour = -(*tzp / SECS_PER_HOUR);
min = (abs(*tzp) / MINS_PER_HOUR) % MINS_PER_HOUR;
* - thomas 1998-04-30
*/
int
-EncodeInterval(struct pg_tm *tm, fsec_t fsec, int style, char *str)
+EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
{
int is_before = FALSE;
int is_nonzero = FALSE;
/*
* The sign of year and month are guaranteed to match, since they are
- * stored internally as "month". But we'll need to check for is_before
- * and is_nonzero when determining the signs of hour/minute/seconds
- * fields.
+ * stored internally as "month". But we'll need to check for is_before and
+ * is_nonzero when determining the signs of hour/minute/seconds fields.
*/
switch (style)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datum.c,v 1.30 2004/12/31 22:01:21 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datum.c,v 1.31 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (typByVal)
{
/*
- * just compare the two datums. NOTE: just comparing "len" bytes
- * will not do the work, because we do not know how these bytes
- * are aligned inside the "Datum". We assume instead that any
- * given datatype is consistent about how it fills extraneous bits
- * in the Datum.
+ * just compare the two datums. NOTE: just comparing "len" bytes will
+ * not do the work, because we do not know how these bytes are aligned
+ * inside the "Datum". We assume instead that any given datatype is
+ * consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
}
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.5 2005/09/29 22:04:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.6 2005/10/15 02:49:28 momjian Exp $
*
*/
db_dir_size(const char *path)
{
int64 dirsize = 0;
- struct dirent *direntry;
- DIR *dirdesc;
- char filename[MAXPGPATH];
+ struct dirent *direntry;
+ DIR *dirdesc;
+ char filename[MAXPGPATH];
dirdesc = AllocateDir(path);
if (!dirdesc)
- return 0;
+ return 0;
while ((direntry = ReadDir(dirdesc, path)) != NULL)
{
- struct stat fst;
+ struct stat fst;
- if (strcmp(direntry->d_name, ".") == 0 ||
+ if (strcmp(direntry->d_name, ".") == 0 ||
strcmp(direntry->d_name, "..") == 0)
- continue;
+ continue;
snprintf(filename, MAXPGPATH, "%s/%s", path, direntry->d_name);
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not stat \"%s\": %m", filename)));
-
- dirsize += fst.st_size;
+
+ dirsize += fst.st_size;
}
FreeDir(dirdesc);
calculate_database_size(Oid dbOid)
{
int64 totalsize;
- DIR *dirdesc;
- struct dirent *direntry;
- char dirpath[MAXPGPATH];
- char pathname[MAXPGPATH];
+ DIR *dirdesc;
+ struct dirent *direntry;
+ char dirpath[MAXPGPATH];
+ char pathname[MAXPGPATH];
/* Shared storage in pg_global is not counted */
snprintf(dirpath, MAXPGPATH, "%s/pg_tblspc", DataDir);
dirdesc = AllocateDir(dirpath);
if (!dirdesc)
- ereport(ERROR,
+ ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open tablespace directory \"%s\": %m",
dirpath)));
while ((direntry = ReadDir(dirdesc, dirpath)) != NULL)
{
- if (strcmp(direntry->d_name, ".") == 0 ||
+ if (strcmp(direntry->d_name, ".") == 0 ||
strcmp(direntry->d_name, "..") == 0)
- continue;
+ continue;
snprintf(pathname, MAXPGPATH, "%s/pg_tblspc/%s/%u",
DataDir, direntry->d_name, dbOid);
/* Complain if we found no trace of the DB at all */
if (!totalsize)
- ereport(ERROR,
+ ereport(ERROR,
(ERRCODE_UNDEFINED_DATABASE,
errmsg("database with OID %u does not exist", dbOid)));
Datum
pg_database_size_oid(PG_FUNCTION_ARGS)
{
- Oid dbOid = PG_GETARG_OID(0);
+ Oid dbOid = PG_GETARG_OID(0);
PG_RETURN_INT64(calculate_database_size(dbOid));
}
Datum
pg_database_size_name(PG_FUNCTION_ARGS)
{
- Name dbName = PG_GETARG_NAME(0);
- Oid dbOid = get_database_oid(NameStr(*dbName));
+ Name dbName = PG_GETARG_NAME(0);
+ Oid dbOid = get_database_oid(NameStr(*dbName));
if (!OidIsValid(dbOid))
ereport(ERROR,
static int64
calculate_tablespace_size(Oid tblspcOid)
{
- char tblspcPath[MAXPGPATH];
- char pathname[MAXPGPATH];
- int64 totalsize=0;
- DIR *dirdesc;
- struct dirent *direntry;
+ char tblspcPath[MAXPGPATH];
+ char pathname[MAXPGPATH];
+ int64 totalsize = 0;
+ DIR *dirdesc;
+ struct dirent *direntry;
if (tblspcOid == DEFAULTTABLESPACE_OID)
- snprintf(tblspcPath, MAXPGPATH, "%s/base", DataDir);
+ snprintf(tblspcPath, MAXPGPATH, "%s/base", DataDir);
else if (tblspcOid == GLOBALTABLESPACE_OID)
- snprintf(tblspcPath, MAXPGPATH, "%s/global", DataDir);
+ snprintf(tblspcPath, MAXPGPATH, "%s/global", DataDir);
else
snprintf(tblspcPath, MAXPGPATH, "%s/pg_tblspc/%u", DataDir, tblspcOid);
while ((direntry = ReadDir(dirdesc, tblspcPath)) != NULL)
{
- struct stat fst;
+ struct stat fst;
- if (strcmp(direntry->d_name, ".") == 0 ||
+ if (strcmp(direntry->d_name, ".") == 0 ||
strcmp(direntry->d_name, "..") == 0)
- continue;
+ continue;
snprintf(pathname, MAXPGPATH, "%s/%s", tblspcPath, direntry->d_name);
errmsg("could not stat \"%s\": %m", pathname)));
if (fst.st_mode & S_IFDIR)
- totalsize += db_dir_size(pathname);
-
- totalsize += fst.st_size;
+ totalsize += db_dir_size(pathname);
+
+ totalsize += fst.st_size;
}
FreeDir(dirdesc);
-
+
return totalsize;
}
Datum
pg_tablespace_size_oid(PG_FUNCTION_ARGS)
{
- Oid tblspcOid = PG_GETARG_OID(0);
-
+ Oid tblspcOid = PG_GETARG_OID(0);
+
PG_RETURN_INT64(calculate_tablespace_size(tblspcOid));
}
Datum
pg_tablespace_size_name(PG_FUNCTION_ARGS)
{
- Name tblspcName = PG_GETARG_NAME(0);
- Oid tblspcOid = get_tablespace_oid(NameStr(*tblspcName));
+ Name tblspcName = PG_GETARG_NAME(0);
+ Oid tblspcOid = get_tablespace_oid(NameStr(*tblspcName));
if (!OidIsValid(tblspcOid))
ereport(ERROR,
Assert(OidIsValid(rfn->spcNode));
if (rfn->spcNode == DEFAULTTABLESPACE_OID)
- snprintf(dirpath, MAXPGPATH, "%s/base/%u", DataDir, rfn->dbNode);
+ snprintf(dirpath, MAXPGPATH, "%s/base/%u", DataDir, rfn->dbNode);
else if (rfn->spcNode == GLOBALTABLESPACE_OID)
- snprintf(dirpath, MAXPGPATH, "%s/global", DataDir);
+ snprintf(dirpath, MAXPGPATH, "%s/global", DataDir);
else
- snprintf(dirpath, MAXPGPATH, "%s/pg_tblspc/%u/%u",
+ snprintf(dirpath, MAXPGPATH, "%s/pg_tblspc/%u/%u",
DataDir, rfn->spcNode, rfn->dbNode);
- for (segcount = 0; ; segcount++)
+ for (segcount = 0;; segcount++)
{
struct stat fst;
if (segcount == 0)
- snprintf(pathname, MAXPGPATH, "%s/%u",
+ snprintf(pathname, MAXPGPATH, "%s/%u",
dirpath, rfn->relNode);
else
- snprintf(pathname, MAXPGPATH, "%s/%u.%u",
+ snprintf(pathname, MAXPGPATH, "%s/%u.%u",
dirpath, rfn->relNode, segcount);
if (stat(pathname, &fst) < 0)
Datum
pg_relation_size_oid(PG_FUNCTION_ARGS)
{
- Oid relOid=PG_GETARG_OID(0);
+ Oid relOid = PG_GETARG_OID(0);
Relation rel;
int64 size;
RangeVar *relrv;
Relation rel;
int64 size;
-
- relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
+
+ relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = relation_openrv(relrv, AccessShareLock);
-
+
size = calculate_relation_size(&(rel->rd_node));
-
+
relation_close(rel, AccessShareLock);
PG_RETURN_INT64(size);
/*
- * Compute the on-disk size of files for the relation according to the
- * stat function, optionally including heap data, index data, and/or
- * toast data.
+ * Compute the on-disk size of files for the relation according to the
+ * stat function, optionally including heap data, index data, and/or
+ * toast data.
*/
static int64
calculate_total_relation_size(Oid Relid)
if (heapRel->rd_rel->relhasindex)
{
/* recursively include any dependent indexes */
- List *index_oids = RelationGetIndexList(heapRel);
+ List *index_oids = RelationGetIndexList(heapRel);
foreach(cell, index_oids)
{
}
/*
- * Compute on-disk size of files for 'relation' including
- * heap data, index data, and toasted data.
+ * Compute on-disk size of files for 'relation' including
+ * heap data, index data, and toasted data.
*/
Datum
pg_total_relation_size_oid(PG_FUNCTION_ARGS)
{
- Oid relid = PG_GETARG_OID(0);
+ Oid relid = PG_GETARG_OID(0);
PG_RETURN_INT64(calculate_total_relation_size(relid));
}
text *relname = PG_GETARG_TEXT_P(0);
RangeVar *relrv;
Oid relid;
-
- relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
+
+ relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
relid = RangeVarGetRelid(relrv, false);
-
+
PG_RETURN_INT64(calculate_total_relation_size(relid));
}
Datum
pg_size_pretty(PG_FUNCTION_ARGS)
{
- int64 size = PG_GETARG_INT64(0);
- char *result = palloc(50 + VARHDRSZ);
- int64 limit = 10 * 1024;
- int64 mult = 1;
+ int64 size = PG_GETARG_INT64(0);
+ char *result = palloc(50 + VARHDRSZ);
+ int64 limit = 10 * 1024;
+ int64 mult = 1;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " bytes", size);
+ snprintf(VARDATA(result), 50, INT64_FORMAT " bytes", size);
else
{
mult *= 1024;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " kB",
- (size + mult / 2) / mult);
+ snprintf(VARDATA(result), 50, INT64_FORMAT " kB",
+ (size + mult / 2) / mult);
else
{
mult *= 1024;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " MB",
+ snprintf(VARDATA(result), 50, INT64_FORMAT " MB",
(size + mult / 2) / mult);
else
{
mult *= 1024;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " GB",
+ snprintf(VARDATA(result), 50, INT64_FORMAT " GB",
(size + mult / 2) / mult);
else
{
- mult *= 1024;
- snprintf(VARDATA(result), 50, INT64_FORMAT " TB",
+ mult *= 1024;
+ snprintf(VARDATA(result), 50, INT64_FORMAT " TB",
(size + mult / 2) / mult);
}
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/encode.c,v 1.15 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/encode.c,v 1.16 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (s >= srcend)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid hexadecimal data: odd number of digits")));
+ errmsg("invalid hexadecimal data: odd number of digits")));
v2 = get_hex(*s++);
*p++ = v1 | v2;
else
{
/*
- * One backslash, not followed by ### valid octal. Should
- * never get here, since esc_dec_len does same check.
+ * One backslash, not followed by ### valid octal. Should never
+ * get here, since esc_dec_len does same check.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.114 2005/04/06 23:56:07 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.115 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (fabs(val) > FLOAT8_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("type \"double precision\" value out of range: overflow")));
+ errmsg("type \"double precision\" value out of range: overflow")));
if (val != 0.0 && fabs(val) < FLOAT8_MIN)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("type \"double precision\" value out of range: underflow")));
+ errmsg("type \"double precision\" value out of range: underflow")));
}
/*
char *endptr;
/*
- * endptr points to the first character _after_ the sequence we
- * recognized as a valid floating point number. orig_num points to the
- * original input string.
+ * endptr points to the first character _after_ the sequence we recognized
+ * as a valid floating point number. orig_num points to the original input
+ * string.
*/
orig_num = num;
/*
- * Check for an empty-string input to begin with, to avoid the
- * vagaries of strtod() on different platforms.
+ * Check for an empty-string input to begin with, to avoid the vagaries of
+ * strtod() on different platforms.
*/
if (*num == '\0')
ereport(ERROR,
if (endptr == num || errno != 0)
{
/*
- * C99 requires that strtod() accept NaN and [-]Infinity, but not
- * all platforms support that yet (and some accept them but set
- * ERANGE anyway...) Therefore, we check for these inputs
- * ourselves.
+ * C99 requires that strtod() accept NaN and [-]Infinity, but not all
+ * platforms support that yet (and some accept them but set ERANGE
+ * anyway...) Therefore, we check for these inputs ourselves.
*/
if (pg_strncasecmp(num, "NaN", 3) == 0)
{
else
{
/*
- * Many versions of Solaris have a bug wherein strtod sets endptr
- * to point one byte beyond the end of the string when given "inf"
- * or "infinity".
+ * Many versions of Solaris have a bug wherein strtod sets endptr to
+ * point one byte beyond the end of the string when given "inf" or
+ * "infinity".
*/
if (endptr != num && endptr[-1] == '\0')
endptr--;
orig_num)));
/*
- * if we get here, we have a legal double, still need to check to see
- * if it's a legal float4
+ * if we get here, we have a legal double, still need to check to see if
+ * it's a legal float4
*/
if (!isinf(val))
CheckFloat4Val(val);
char *endptr;
/*
- * endptr points to the first character _after_ the sequence we
- * recognized as a valid floating point number. orig_num points to the
- * original input string.
+ * endptr points to the first character _after_ the sequence we recognized
+ * as a valid floating point number. orig_num points to the original input
+ * string.
*/
orig_num = num;
/*
- * Check for an empty-string input to begin with, to avoid the
- * vagaries of strtod() on different platforms.
+ * Check for an empty-string input to begin with, to avoid the vagaries of
+ * strtod() on different platforms.
*/
if (*num == '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
/* skip leading whitespace */
while (*num != '\0' && isspace((unsigned char) *num))
if (endptr == num || errno != 0)
{
/*
- * C99 requires that strtod() accept NaN and [-]Infinity, but not
- * all platforms support that yet (and some accept them but set
- * ERANGE anyway...) Therefore, we check for these inputs
- * ourselves.
+ * C99 requires that strtod() accept NaN and [-]Infinity, but not all
+ * platforms support that yet (and some accept them but set ERANGE
+ * anyway...) Therefore, we check for these inputs ourselves.
*/
if (pg_strncasecmp(num, "NaN", 3) == 0)
{
else if (errno == ERANGE)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"%s\" is out of range for type double precision",
- orig_num)));
+ errmsg("\"%s\" is out of range for type double precision",
+ orig_num)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
}
#ifdef HAVE_BUGGY_SOLARIS_STRTOD
else
{
/*
- * Many versions of Solaris have a bug wherein strtod sets endptr
- * to point one byte beyond the end of the string when given "inf"
- * or "infinity".
+ * Many versions of Solaris have a bug wherein strtod sets endptr to
+ * point one byte beyond the end of the string when given "inf" or
+ * "infinity".
*/
if (endptr != num && endptr[-1] == '\0')
endptr--;
if (*endptr != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
if (!isinf(val))
CheckFloat8Val(val);
float4_cmp_internal(float4 a, float4 b)
{
/*
- * We consider all NANs to be equal and larger than any non-NAN. This
- * is somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * We consider all NANs to be equal and larger than any non-NAN. This is
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (isnan(a))
{
float8_cmp_internal(float8 a, float8 b)
{
/*
- * We consider all NANs to be equal and larger than any non-NAN. This
- * is somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * We consider all NANs to be equal and larger than any non-NAN. This is
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (isnan(a))
{
float8 result;
/*
- * The SQL spec requires that we emit a particular SQLSTATE error code
- * for certain error conditions.
+ * The SQL spec requires that we emit a particular SQLSTATE error code for
+ * certain error conditions.
*/
if ((arg1 == 0 && arg2 < 0) ||
(arg1 < 0 && floor(arg2) != arg2))
errmsg("invalid argument for power function")));
/*
- * We must check both for errno getting set and for a NaN result, in
- * order to deal with the vagaries of different platforms...
+ * We must check both for errno getting set and for a NaN result, in order
+ * to deal with the vagaries of different platforms...
*/
errno = 0;
result = pow(arg1, arg2);
float8 result;
/*
- * We must check both for errno getting set and for a NaN result, in
- * order to deal with the vagaries of different platforms. Also, a
- * zero result implies unreported underflow.
+ * We must check both for errno getting set and for a NaN result, in order
+ * to deal with the vagaries of different platforms. Also, a zero result
+ * implies unreported underflow.
*/
errno = 0;
result = exp(arg1);
float8 result;
/*
- * Emit particular SQLSTATE error codes for ln(). This is required by
- * the SQL standard.
+ * Emit particular SQLSTATE error codes for ln(). This is required by the
+ * SQL standard.
*/
if (arg1 == 0.0)
ereport(ERROR,
float8 result;
/*
- * Emit particular SQLSTATE error codes for log(). The SQL spec
- * doesn't define log(), but it does define ln(), so it makes sense to
- * emit the same error code for an analogous error condition.
+ * Emit particular SQLSTATE error codes for log(). The SQL spec doesn't
+ * define log(), but it does define ln(), so it makes sense to emit the
+ * same error code for an analogous error condition.
*/
if (arg1 == 0.0)
ereport(ERROR,
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we
- * construct a new array with the updated transition data and
- * return it.
+ * parameter in-place to reduce palloc overhead. Otherwise we construct a
+ * new array with the updated transition data and return it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
result = construct_array(transdatums, 3,
FLOAT8OID,
- sizeof(float8), false /* float8 byval */ , 'd');
+ sizeof(float8), false /* float8 byval */ , 'd');
PG_RETURN_ARRAYTYPE_P(result);
}
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we
- * construct a new array with the updated transition data and
- * return it.
+ * parameter in-place to reduce palloc overhead. Otherwise we construct a
+ * new array with the updated transition data and return it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
result = construct_array(transdatums, 3,
FLOAT8OID,
- sizeof(float8), false /* float8 byval */ , 'd');
+ sizeof(float8), false /* float8 byval */ , 'd');
PG_RETURN_ARRAYTYPE_P(result);
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/format_type.c,v 1.40 2005/03/29 00:17:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/format_type.c,v 1.41 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typeform = (Form_pg_type) GETSTRUCT(tuple);
/*
- * Check if it's an array (and not a domain --- we don't want to show
- * the substructure of a domain type). Fixed-length array types such
- * as "name" shouldn't get deconstructed either. As of Postgres 8.1,
- * rather than checking typlen we check the toast property, and don't
- * deconstruct "plain storage" array types --- this is because we don't
- * want to show oidvector as oid[].
+ * Check if it's an array (and not a domain --- we don't want to show the
+ * substructure of a domain type). Fixed-length array types such as
+ * "name" shouldn't get deconstructed either. As of Postgres 8.1, rather
+ * than checking typlen we check the toast property, and don't deconstruct
+ * "plain storage" array types --- this is because we don't want to show
+ * oidvector as oid[].
*/
array_base_type = typeform->typelem;
is_array = false;
/*
- * See if we want to special-case the output for certain built-in
- * types. Note that these special cases should all correspond to
- * special productions in gram.y, to ensure that the type name will be
- * taken as a system type, not a user type of the same name.
+ * See if we want to special-case the output for certain built-in types.
+ * Note that these special cases should all correspond to special
+ * productions in gram.y, to ensure that the type name will be taken as a
+ * system type, not a user type of the same name.
*
* If we do not provide a special-case output here, the type name will be
- * handled the same way as a user type name --- in particular, it will
- * be double-quoted if it matches any lexer keyword. This behavior is
+ * handled the same way as a user type name --- in particular, it will be
+ * double-quoted if it matches any lexer keyword. This behavior is
* essential for some cases, such as types "bit" and "char".
*/
buf = NULL; /* flag for no special case */
{
/*
* bit with typmod -1 is not the same as BIT, which means
- * BIT(1) per SQL spec. Report it as the quoted typename
- * so that parser will not assign a bogus typmod.
+ * BIT(1) per SQL spec. Report it as the quoted typename so
+ * that parser will not assign a bogus typmod.
*/
}
else
else if (typemod_given)
{
/*
- * bpchar with typmod -1 is not the same as CHARACTER,
- * which means CHARACTER(1) per SQL spec. Report it as
- * bpchar so that parser will not assign a bogus typmod.
+ * bpchar with typmod -1 is not the same as CHARACTER, which
+ * means CHARACTER(1) per SQL spec. Report it as bpchar so
+ * that parser will not assign a bogus typmod.
*/
}
else
{
/*
* Default handling: report the name as it appears in the catalog.
- * Here, we must qualify the name if it is not visible in the
- * search path, and we must double-quote it if it's not a standard
- * identifier or if it matches any keyword.
+ * Here, we must qualify the name if it is not visible in the search
+ * path, and we must double-quote it if it's not a standard identifier
+ * or if it matches any keyword.
*/
char *nspname;
char *typname;
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.99 2005/08/18 13:43:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.100 2005/10/15 02:49:28 momjian Exp $
*
*
* Portions Copyright (c) 1999-2005, PostgreSQL Global Development Group
{
const char *name; /* keyword */
int len; /* keyword length */
- int (*action) (int arg, char *inout, /* action for keyword */
- int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data);
+ int (*action) (int arg, char *inout, /* action for keyword */
+ int suf, bool is_to_char, bool is_interval,
+ FormatNode *node, void *data);
int id; /* keyword id */
bool isitdigit; /* is expected output/input digit */
} KeyWord;
* Flags for DCH version
* ----------
*/
-static bool DCH_global_fx = false;
+static bool DCH_global_fx = false;
/* ----------
q,
j,
us,
- yysz; /* is it YY or YYYY ? */
+ yysz; /* is it YY or YYYY ? */
} TmFromChar;
#define ZERO_tmfc(_X) memset(_X, 0, sizeof(TmFromChar))
errmsg("invalid format specification for an interval value"), \
errhint("Intervals are not tied to specific calendar dates."))); \
} while(0)
-
+
/*****************************************************************************
* KeyWords definition & action
*****************************************************************************/
-static int dch_global(int arg, char *inout, int suf, bool is_to_char,
- bool is_interval, FormatNode *node, void *data);
-static int dch_time(int arg, char *inout, int suf, bool is_to_char,
- bool is_interval, FormatNode *node, void *data);
-static int dch_date(int arg, char *inout, int suf, bool is_to_char,
- bool is_interval, FormatNode *node, void *data);
+static int dch_global(int arg, char *inout, int suf, bool is_to_char,
+ bool is_interval, FormatNode *node, void *data);
+static int dch_time(int arg, char *inout, int suf, bool is_to_char,
+ bool is_interval, FormatNode *node, void *data);
+static int dch_date(int arg, char *inout, int suf, bool is_to_char,
+ bool is_interval, FormatNode *node, void *data);
/* ----------
* Suffixes:
* KeyWords index for DATE-TIME version
* ----------
*/
-static const int DCH_index[KeyWord_INDEX_SIZE] = {
+static const int DCH_index[KeyWord_INDEX_SIZE] = {
/*
0 1 2 3 4 5 6 7 8 9
*/
* KeyWords index for NUMBER version
* ----------
*/
-static const int NUM_index[KeyWord_INDEX_SIZE] = {
+static const int NUM_index[KeyWord_INDEX_SIZE] = {
/*
0 1 2 3 4 5 6 7 8 9
*/
*number_p, /* pointer to current number position */
*inout, /* in / out buffer */
*inout_p, /* pointer to current inout position */
- *last_relevant, /* last relevant number after decimal
- * point */
+ *last_relevant, /* last relevant number after decimal point */
*L_negative_sign, /* Locale */
*L_positive_sign,
* ----------
*/
static const KeyWord *index_seq_search(char *str, const KeyWord *kw,
- const int *index);
+ const int *index);
static KeySuffix *suff_search(char *str, KeySuffix *suf, int type);
static void NUMDesc_prepare(NUMDesc *num, FormatNode *n);
static void parse_format(FormatNode *node, char *str, const KeyWord *kw,
KeySuffix *suf, const int *index, int ver, NUMDesc *Num);
static char *DCH_processor(FormatNode *node, char *inout, bool is_to_char,
- bool is_interval, void *data);
+ bool is_interval, void *data);
#ifdef DEBUG_TO_FROM_CHAR
static void dump_index(const KeyWord *k, const int *index);
/* static int is_acdc(char *str, int *len); */
static int seq_search(char *name, char **array, int type, int max, int *len);
static void do_to_timestamp(text *date_txt, text *fmt,
- struct pg_tm *tm, fsec_t *fsec);
+ struct pg_tm * tm, fsec_t *fsec);
static char *fill_str(char *str, int c, int max);
static FormatNode *NUM_cache(int len, NUMDesc *Num, char *pars_str, bool *shouldFree);
static char *int_to_roman(int number);
NUM_cache_remove(last_NUMCacheEntry);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot use \"V\" and decimal point together")));
+ errmsg("cannot use \"V\" and decimal point together")));
}
num->flag |= NUM_F_DECIMAL;
break;
NUM_cache_remove(last_NUMCacheEntry);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot use \"V\" and decimal point together")));
+ errmsg("cannot use \"V\" and decimal point together")));
}
num->flag |= NUM_F_MULTI;
break;
if (!is_to_char && *s == '\0')
/*
- * The input string is shorter than format picture, so it's
- * good time to break this loop...
+ * The input string is shorter than format picture, so it's good
+ * time to break this loop...
*
- * Note: this isn't relevant for TO_CHAR mode, beacuse it use
- * 'inout' allocated by format picture length.
+ * Note: this isn't relevant for TO_CHAR mode, beacuse it use 'inout'
+ * allocated by format picture length.
*/
break;
{
if (n->type == NODE_TYPE_ACTION)
elog(DEBUG_elog_output, "%d:\t NODE_TYPE_ACTION '%s'\t(%s,%s)",
- a, n->key->name, DUMP_THth(n->suffix), DUMP_FM(n->suffix));
+ a, n->key->name, DUMP_THth(n->suffix), DUMP_FM(n->suffix));
else if (n->type == NODE_TYPE_CHAR)
elog(DEBUG_elog_output, "%d:\t NODE_TYPE_CHAR '%c'", a, n->character);
else if (n->type == NODE_TYPE_END)
#ifdef DEBUG_TO_FROM_CHAR
/*
- * elog(DEBUG_elog_output, "N: %c, P: %c, A: %s (%s)", *n, *p,
- * *a, name);
+ * elog(DEBUG_elog_output, "N: %c, P: %c, A: %s (%s)", *n, *p, *a,
+ * name);
*/
#endif
if (*n != *p)
*/
static int
dch_global(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data)
+ FormatNode *node, void *data)
{
if (arg == DCH_FX)
DCH_global_fx = true;
*/
static int
dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data)
+ FormatNode *node, void *data)
{
char *p_inout = inout;
struct pg_tm *tm = NULL;
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? P_M_STR : A_M_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? P_M_STR : A_M_STR));
return strlen(p_inout);
}
else
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? PM_STR : AM_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? PM_STR : AM_STR));
return strlen(p_inout);
}
else
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? p_m_STR : a_m_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? p_m_STR : a_m_STR));
return strlen(p_inout);
}
else
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? pm_STR : am_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? pm_STR : am_STR));
return strlen(p_inout);
}
else
}
/*
- * 25 is 0.25 and 250 is 0.25 too; 025 is 0.025 and not
- * 0.25
+ * 25 is 0.25 and 250 is 0.25 too; 025 is 0.025 and not 0.25
*/
tmfc->ms *= x == 1 ? 100 :
x == 2 ? 10 : 1;
/*
- * elog(DEBUG3, "X: %d, MS: %d, LEN: %d", x, tmfc->ms,
- * len);
+ * elog(DEBUG3, "X: %d, MS: %d, LEN: %d", x, tmfc->ms, len);
*/
return len + SKIP_THth(suf);
}
x == 5 ? 10 : 1;
/*
- * elog(DEBUG3, "X: %d, US: %d, LEN: %d", x, tmfc->us,
- * len);
+ * elog(DEBUG3, "X: %d, US: %d, LEN: %d", x, tmfc->us, len);
*/
return len + SKIP_THth(suf);
}
*/
static int
dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data)
+ FormatNode *node, void *data)
{
char buff[DCH_CACHE_SIZE],
workbuff[32],
tmfc = (TmFromChar *) data;
/*
- * In the FROM-char is not difference between "January" or "JANUARY"
- * or "january", all is before search convert to "first-upper". This
+ * In the FROM-char is not difference between "January" or "JANUARY" or
+ * "january", all is before search convert to "first-upper". This
* convention is used for MONTH, MON, DAY, DY
*/
if (!is_to_char)
return strlen(p_inout);
case DCH_MON:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (!tm->tm_mon)
return -1;
strcpy(inout, months[tm->tm_mon - 1]);
return strlen(p_inout);
case DCH_Mon:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (!tm->tm_mon)
return -1;
strcpy(inout, months[tm->tm_mon - 1]);
return strlen(p_inout);
case DCH_mon:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (!tm->tm_mon)
return -1;
strcpy(inout, months[tm->tm_mon - 1]);
}
break;
case DCH_DAY:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(workbuff, days[tm->tm_wday]);
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, str_toupper(workbuff));
return strlen(p_inout);
case DCH_Day:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, days[tm->tm_wday]);
return strlen(p_inout);
case DCH_day:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, days[tm->tm_wday]);
*inout = pg_tolower((unsigned char) *inout);
return strlen(p_inout);
case DCH_DY:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(inout, days[tm->tm_wday]);
str_toupper(inout);
- return 3; /* truncate */
+ return 3; /* truncate */
case DCH_Dy:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(inout, days[tm->tm_wday]);
- return 3; /* truncate */
+ return 3; /* truncate */
case DCH_dy:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(inout, days[tm->tm_wday]);
*inout = pg_tolower((unsigned char) *inout);
- return 3; /* truncate */
+ return 3; /* truncate */
case DCH_DDD:
if (is_to_char)
}
break;
case DCH_D:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (is_to_char)
{
sprintf(inout, "%d", tm->tm_wday + 1);
if (is_to_char)
{
sprintf(inout, "%0*d", S_FM(suf) ? 0 : 2,
- date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday));
+ date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday));
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
return strlen(p_inout);
arg == DCH_YYYY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(
- tm->tm_year,
- tm->tm_mon,
- tm->tm_mday), is_interval));
+ tm->tm_year,
+ tm->tm_mon,
+ tm->tm_mday), is_interval));
else
sprintf(inout, "%d",
arg == DCH_YYYY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(
- tm->tm_year,
- tm->tm_mon,
- tm->tm_mday), is_interval));
+ tm->tm_year,
+ tm->tm_mon,
+ tm->tm_mday), is_interval));
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
return strlen(p_inout);
arg == DCH_YYY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday),
- is_interval));
+ tm->tm_mon, tm->tm_mday),
+ is_interval));
i = strlen(buff);
strcpy(inout, buff + (i - 3));
if (S_THth(suf))
arg == DCH_YY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday),
- is_interval));
+ tm->tm_mon, tm->tm_mday),
+ is_interval));
i = strlen(buff);
strcpy(inout, buff + (i - 2));
if (S_THth(suf))
sscanf(inout, "%02d", &tmfc->year);
/*
- * 2-digit year: '00' ... '69' = 2000 ... 2069 '70' ...
- * '99' = 1970 ... 1999
+ * 2-digit year: '00' ... '69' = 2000 ... 2069 '70' ... '99'
+ * = 1970 ... 1999
*/
if (tmfc->year < 70)
tmfc->year += 2000;
arg == DCH_Y ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday),
- is_interval));
+ tm->tm_mon, tm->tm_mday),
+ is_interval));
i = strlen(buff);
strcpy(inout, buff + (i - 1));
if (S_THth(suf))
result = palloc((fmt_len * DCH_MAX_ITEM_SIZ) + 1);
/*
- * Allocate new memory if format picture is bigger than static cache
- * and not use cache (call parser always)
+ * Allocate new memory if format picture is bigger than static cache and
+ * not use cache (call parser always)
*/
if (fmt_len > DCH_CACHE_SIZE)
{
ent = DCH_cache_getnew(fmt_str);
/*
- * Not in the cache, must run parser and save a new
- * format-picture to the cache.
+ * Not in the cache, must run parser and save a new format-picture
+ * to the cache.
*/
parse_format(ent->format, fmt_str, DCH_keywords,
DCH_suff, DCH_index, DCH_TYPE, NULL);
pfree(fmt_str);
/*
- * for result is allocated max memory, which current format-picture
- * needs, now it allocate result with real size
+ * for result is allocated max memory, which current format-picture needs,
+ * now it allocate result with real size
*/
if (result && *result)
{
*/
static void
do_to_timestamp(text *date_txt, text *fmt,
- struct pg_tm *tm, fsec_t *fsec)
+ struct pg_tm * tm, fsec_t *fsec)
{
FormatNode *format;
TmFromChar tmfc;
*(fmt_str + fmt_len) = '\0';
/*
- * Allocate new memory if format picture is bigger than static
- * cache and not use cache (call parser always)
+ * Allocate new memory if format picture is bigger than static cache
+ * and not use cache (call parser always)
*/
if (fmt_len > DCH_CACHE_SIZE)
{
DEBUG_TMFC(&tmfc);
/*
- * Convert values that user define for FROM_CHAR
- * (to_date/to_timestamp) to standard 'tm'
+ * Convert values that user define for FROM_CHAR (to_date/to_timestamp) to
+ * standard 'tm'
*/
if (tmfc.ssss)
{
if (tmfc.year)
{
- if (tmfc.yysz==2 && tmfc.cc)
+ if (tmfc.yysz == 2 && tmfc.cc)
{
- /* CC and YY defined
- * why -[2000|1900]? See dch_date() DCH_YY code.
+ /*
+ * CC and YY defined why -[2000|1900]? See dch_date() DCH_YY code.
*/
- tm->tm_year = (tmfc.cc-1)*100 + (tmfc.year >= 2000 ? tmfc.year-2000 : tmfc.year-1900);
+ tm->tm_year = (tmfc.cc - 1) * 100 + (tmfc.year >= 2000 ? tmfc.year - 2000 : tmfc.year - 1900);
}
- else if (tmfc.yysz==1 && tmfc.cc)
+ else if (tmfc.yysz == 1 && tmfc.cc)
{
- /* CC and Y defined
+ /*
+ * CC and Y defined
*/
- tm->tm_year = (tmfc.cc-1)*100 + tmfc.year-2000;
+ tm->tm_year = (tmfc.cc - 1) * 100 + tmfc.year - 2000;
}
else
/* set year (and ignore CC if defined) */
if (!tm->tm_year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("cannot calculate day of year without year information")));
+ errmsg("cannot calculate day of year without year information")));
y = ysum[isleap(tm->tm_year)];
*(str + len) = '\0';
/*
- * Allocate new memory if format picture is bigger than static cache
- * and not use cache (call parser always). This branches sets
- * shouldFree to true, accordingly.
+ * Allocate new memory if format picture is bigger than static cache and
+ * not use cache (call parser always). This branches sets shouldFree to
+ * true, accordingly.
*/
if (len > NUM_CACHE_SIZE)
{
ent = NUM_cache_getnew(str);
/*
- * Not in the cache, must run parser and save a new
- * format-picture to the cache.
+ * Not in the cache, must run parser and save a new format-picture
+ * to the cache.
*/
parse_format(ent->format, str, NUM_keywords,
NULL, NUM_index, NUM_TYPE, &ent->Num);
static void
NUM_numpart_from_char(NUMProc *Np, int id, int plen)
{
- bool isread = FALSE;
-
+ bool isread = FALSE;
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, " --- scan start --- id=%s",
- (id==NUM_0 || id==NUM_9) ? "NUM_0/9" : id==NUM_DEC ? "NUM_DEC" : "???");
+ (id == NUM_0 || id == NUM_9) ? "NUM_0/9" : id == NUM_DEC ? "NUM_DEC" : "???");
#endif
if (*Np->inout_p == ' ')
Np->inout_p++;
#define OVERLOAD_TEST (Np->inout_p >= Np->inout + plen)
-#define AMOUNT_TEST(_s) (plen-(Np->inout_p-Np->inout) >= _s)
+#define AMOUNT_TEST(_s) (plen-(Np->inout_p-Np->inout) >= _s)
if (*Np->inout_p == ' ')
Np->inout_p++;
/*
* read sign before number
*/
- if (*Np->number == ' ' && (id == NUM_0 || id == NUM_9 ) &&
- (Np->read_pre + Np->read_post)==0)
+ if (*Np->number == ' ' && (id == NUM_0 || id == NUM_9) &&
+ (Np->read_pre + Np->read_post) == 0)
{
#ifdef DEBUG_TO_FROM_CHAR
- elog(DEBUG_elog_output, "Try read sign (%c), locale positive: %s, negative: %s",
- *Np->inout_p, Np->L_positive_sign, Np->L_negative_sign);
+ elog(DEBUG_elog_output, "Try read sign (%c), locale positive: %s, negative: %s",
+ *Np->inout_p, Np->L_positive_sign, Np->L_negative_sign);
#endif
/*
*/
if (IS_LSIGN(Np->Num) && Np->Num->lsign == NUM_LSIGN_PRE)
{
- int x=0;
+ int x = 0;
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read locale pre-sign (%c)", *Np->inout_p);
#endif
- if ((x = strlen(Np->L_negative_sign)) &&
+ if ((x = strlen(Np->L_negative_sign)) &&
AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_negative_sign, x)==0)
+ strncmp(Np->inout_p, Np->L_negative_sign, x) == 0)
{
Np->inout_p += x;
*Np->number = '-';
}
- else if ((x = strlen(Np->L_positive_sign)) &&
- AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_positive_sign, x)==0)
+ else if ((x = strlen(Np->L_positive_sign)) &&
+ AMOUNT_TEST(x) &&
+ strncmp(Np->inout_p, Np->L_positive_sign, x) == 0)
{
Np->inout_p += x;
*Np->number = '+';
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read simple sign (%c)", *Np->inout_p);
#endif
+
/*
* simple + - < >
*/
*Np->inout_p == '<'))
{
- *Np->number = '-'; /* set - */
+ *Np->number = '-'; /* set - */
Np->inout_p++;
}
else if (*Np->inout_p == '+')
{
- *Np->number = '+'; /* set + */
+ *Np->number = '+'; /* set + */
Np->inout_p++;
}
}
if (OVERLOAD_TEST)
return;
-
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Scan for numbers (%c), current number: '%s'", *Np->inout_p, Np->number);
#endif
-
+
/*
* read digit
*/
Np->read_pre++;
isread = TRUE;
-
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Read digit (%c)", *Np->inout_p);
#endif
- /*
- * read decimal point
- */
+
+ /*
+ * read decimal point
+ */
}
else if (IS_DECIMAL(Np->Num) && Np->read_dec == FALSE)
{
elog(DEBUG_elog_output, "Try read locale point (%c)",
*Np->inout_p);
#endif
- if (x && AMOUNT_TEST(x) && strncmp(Np->inout_p, Np->decimal, x)==0)
+ if (x && AMOUNT_TEST(x) && strncmp(Np->inout_p, Np->decimal, x) == 0)
{
Np->inout_p += x - 1;
*Np->number_p = '.';
if (OVERLOAD_TEST)
return;
-
+
/*
* Read sign behind "last" number
*
- * We need sign detection because determine exact position of
- * post-sign is difficult:
+ * We need sign detection because determine exact position of post-sign is
+ * difficult:
*
- * FM9999.9999999S -> 123.001-
- * 9.9S -> .5-
- * FM9.999999MI -> 5.01-
+ * FM9999.9999999S -> 123.001- 9.9S -> .5- FM9.999999MI
+ * -> 5.01-
*/
if (*Np->number == ' ' && Np->read_pre + Np->read_post > 0)
{
/*
- * locale sign (NUM_S) is always anchored behind a last number, if:
- * - locale sign expected
- * - last read char was NUM_0/9 or NUM_DEC
- * - and next char is not digit
- */
- if (IS_LSIGN(Np->Num) && isread &&
- (Np->inout_p+1) <= Np->inout + plen &&
- !isdigit((unsigned char) *(Np->inout_p+1)))
+ * locale sign (NUM_S) is always anchored behind a last number, if: -
+ * locale sign expected - last read char was NUM_0/9 or NUM_DEC - and
+ * next char is not digit
+ */
+ if (IS_LSIGN(Np->Num) && isread &&
+ (Np->inout_p + 1) <= Np->inout + plen &&
+ !isdigit((unsigned char) *(Np->inout_p + 1)))
{
- int x;
- char *tmp = Np->inout_p++;
-
+ int x;
+ char *tmp = Np->inout_p++;
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read locale post-sign (%c)", *Np->inout_p);
#endif
- if ((x = strlen(Np->L_negative_sign)) &&
+ if ((x = strlen(Np->L_negative_sign)) &&
AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_negative_sign, x)==0)
+ strncmp(Np->inout_p, Np->L_negative_sign, x) == 0)
{
- Np->inout_p += x-1; /* -1 .. NUM_processor() do inout_p++ */
+ Np->inout_p += x - 1; /* -1 .. NUM_processor() do inout_p++ */
*Np->number = '-';
}
- else if ((x = strlen(Np->L_positive_sign)) &&
- AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_positive_sign, x)==0)
+ else if ((x = strlen(Np->L_positive_sign)) &&
+ AMOUNT_TEST(x) &&
+ strncmp(Np->inout_p, Np->L_positive_sign, x) == 0)
{
- Np->inout_p += x-1; /* -1 .. NUM_processor() do inout_p++ */
+ Np->inout_p += x - 1; /* -1 .. NUM_processor() do inout_p++ */
*Np->number = '+';
}
if (*Np->number == ' ')
/* no sign read */
Np->inout_p = tmp;
}
-
+
/*
* try read non-locale sign, it's happen only if format is not exact
* and we cannot determine sign position of MI/PL/SG, an example:
*
- * FM9.999999MI -> 5.01-
+ * FM9.999999MI -> 5.01-
*
- * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents read wrong formats
- * like to_number('1 -', '9S') where sign is not anchored to last number.
+ * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents read wrong formats like
+ * to_number('1 -', '9S') where sign is not anchored to last number.
*/
- else if (isread==FALSE && IS_LSIGN(Np->Num)==FALSE &&
- (IS_PLUS(Np->Num) || IS_MINUS(Np->Num)))
+ else if (isread == FALSE && IS_LSIGN(Np->Num) == FALSE &&
+ (IS_PLUS(Np->Num) || IS_MINUS(Np->Num)))
{
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read simple post-sign (%c)", *Np->inout_p);
#endif
+
/*
* simple + -
*/
Np->num_in = FALSE;
/*
- * Write sign if real number will write to output Note:
- * IS_PREDEC_SPACE() handle "9.9" --> " .1"
+ * Write sign if real number will write to output Note: IS_PREDEC_SPACE()
+ * handle "9.9" --> " .1"
*/
if (Np->sign_wrote == FALSE &&
(Np->num_curr >= Np->num_pre || (IS_ZERO(Np->Num) && Np->Num->zero_start == Np->num_curr)) &&
Np->inout = inout;
Np->last_relevant = NULL;
Np->read_post = 0;
- Np->read_pre = 0;
+ Np->read_pre = 0;
Np->read_dec = FALSE;
if (Np->Num->zero_start)
if (IS_DECIMAL(Np->Num))
Np->last_relevant = get_last_relevant_decnum(
Np->number +
- ((Np->Num->zero_end - Np->num_pre > 0) ?
- Np->Num->zero_end - Np->num_pre : 0));
+ ((Np->Num->zero_end - Np->num_pre > 0) ?
+ Np->Num->zero_end - Np->num_pre : 0));
}
if (Np->sign_wrote == FALSE && Np->num_pre == 0)
/*
* Create/reading digit/zero/blank/sing
*
- * 'NUM_S' note:
- * The locale sign is anchored to number and we read/write it
- * when we work with first or last number (NUM_0/NUM_9). This
- * is reason why NUM_S missing in follow switch().
+ * 'NUM_S' note: The locale sign is anchored to number and we
+ * read/write it when we work with first or last number
+ * (NUM_0/NUM_9). This is reason why NUM_S missing in follow
+ * switch().
*/
switch (n->key->id)
{
result = DirectFunctionCall3(numeric_in,
CStringGetDatum(numstr),
ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(((precision << 16) | scale) + VARHDRSZ));
+ Int32GetDatum(((precision << 16) | scale) + VARHDRSZ));
pfree(numstr);
return result;
}
Int32GetDatum(0)));
numstr = orgnum =
int_to_roman(DatumGetInt32(DirectFunctionCall1(numeric_int4,
- NumericGetDatum(x))));
+ NumericGetDatum(x))));
pfree(x);
}
else
if (IS_MULTI(&Num))
{
Numeric a = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(10)));
+ Int32GetDatum(10)));
Numeric b = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(Num.multi)));
+ Int32GetDatum(Num.multi)));
x = DatumGetNumeric(DirectFunctionCall2(numeric_power,
NumericGetDatum(a),
NumericGetDatum(b)));
val = DatumGetNumeric(DirectFunctionCall2(numeric_mul,
- NumericGetDatum(value),
- NumericGetDatum(x)));
+ NumericGetDatum(value),
+ NumericGetDatum(x)));
pfree(x);
pfree(a);
pfree(b);
else
{
orgnum = DatumGetCString(DirectFunctionCall1(int4out,
- Int32GetDatum(value)));
+ Int32GetDatum(value)));
}
len = strlen(orgnum);
{
/* Currently don't support int8 conversion to roman... */
numstr = orgnum = int_to_roman(DatumGetInt32(
- DirectFunctionCall1(int84, Int64GetDatum(value))));
+ DirectFunctionCall1(int84, Int64GetDatum(value))));
}
else
{
double multi = pow((double) 10, (double) Num.multi);
value = DatumGetInt64(DirectFunctionCall2(int8mul,
- Int64GetDatum(value),
- DirectFunctionCall1(dtoi8,
- Float8GetDatum(multi))));
+ Int64GetDatum(value),
+ DirectFunctionCall1(dtoi8,
+ Float8GetDatum(multi))));
Num.pre += Num.multi;
}
orgnum = DatumGetCString(DirectFunctionCall1(int8out,
- Int64GetDatum(value)));
+ Int64GetDatum(value)));
len = strlen(orgnum);
if (*orgnum == '-')
*
*
* Copyright (c) 2004-2005, PostgreSQL Global Development Group
- *
+ *
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/genfile.c,v 1.6 2005/08/29 19:39:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/genfile.c,v 1.7 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "utils/memutils.h"
-typedef struct
+typedef struct
{
- char *location;
- DIR *dirdesc;
+ char *location;
+ DIR *dirdesc;
} directory_fctx;
static char *
check_and_make_absolute(text *arg)
{
- int input_len = VARSIZE(arg) - VARHDRSZ;
- char *filename = palloc(input_len + 1);
-
+ int input_len = VARSIZE(arg) - VARHDRSZ;
+ char *filename = palloc(input_len + 1);
+
memcpy(filename, VARDATA(arg), input_len);
filename[input_len] = '\0';
if (path_contains_parent_reference(filename))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("reference to parent directory (\"..\") not allowed"))));
+ (errmsg("reference to parent directory (\"..\") not allowed"))));
if (is_absolute_path(filename))
{
path_is_prefix_of_path(Log_directory, filename))
return filename;
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("absolute path not allowed"))));
return NULL; /* keep compiler quiet */
}
else
{
- char *absname = palloc(strlen(DataDir) + strlen(filename) + 2);
+ char *absname = palloc(strlen(DataDir) + strlen(filename) + 2);
+
sprintf(absname, "%s/%s", DataDir, filename);
pfree(filename);
return absname;
text *filename_t = PG_GETARG_TEXT_P(0);
int64 seek_offset = PG_GETARG_INT64(1);
int64 bytes_to_read = PG_GETARG_INT64(2);
- char *buf;
+ char *buf;
size_t nbytes;
- FILE *file;
- char *filename;
+ FILE *file;
+ char *filename;
if (!superuser())
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to read files"))));
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("requested length too large")));
-
+
buf = palloc((Size) bytes_to_read + VARHDRSZ);
nbytes = fread(VARDATA(buf), 1, (size_t) bytes_to_read, file);
pg_stat_file(PG_FUNCTION_ARGS)
{
text *filename_t = PG_GETARG_TEXT_P(0);
- char *filename;
+ char *filename;
struct stat fst;
Datum values[6];
bool isnull[6];
TupleDesc tupdesc;
if (!superuser())
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to get file information"))));
errmsg("could not stat file \"%s\": %m", filename)));
/*
- * This record type had better match the output parameters declared
- * for me in pg_proc.h (actually, in system_views.sql at the moment).
+ * This record type had better match the output parameters declared for me
+ * in pg_proc.h (actually, in system_views.sql at the moment).
*/
tupdesc = CreateTemplateTupleDesc(6, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1,
Datum
pg_ls_dir(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- struct dirent *de;
- directory_fctx *fctx;
+ FuncCallContext *funcctx;
+ struct dirent *de;
+ directory_fctx *fctx;
if (!superuser())
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to get directory listings"))));
fctx->dirdesc = AllocateDir(fctx->location);
if (!fctx->dirdesc)
- ereport(ERROR,
+ ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open directory \"%s\": %m",
fctx->location)));
}
funcctx = SRF_PERCALL_SETUP();
- fctx = (directory_fctx*) funcctx->user_fctx;
+ fctx = (directory_fctx *) funcctx->user_fctx;
while ((de = ReadDir(fctx->dirdesc, fctx->location)) != NULL)
{
int len = strlen(de->d_name);
- text *result;
+ text *result;
if (strcmp(de->d_name, ".") == 0 ||
strcmp(de->d_name, "..") == 0)
- continue;
+ continue;
result = palloc(len + VARHDRSZ);
VARATT_SIZEP(result) = len + VARHDRSZ;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.90 2005/07/01 19:19:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.91 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type box: \"%s\"", str)));
+ errmsg("invalid input syntax for type box: \"%s\"", str)));
/* reorder corners if necessary... */
if (box->high.x < box->low.x)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type line: \"%s\"", str)));
+ errmsg("invalid input syntax for type line: \"%s\"", str)));
line = (LINE *) palloc(sizeof(LINE));
line_construct_pts(line, &lseg.p[0], &lseg.p[1]);
y;
/*
- * NOTE: if the lines are identical then we will find they are
- * parallel and report "no intersection". This is a little weird, but
- * since there's no *unique* intersection, maybe it's appropriate
- * behavior.
+ * NOTE: if the lines are identical then we will find they are parallel
+ * and report "no intersection". This is a little weird, but since
+ * there's no *unique* intersection, maybe it's appropriate behavior.
*/
if (DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type path: \"%s\"", str)));
+ errmsg("invalid input syntax for type path: \"%s\"", str)));
s = str;
while (isspace((unsigned char) *s))
path->npts = npts;
if ((!path_decode(TRUE, npts, s, &isopen, &s, &(path->p[0])))
- && (!((depth == 0) && (*s == '\0'))) && !((depth >= 1) && (*s == RDELIM)))
+ && (!((depth == 0) && (*s == '\0'))) && !((depth >= 1) && (*s == RDELIM)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type path: \"%s\"", str)));
+ errmsg("invalid input syntax for type path: \"%s\"", str)));
path->closed = (!isopen);
if (npts < 0 || npts >= (int32) ((INT_MAX - offsetof(PATH, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid number of points in external \"path\" value")));
+ errmsg("invalid number of points in external \"path\" value")));
size = offsetof(PATH, p[0]) +sizeof(path->p[0]) * npts;
path = (PATH *) palloc(size);
tmp = DatumGetFloat8(DirectFunctionCall2(lseg_distance,
LsegPGetDatum(&seg1),
- LsegPGetDatum(&seg2)));
+ LsegPGetDatum(&seg2)));
if (!have_min || tmp < min)
{
min = tmp;
if (!pair_decode(str, &x, &y, &s) || (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type point: \"%s\"", str)));
+ errmsg("invalid input syntax for type point: \"%s\"", str)));
point = (Point *) palloc(sizeof(Point));
{
#ifdef GEODEBUG
printf("point_dt- segment (%f,%f),(%f,%f) length is %f\n",
- pt1->x, pt1->y, pt2->x, pt2->y, HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
+ pt1->x, pt1->y, pt2->x, pt2->y, HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
#endif
return HYPOT(pt1->x - pt2->x, pt1->y - pt2->y);
}
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type lseg: \"%s\"", str)));
+ errmsg("invalid input syntax for type lseg: \"%s\"", str)));
#ifdef NOT_USED
lseg->m = point_sl(&lseg->p[0], &lseg->p[1]);
PG_RETURN_NULL();
/*
- * If the line intersection point isn't within l1 (or equivalently
- * l2), there is no valid segment intersection point at all.
+ * If the line intersection point isn't within l1 (or equivalently l2),
+ * there is no valid segment intersection point at all.
*/
if (!on_ps_internal(result, l1) ||
!on_ps_internal(result, l2))
result->y = l1->p[0].y;
}
else if ((FPeq(l1->p[1].x, l2->p[0].x) && FPeq(l1->p[1].y, l2->p[0].y)) ||
- (FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
+ (FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
{
result->x = l1->p[1].x;
result->y = l1->p[1].y;
Assert(path->npts > 1);
/*
- * the distance from a point to a path is the smallest
- * distance from the point to any of its constituent segments.
+ * the distance from a point to a path is the smallest distance
+ * from the point to any of its constituent segments.
*/
for (i = 0; i < path->npts; i++)
{
{
if (!path->closed)
continue;
- iprev = path->npts - 1; /* include the closure
- * segment */
+ iprev = path->npts - 1; /* include the closure segment */
}
statlseg_construct(&lseg, &path->p[iprev], &path->p[i]);
}
/*
- * vert. and horiz. cases are down, now check if the closest point is
- * one of the end points or someplace on the lseg.
+ * vert. and horiz. cases are down, now check if the closest point is one
+ * of the end points or someplace on the lseg.
*/
invm = -1.0 / point_sl(&(lseg->p[0]), &(lseg->p[1]));
* "band" */
if (pt->y < (tmp->A * pt->x + tmp->C))
{ /* we are below the lower edge */
- result = point_copy(&lseg->p[!yh]); /* below the lseg, take
- * lower end pt */
+ result = point_copy(&lseg->p[!yh]); /* below the lseg, take lower
+ * end pt */
#ifdef GEODEBUG
printf("close_ps below: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
* "band" */
if (pt->y > (tmp->A * pt->x + tmp->C))
{ /* we are below the lower edge */
- result = point_copy(&lseg->p[yh]); /* above the lseg, take
- * higher end pt */
+ result = point_copy(&lseg->p[yh]); /* above the lseg, take higher
+ * end pt */
#ifdef GEODEBUG
printf("close_ps above: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
}
/*
- * at this point the "normal" from point will hit lseg. The closet
- * point will be somewhere on the lseg
+ * at this point the "normal" from point will hit lseg. The closet point
+ * will be somewhere on the lseg
*/
tmp = line_construct_pm(pt, invm);
#ifdef GEODEBUG
if ((d = dist_ps_internal(&l2->p[0], l1)) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&l2->p[0]),
+ PointPGetDatum(&l2->p[0]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&point),
+ PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
if ((d = dist_ps_internal(&l2->p[1], l1)) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&l2->p[1]),
+ PointPGetDatum(&l2->p[1]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&point),
+ PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pl,
- PointPGetDatum(&lseg->p[0]),
- LinePGetDatum(line))) &&
+ PointPGetDatum(&lseg->p[0]),
+ LinePGetDatum(line))) &&
DatumGetBool(DirectFunctionCall2(on_pl,
- PointPGetDatum(&lseg->p[1]),
- LinePGetDatum(line))));
+ PointPGetDatum(&lseg->p[1]),
+ LinePGetDatum(line))));
}
Datum
BOX *box = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pb,
- PointPGetDatum(&lseg->p[0]),
+ PointPGetDatum(&lseg->p[0]),
BoxPGetDatum(box))) &&
DatumGetBool(DirectFunctionCall2(on_pb,
- PointPGetDatum(&lseg->p[1]),
+ PointPGetDatum(&lseg->p[1]),
BoxPGetDatum(box))));
}
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for type polygon: \"%s\"", str)));
size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for type polygon: \"%s\"", str)));
make_bound_box(poly);
if (npts < 0 || npts >= (int32) ((INT_MAX - offsetof(POLYGON, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid number of points in external \"polygon\" value")));
+ errmsg("invalid number of points in external \"polygon\" value")));
size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
result = polya->boundbox.high.x < polyb->boundbox.low.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
result = polya->boundbox.high.x <= polyb->boundbox.high.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
result = polya->boundbox.low.x > polyb->boundbox.high.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
result = polya->boundbox.low.x >= polyb->boundbox.low.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
result = polya->boundbox.high.y < polyb->boundbox.low.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
result = polya->boundbox.high.y <= polyb->boundbox.high.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
result = polya->boundbox.low.y > polyb->boundbox.high.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
result = polya->boundbox.low.y >= polyb->boundbox.low.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
result = plist_same(polya->npts, polya->p, polyb->p);
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
result = box_ov(&polya->boundbox, &polyb->boundbox);
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
}
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_mul,
- PointPGetDatum(&path->p[i]),
+ PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_div,
- PointPGetDatum(&path->p[i]),
+ PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
if (!pair_decode(s, &circle->center.x, &circle->center.y, &s))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
if (*s == DELIM)
s++;
if ((!single_decode(s, &circle->radius, &s)) || (circle->radius < 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
while (depth > 0)
{
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
}
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
PG_RETURN_CIRCLE_P(circle);
}
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_mul,
- PointPGetDatum(&circle->center),
+ PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_div,
- PointPGetDatum(&circle->center),
+ PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
if (FPzero(circle->radius))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert circle with radius zero to polygon")));
+ errmsg("cannot convert circle with radius zero to polygon")));
if (npts < 2)
ereport(ERROR,
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_ntop.c,v 1.20 2005/09/24 22:54:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_ntop.c,v 1.21 2005/10/15 02:49:28 momjian Exp $
*/
#if defined(LIBC_SCCS) && !defined(lint)
inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
{
/*
- * Note that int32_t and int16_t need only be "at least" large enough
- * to contain a value of the specified size. On some systems, like
- * Crays, there is no such thing as an integer variable with 16 bits.
- * Keep this in mind if you think this function should have been coded
- * to use pointer overlays. All the world's not a VAX.
+ * Note that int32_t and int16_t need only be "at least" large enough to
+ * contain a value of the specified size. On some systems, like Crays,
+ * there is no such thing as an integer variable with 16 bits. Keep this
+ * in mind if you think this function should have been coded to use
+ * pointer overlays. All the world's not a VAX.
*/
char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255/128"];
char *tp;
}
/*
- * Preprocess: Copy the input (bytewise) array into a wordwise array.
- * Find the longest run of 0x00's in src[] for :: shorthanding.
+ * Preprocess: Copy the input (bytewise) array into a wordwise array. Find
+ * the longest run of 0x00's in src[] for :: shorthanding.
*/
memset(words, '\0', sizeof words);
for (i = 0; i < NS_IN6ADDRSZ; i++)
*tp++ = ':';
/* Is this address an encapsulated IPv4? */
if (i == 6 && best.base == 0 && (best.len == 6 ||
- (best.len == 7 && words[7] != 0x0001) ||
- (best.len == 5 && words[5] == 0xffff)))
+ (best.len == 7 && words[7] != 0x0001) ||
+ (best.len == 5 && words[5] == 0xffff)))
{
int n;
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_pton.c,v 1.20 2005/02/01 00:59:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_pton.c,v 1.21 2005/10/15 02:49:28 momjian Exp $
*/
#if defined(LIBC_SCCS) && !defined(lint)
bits = 24;
else if (*odst >= 128) /* Class B */
bits = 16;
- else /* Class A */
+ else
+ /* Class A */
bits = 8;
/* If imputed mask is narrower than specified octets, widen. */
if (bits < ((dst - odst) * 8))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.67 2005/07/10 21:36:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.68 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int2vector *
buildint2vector(const int2 *int2s, int n)
{
- int2vector *result;
+ int2vector *result;
result = (int2vector *) palloc0(Int2VectorSize(n));
memcpy(result->values, int2s, n * sizeof(int2));
/*
- * Attach standard array header. For historical reasons, we set the
- * index lower bound to 0 not 1.
+ * Attach standard array header. For historical reasons, we set the index
+ * lower bound to 0 not 1.
*/
result->size = Int2VectorSize(n);
result->ndim = 1;
int2vectorrecv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- int2vector *result;
+ int2vector *result;
result = (int2vector *)
DatumGetPointer(DirectFunctionCall3(array_recv,
int32 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int32 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int32 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There are two cases where this fails: arg2 = 0 (which
- * cannot overflow) and arg1 = INT_MIN, arg2 = -1 (where the division
- * itself will overflow and thus incorrectly match).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There are two cases where this fails: arg2 = 0 (which cannot
+ * overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
+ * overflow and thus incorrectly match).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int16 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int16
+ * range; if so, no overflow is possible.
*/
if (!(arg1 >= (int32) SHRT_MIN && arg1 <= (int32) SHRT_MAX &&
arg2 >= (int32) SHRT_MIN && arg2 <= (int32) SHRT_MAX) &&
arg2 != 0 &&
- (result/arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
+ (result / arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT_MIN, arg2 = -1, where the correct result is -INT_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 = INT_MIN,
+ * arg2 = -1, where the correct result is -INT_MIN, which can't be
+ * represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
int16 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int16 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int32 result32;
/*
- * The most practical way to detect overflow is to do the arithmetic
- * in int32 (so that the result can't overflow) and then do a range
- * check.
+ * The most practical way to detect overflow is to do the arithmetic in
+ * int32 (so that the result can't overflow) and then do a range check.
*/
- result32 = (int32) arg1 * (int32) arg2;
+ result32 = (int32) arg1 *(int32) arg2;
+
if (result32 < SHRT_MIN || result32 > SHRT_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = SHRT_MIN, arg2 = -1, where the correct result is -SHRT_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 =
+ * SHRT_MIN, arg2 = -1, where the correct result is -SHRT_MIN, which can't
+ * be represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
int32 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int32 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int32 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There is one case where this fails: arg2 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There is one case where this fails: arg2 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int16 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int16
+ * range; if so, no overflow is possible.
*/
if (!(arg2 >= (int32) SHRT_MIN && arg2 <= (int32) SHRT_MAX) &&
- result/arg2 != arg1)
+ result / arg2 != arg1)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
int32 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int32 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int32 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg1 gives
- * arg2 again. There is one case where this fails: arg1 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg1 gives arg2
+ * again. There is one case where this fails: arg1 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int16 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int16
+ * range; if so, no overflow is possible.
*/
if (!(arg1 >= (int32) SHRT_MIN && arg1 <= (int32) SHRT_MAX) &&
- result/arg1 != arg2)
+ result / arg1 != arg2)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT_MIN, arg2 = -1, where the correct result is -INT_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 = INT_MIN,
+ * arg2 = -1, where the correct result is -INT_MIN, which can't be
+ * represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
funcctx = SRF_PERCALL_SETUP();
/*
- * get the saved state and use current as the result for this
- * iteration
+ * get the saved state and use current as the result for this iteration
*/
fctx = funcctx->user_fctx;
result = fctx->current;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.58 2005/03/12 20:25:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.59 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int sign = 1;
/*
- * Do our own scan, rather than relying on sscanf which might be
- * broken for long long.
+ * Do our own scan, rather than relying on sscanf which might be broken
+ * for long long.
*/
/* skip leading spaces */
/*
* Do an explicit check for INT64_MIN. Ugly though this is, it's
- * cleaner than trying to get the loop below to handle it
- * portably.
+ * cleaner than trying to get the loop below to handle it portably.
*/
#ifndef INT64_IS_BUSTED
if (strncmp(ptr, "9223372036854775808", 19) == 0)
else
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for type bigint",
- str)));
+ errmsg("value \"%s\" is out of range for type bigint",
+ str)));
}
tmp = newtmp;
}
int64 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int64 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int64 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There are two cases where this fails: arg2 = 0 (which
- * cannot overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division
- * itself will overflow and thus incorrectly match).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There are two cases where this fails: arg2 = 0 (which cannot
+ * overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
+ * will overflow and thus incorrectly match).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int32 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int32
+ * range; if so, no overflow is possible.
*/
if (!(arg1 == (int64) ((int32) arg1) &&
arg2 == (int64) ((int32) arg2)) &&
arg2 != 0 &&
- (result/arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
+ (result / arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 =
+ * INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN, which
+ * can't be represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
/*
- * Special case to avoid palloc overhead for COUNT(): when called
- * from nodeAgg, we know that the argument is modifiable local
- * storage, so just update it in-place.
+ * Special case to avoid palloc overhead for COUNT(): when called from
+ * nodeAgg, we know that the argument is modifiable local storage, so
+ * just update it in-place.
*
* Note: this assumes int8 is a pass-by-ref type; if we ever support
* pass-by-val int8, this should be ifdef'd out when int8 is
int64 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int64 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int64 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg1 gives
- * arg2 again. There is one case where this fails: arg1 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg1 gives arg2
+ * again. There is one case where this fails: arg1 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int32 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int32
+ * range; if so, no overflow is possible.
*/
if (arg1 != (int64) ((int32) arg1) &&
- result/arg1 != arg2)
+ result / arg1 != arg2)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 =
+ * INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN, which
+ * can't be represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
int64 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int64 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
int64 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There is one case where this fails: arg2 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There is one case where this fails: arg2 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int32 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int32
+ * range; if so, no overflow is possible.
*/
if (arg2 != (int64) ((int32) arg2) &&
- result/arg2 != arg1)
+ result / arg2 != arg1)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
arg = rint(arg);
/*
- * Does it fit in an int64? Avoid assuming that we have handy
- * constants defined for the range boundaries, instead test for
- * overflow by reverse-conversion.
+ * Does it fit in an int64? Avoid assuming that we have handy constants
+ * defined for the range boundaries, instead test for overflow by
+ * reverse-conversion.
*/
result = (int64) arg;
darg = rint(arg);
/*
- * Does it fit in an int64? Avoid assuming that we have handy
- * constants defined for the range boundaries, instead test for
- * overflow by reverse-conversion.
+ * Does it fit in an int64? Avoid assuming that we have handy constants
+ * defined for the range boundaries, instead test for overflow by
+ * reverse-conversion.
*/
result = (int64) darg;
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
funcctx = SRF_PERCALL_SETUP();
/*
- * get the saved state and use current as the result for this
- * iteration
+ * get the saved state and use current as the result for this iteration
*/
fctx = funcctx->user_fctx;
result = fctx->current;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.61 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.62 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define LIKE_ABORT (-1)
-static int MatchText(char *t, int tlen, char *p, int plen);
-static int MatchTextIC(char *t, int tlen, char *p, int plen);
-static int MatchBytea(char *t, int tlen, char *p, int plen);
+static int MatchText(char *t, int tlen, char *p, int plen);
+static int MatchTextIC(char *t, int tlen, char *p, int plen);
+static int MatchBytea(char *t, int tlen, char *p, int plen);
static text *do_like_escape(text *, text *);
-static int MBMatchText(char *t, int tlen, char *p, int plen);
-static int MBMatchTextIC(char *t, int tlen, char *p, int plen);
+static int MBMatchText(char *t, int tlen, char *p, int plen);
+static int MBMatchTextIC(char *t, int tlen, char *p, int plen);
static text *MB_do_like_escape(text *, text *);
/*--------------------
int p1_len;
/* Optimization: quickly compare the first byte. */
- if(*p1 != *p2)
+ if (*p1 != *p2)
return (0);
p1_len = pg_mblen(p1);
int l;
/*
- * short cut. if *p1 and *p2 is lower than CHARMAX, then we could
- * assume they are ASCII
+ * short cut. if *p1 and *p2 is lower than CHARMAX, then we could assume
+ * they are ASCII
*/
if ((unsigned char) *p1 < CHARMAX && (unsigned char) *p2 < CHARMAX)
return (tolower((unsigned char) *p1) == tolower((unsigned char) *p2));
/*
- * if one of them is an ASCII while the other is not, then they must
- * be different characters
+ * if one of them is an ASCII while the other is not, then they must be
+ * different characters
*/
else if ((unsigned char) *p1 < CHARMAX || (unsigned char) *p2 < CHARMAX)
return (0);
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
e = VARDATA(esc);
}
/*
- * Otherwise, convert occurrences of the specified escape
- * character to '\', and double occurrences of '\' --- unless they
- * immediately follow an escape character!
+ * Otherwise, convert occurrences of the specified escape character to
+ * '\', and double occurrences of '\' --- unless they immediately
+ * follow an escape character!
*/
afterescape = false;
while (plen > 0)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can match
- * the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match the
+ * rest of the pattern.
*/
while (tlen > 0)
{
}
/*
- * End of text with no match, so no point in trying later
- * places to start matching this pattern.
+ * End of text with no match, so no point in trying later places
+ * to start matching this pattern.
*/
return LIKE_ABORT;
}
else if ((*p != '_') && !BYTEA_CHAREQ(t, p))
{
/*
- * Not the single-character wildcard and no explicit match?
- * Then time to quit...
+ * Not the single-character wildcard and no explicit match? Then
+ * time to quit...
*/
return LIKE_FALSE;
}
return LIKE_TRUE;
/*
- * End of text with no match, so no point in trying later places to
- * start matching this pattern.
+ * End of text with no match, so no point in trying later places to start
+ * matching this pattern.
*/
return LIKE_ABORT;
} /* MatchBytea() */
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.11 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.12 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can match
- * the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match the
+ * rest of the pattern.
*/
while (tlen > 0)
{
}
/*
- * End of text with no match, so no point in trying later
- * places to start matching this pattern.
+ * End of text with no match, so no point in trying later places
+ * to start matching this pattern.
*/
return LIKE_ABORT;
}
else if ((*p != '_') && !CHAREQ(t, p))
{
/*
- * Not the single-character wildcard and no explicit match?
- * Then time to quit...
+ * Not the single-character wildcard and no explicit match? Then
+ * time to quit...
*/
return LIKE_FALSE;
}
return LIKE_TRUE;
/*
- * End of text with no match, so no point in trying later places to
- * start matching this pattern.
+ * End of text with no match, so no point in trying later places to start
+ * matching this pattern.
*/
return LIKE_ABORT;
} /* MatchText() */
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can match
- * the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match the
+ * rest of the pattern.
*/
while (tlen > 0)
{
}
/*
- * End of text with no match, so no point in trying later
- * places to start matching this pattern.
+ * End of text with no match, so no point in trying later places
+ * to start matching this pattern.
*/
return LIKE_ABORT;
}
else if ((*p != '_') && !ICHAREQ(t, p))
{
/*
- * Not the single-character wildcard and no explicit match?
- * Then time to quit...
+ * Not the single-character wildcard and no explicit match? Then
+ * time to quit...
*/
return LIKE_FALSE;
}
return LIKE_TRUE;
/*
- * End of text with no match, so no point in trying later places to
- * start matching this pattern.
+ * End of text with no match, so no point in trying later places to start
+ * matching this pattern.
*/
return LIKE_ABORT;
} /* MatchTextIC() */
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
e = VARDATA(esc);
}
/*
- * Otherwise, convert occurrences of the specified escape
- * character to '\', and double occurrences of '\' --- unless they
- * immediately follow an escape character!
+ * Otherwise, convert occurrences of the specified escape character to
+ * '\', and double occurrences of '\' --- unless they immediately
+ * follow an escape character!
*/
afterescape = false;
while (plen > 0)
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.19 2005/06/18 19:33:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.20 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* This must match enum LockTagType! */
-static const char * const LockTagTypeNames[] = {
+static const char *const LockTagTypeNames[] = {
"relation",
"extend",
"page",
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
/*
- * Collect all the locking information that we will format and
- * send out as a result set.
+ * Collect all the locking information that we will format and send
+ * out as a result set.
*/
mystatus = (PG_Lock_Status *) palloc(sizeof(PG_Lock_Status));
funcctx->user_fctx = (void *) mystatus;
proc = &(lockData->procs[mystatus->currIdx]);
/*
- * Look to see if there are any held lock modes in this PROCLOCK.
- * If so, report, and destructively modify lockData so we don't
- * report again.
+ * Look to see if there are any held lock modes in this PROCLOCK. If
+ * so, report, and destructively modify lockData so we don't report
+ * again.
*/
granted = false;
if (proclock->holdMask)
mode = proc->waitLockMode;
/*
- * We are now done with this PROCLOCK, so advance pointer
- * to continue with next one on next call.
+ * We are now done with this PROCLOCK, so advance pointer to
+ * continue with next one on next call.
*/
mystatus->currIdx++;
}
else
{
/*
- * Okay, we've displayed all the locks associated with
- * this PROCLOCK, proceed to the next one.
+ * Okay, we've displayed all the locks associated with this
+ * PROCLOCK, proceed to the next one.
*/
mystatus->currIdx++;
continue;
locktypename = tnbuf;
}
values[0] = DirectFunctionCall1(textin,
- CStringGetDatum(locktypename));
+ CStringGetDatum(locktypename));
switch (lock->tag.locktag_type)
else
nulls[10] = 'n';
values[11] = DirectFunctionCall1(textin,
- CStringGetDatum(GetLockmodeName(mode)));
+ CStringGetDatum(GetLockmodeName(mode)));
values[12] = BoolGetDatum(granted);
tuple = heap_formtuple(funcctx->tuple_desc, values, nulls);
/*
* PostgreSQL type definitions for MAC addresses.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/mac.c,v 1.34 2004/08/29 05:06:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/mac.c,v 1.35 2005/10/15 02:49:28 momjian Exp $
*/
#include "postgres.h"
if (count != 6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type macaddr: \"%s\"", str)));
+ errmsg("invalid input syntax for type macaddr: \"%s\"", str)));
if ((a < 0) || (a > 255) || (b < 0) || (b > 255) ||
(c < 0) || (c > 255) || (d < 0) || (d > 255) ||
(e < 0) || (e > 255) || (f < 0) || (f > 255))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("invalid octet value in \"macaddr\" value: \"%s\"", str)));
+ errmsg("invalid octet value in \"macaddr\" value: \"%s\"", str)));
result = (macaddr *) palloc(sizeof(macaddr));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.48 2005/09/16 05:35:40 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.49 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to signal other server processes"))));
+ (errmsg("must be superuser to signal other server processes"))));
if (!IsBackendPid(pid))
{
/*
- * This is just a warning so a loop-through-resultset will not
- * abort if one backend terminated on it's own during the run
+ * This is just a warning so a loop-through-resultset will not abort
+ * if one backend terminated on it's own during the run
*/
ereport(WARNING,
- (errmsg("PID %d is not a PostgreSQL server process", pid)));
+ (errmsg("PID %d is not a PostgreSQL server process", pid)));
return false;
}
Datum
pg_reload_conf(PG_FUNCTION_ARGS)
{
- if (!superuser())
+ if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to signal the postmaster"))));
Datum
pg_rotate_logfile(PG_FUNCTION_ARGS)
{
- if (!superuser())
+ if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to rotate log files"))));
if (!Redirect_stderr)
{
ereport(WARNING,
- (errmsg("rotation not possible because log redirection not active")));
+ (errmsg("rotation not possible because log redirection not active")));
PG_RETURN_BOOL(false);
}
fctx = palloc(sizeof(ts_db_fctx));
/*
- * size = tablespace dirname length + dir sep
- * char + oid + terminator
+ * size = tablespace dirname length + dir sep char + oid + terminator
*/
fctx->location = (char *) palloc(10 + 10 + 1);
if (tablespaceOid == GLOBALTABLESPACE_OID)
errmsg("could not open directory \"%s\": %m",
fctx->location)));
ereport(WARNING,
- (errmsg("%u is not a tablespace OID", tablespaceOid)));
+ (errmsg("%u is not a tablespace OID", tablespaceOid)));
}
}
funcctx->user_fctx = fctx;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.144 2005/10/14 11:47:57 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.145 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Function prototypes -- internal to this file only
*/
-static AbsoluteTime tm2abstime(struct pg_tm *tm, int tz);
-static void reltime2tm(RelativeTime time, struct pg_tm *tm);
+static AbsoluteTime tm2abstime(struct pg_tm * tm, int tz);
+static void reltime2tm(RelativeTime time, struct pg_tm * tm);
static void parsetinterval(char *i_string,
- AbsoluteTime *i_start,
- AbsoluteTime *i_end);
+ AbsoluteTime *i_start,
+ AbsoluteTime *i_end);
/*
void
-abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm *tm, char **tzn)
+abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm * tm, char **tzn)
{
pg_time_t time = (pg_time_t) _time;
struct pg_tm *tx;
/*
- * If HasCTZSet is true then we have a brute force time zone
- * specified. Go ahead and rotate to the local time zone since we will
- * later bypass any calls which adjust the tm fields.
+ * If HasCTZSet is true then we have a brute force time zone specified. Go
+ * ahead and rotate to the local time zone since we will later bypass any
+ * calls which adjust the tm fields.
*/
if (HasCTZSet && (tzp != NULL))
time -= CTimeZone;
if (!HasCTZSet && tzp != NULL)
- tx = pg_localtime(&time,global_timezone);
+ tx = pg_localtime(&time, global_timezone);
else
tx = pg_gmtime(&time);
{
/*
* Copy no more than MAXTZLEN bytes of timezone to tzn, in
- * case it contains an error message, which doesn't fit in
- * the buffer
+ * case it contains an error message, which doesn't fit in the
+ * buffer
*/
StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
if (strlen(tm->tm_zone) > MAXTZLEN)
* Note that tm has full year (not 1900-based) and 1-based month.
*/
static AbsoluteTime
-tm2abstime(struct pg_tm *tm, int tz)
+tm2abstime(struct pg_tm * tm, int tz)
{
int day;
AbsoluteTime sec;
tm->tm_mon < 1 || tm->tm_mon > 12 ||
tm->tm_mday < 1 || tm->tm_mday > 31 ||
tm->tm_hour < 0 ||
- tm->tm_hour > 24 || /* test for > 24:00:00 */
+ tm->tm_hour > 24 || /* test for > 24:00:00 */
(tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)) ||
tm->tm_min < 0 || tm->tm_min > 59 ||
tm->tm_sec < 0 || tm->tm_sec > 60)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
- if ((day >= MAX_DAYNUM-10 && sec < 0) ||
- (day <= MIN_DAYNUM+10 && sec > 0))
+ if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
+ (day <= MIN_DAYNUM + 10 && sec > 0))
return INVALID_ABSTIME;
/* check for reserved values (e.g. "current" on edge of usual range */
case DTK_EPOCH:
/*
- * Don't bother retaining this as a reserved value, but
- * instead just set to the actual epoch time (1970-01-01)
+ * Don't bother retaining this as a reserved value, but instead
+ * just set to the actual epoch time (1970-01-01)
*/
result = 0;
break;
abstime_cmp_internal(AbsoluteTime a, AbsoluteTime b)
{
/*
- * We consider all INVALIDs to be equal and larger than any
- * non-INVALID. This is somewhat arbitrary; the important thing is to
- * have a consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any non-INVALID.
+ * This is somewhat arbitrary; the important thing is to have a consistent
+ * sort order.
*/
if (a == INVALID_ABSTIME)
{
Datum
timestamp_abstime(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
AbsoluteTime result;
fsec_t fsec;
int tz;
case INVALID_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert abstime \"invalid\" to timestamp")));
+ errmsg("cannot convert abstime \"invalid\" to timestamp")));
TIMESTAMP_NOBEGIN(result);
break;
case INVALID_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert abstime \"invalid\" to timestamp")));
+ errmsg("cannot convert abstime \"invalid\" to timestamp")));
TIMESTAMP_NOBEGIN(result);
break;
static void
-reltime2tm(RelativeTime time, struct pg_tm *tm)
+reltime2tm(RelativeTime time, struct pg_tm * tm)
{
double dtime = time;
else
{
p = DatumGetCString(DirectFunctionCall1(abstimeout,
- AbsoluteTimeGetDatum(tinterval->data[0])));
+ AbsoluteTimeGetDatum(tinterval->data[0])));
strcat(i_str, p);
pfree(p);
strcat(i_str, "\" \"");
p = DatumGetCString(DirectFunctionCall1(abstimeout,
- AbsoluteTimeGetDatum(tinterval->data[1])));
+ AbsoluteTimeGetDatum(tinterval->data[1])));
strcat(i_str, p);
pfree(p);
}
tinterval = (TimeInterval) palloc(sizeof(TimeIntervalData));
- tinterval ->status = pq_getmsgint(buf, sizeof(tinterval->status));
+ tinterval->status = pq_getmsgint(buf, sizeof(tinterval->status));
if (!(tinterval->status == T_INTERVAL_INVAL ||
tinterval->status == T_INTERVAL_VALID))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid status in external \"tinterval\" value")));
+ errmsg("invalid status in external \"tinterval\" value")));
- tinterval ->data[0] = pq_getmsgint(buf, sizeof(tinterval->data[0]));
- tinterval ->data[1] = pq_getmsgint(buf, sizeof(tinterval->data[1]));
+ tinterval->data[0] = pq_getmsgint(buf, sizeof(tinterval->data[0]));
+ tinterval->data[1] = pq_getmsgint(buf, sizeof(tinterval->data[1]));
PG_RETURN_TIMEINTERVAL(tinterval);
}
#ifdef HAVE_INT64_TIMESTAMP
span = ((INT64CONST(365250000) * year + INT64CONST(30000000) * month +
- INT64CONST(1000000) * day) * INT64CONST(86400)) +
- interval->time;
+ INT64CONST(1000000) * day) * INT64CONST(86400)) +
+ interval->time;
span /= USECS_PER_SEC;
#else
- span = (DAYS_PER_YEAR * year + (double)DAYS_PER_MONTH * month + day) * SECS_PER_DAY + interval->time;
+ span = (DAYS_PER_YEAR * year + (double) DAYS_PER_MONTH * month + day) * SECS_PER_DAY + interval->time;
#endif
if (span < INT_MIN || span > INT_MAX)
case INVALID_RELTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert reltime \"invalid\" to interval")));
+ errmsg("cannot convert reltime \"invalid\" to interval")));
result->time = 0;
result->day = 0;
result->month = 0;
if (AbsoluteTimeIsReal(t1) &&
RelativeTimeIsValid(t2) &&
((t2 > 0 && t1 < NOEND_ABSTIME - t2) ||
- (t2 <= 0 && t1 > NOSTART_ABSTIME - t2))) /* prevent overflow */
+ (t2 <= 0 && t1 > NOSTART_ABSTIME - t2))) /* prevent overflow */
PG_RETURN_ABSOLUTETIME(t1 + t2);
PG_RETURN_ABSOLUTETIME(INVALID_ABSTIME);
if (AbsoluteTimeIsReal(t1) &&
RelativeTimeIsValid(t2) &&
((t2 > 0 && t1 > NOSTART_ABSTIME + t2) ||
- (t2 <= 0 && t1 < NOEND_ABSTIME + t2))) /* prevent overflow */
+ (t2 <= 0 && t1 < NOEND_ABSTIME + t2))) /* prevent overflow */
PG_RETURN_ABSOLUTETIME(t1 - t2);
PG_RETURN_ABSOLUTETIME(INVALID_ABSTIME);
{
if (DatumGetBool(DirectFunctionCall2(abstimege,
AbsoluteTimeGetDatum(t),
- AbsoluteTimeGetDatum(tinterval->data[0]))) &&
+ AbsoluteTimeGetDatum(tinterval->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimele,
AbsoluteTimeGetDatum(t),
- AbsoluteTimeGetDatum(tinterval->data[1]))))
+ AbsoluteTimeGetDatum(tinterval->data[1]))))
PG_RETURN_BOOL(true);
}
PG_RETURN_BOOL(false);
reltime_cmp_internal(RelativeTime a, RelativeTime b)
{
/*
- * We consider all INVALIDs to be equal and larger than any
- * non-INVALID. This is somewhat arbitrary; the important thing is to
- * have a consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any non-INVALID.
+ * This is somewhat arbitrary; the important thing is to have a consistent
+ * sort order.
*/
if (a == INVALID_RELTIME)
{
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimeeq,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[0]))) &&
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimeeq,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(true);
PG_RETURN_BOOL(false);
}
AbsoluteTime b_len;
/*
- * We consider all INVALIDs to be equal and larger than any
- * non-INVALID. This is somewhat arbitrary; the important thing is to
- * have a consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any non-INVALID.
+ * This is somewhat arbitrary; the important thing is to have a consistent
+ * sort order.
*/
a_invalid = a->status == T_INTERVAL_INVAL ||
- a->data[0] == INVALID_ABSTIME ||
- a->data[1] == INVALID_ABSTIME;
+ a->data[0] == INVALID_ABSTIME ||
+ a->data[1] == INVALID_ABSTIME;
b_invalid = b->status == T_INTERVAL_INVAL ||
- b->data[0] == INVALID_ABSTIME ||
- b->data[1] == INVALID_ABSTIME;
+ b->data[0] == INVALID_ABSTIME ||
+ b->data[1] == INVALID_ABSTIME;
if (a_invalid)
{
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt == t);
}
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt != t);
}
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt < t);
}
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt > t);
}
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt <= t);
}
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt >= t);
}
if (i1->status == T_INTERVAL_INVAL || i2->status == T_INTERVAL_INVAL)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimele,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[0]))) &&
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimege,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(true);
PG_RETURN_BOOL(false);
}
if (i1->status == T_INTERVAL_INVAL || i2->status == T_INTERVAL_INVAL)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimelt,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[0]))) ||
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[0]))) ||
DatumGetBool(DirectFunctionCall2(abstimegt,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(false);
PG_RETURN_BOOL(true);
}
goto bogus; /* syntax error */
p++;
if (strncmp(INVALID_INTERVAL_STR, p, strlen(INVALID_INTERVAL_STR)) == 0)
- goto bogus; /* undefined range, handled like a syntax
- * err. */
+ goto bogus; /* undefined range, handled like a syntax err. */
/* search for the end of the first date and change it to a \0 */
p1 = p;
while ((c = *p1) != '\0')
*p1 = '\0';
/* get the first date */
*i_start = DatumGetAbsoluteTime(DirectFunctionCall1(abstimein,
- CStringGetDatum(p)));
+ CStringGetDatum(p)));
/* undo change to \0 */
*p1 = c;
p = ++p1;
*p1 = '\0';
/* get the second date */
*i_end = DatumGetAbsoluteTime(DirectFunctionCall1(abstimein,
- CStringGetDatum(p)));
+ CStringGetDatum(p)));
/* undo change to \0 */
*p1 = c;
p = ++p1;
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type tinterval: \"%s\"",
i_string)));
- *i_start = *i_end = INVALID_ABSTIME; /* keep compiler quiet */
+ *i_start = *i_end = INVALID_ABSTIME; /* keep compiler quiet */
}
gettimeofday(&tp, &tpz);
tt = (pg_time_t) tp.tv_sec;
pg_strftime(templ, sizeof(templ), "%a %b %d %H:%M:%S.%%06d %Y %Z",
- pg_localtime(&tt,global_timezone));
+ pg_localtime(&tt, global_timezone));
snprintf(buf, sizeof(buf), templ, tp.tv_usec);
len = VARHDRSZ + strlen(buf);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/name.c,v 1.55 2004/12/31 22:01:22 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/name.c,v 1.56 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int
namecat(Name n1, Name n2)
{
- return namestrcat(n1, NameStr(*n2)); /* n2 can't be any longer
- * than n1 */
+ return namestrcat(n1, NameStr(*n2)); /* n2 can't be any longer than
+ * n1 */
}
#endif
/*
* PostgreSQL type definitions for the INET and CIDR types.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.54 2004/10/08 01:10:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.55 2005/10/15 02:49:29 momjian Exp $
*
* Jon Postel RIP 16 Oct 1998
*/
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6
- * addresses will have a : somewhere in them (several, in fact) so if
- * there is one present, assume it's V6, otherwise assume it's V4.
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * will have a : somewhere in them (several, in fact) so if there is one
+ * present, assume it's V6, otherwise assume it's V4.
*/
if (strchr(src, ':') != NULL)
type ? "cidr" : "inet", src)));
/*
- * Error check: CIDR values must not have any bits set beyond the
- * masklen.
+ * Error check: CIDR values must not have any bits set beyond the masklen.
*/
if (type)
{
ip_family(addr) != PGSQL_AF_INET6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid address family in external \"inet\" value")));
+ errmsg("invalid address family in external \"inet\" value")));
bits = pq_getmsgbyte(buf);
if (bits < 0 || bits > ip_maxbits(addr))
ereport(ERROR,
addrptr[i] = pq_getmsgbyte(buf);
/*
- * Error check: CIDR values must not have any bits set beyond the
- * masklen.
+ * Error check: CIDR values must not have any bits set beyond the masklen.
*/
if (ip_type(addr))
{
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) > ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
}
PG_RETURN_BOOL(false);
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) >= ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
}
PG_RETURN_BOOL(false);
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) < ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
}
PG_RETURN_BOOL(false);
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) <= ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
}
PG_RETURN_BOOL(false);
}
/*
- * Can't get here unless someone tries to use scalarltsel/scalargtsel
- * on an operator with one network and one non-network operand.
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel on
+ * an operator with one network and one non-network operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return 0;
* Copyright (c) 1998-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.85 2005/07/10 21:13:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.86 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
int ndigits; /* # of digits in digits[] - can be 0! */
int weight; /* weight of first digit */
- int sign; /* NUMERIC_POS, NUMERIC_NEG, or
- * NUMERIC_NAN */
+ int sign; /* NUMERIC_POS, NUMERIC_NEG, or NUMERIC_NAN */
int dscale; /* display scale */
NumericDigit *buf; /* start of palloc'd space for digits[] */
NumericDigit *digits; /* base-NBASE digits */
#if DEC_DIGITS == 4
static NumericDigit const_zero_point_five_data[1] = {5000};
-
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_five_data[1] = {50};
-
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_five_data[1] = {5};
#endif
#if DEC_DIGITS == 4
static NumericDigit const_zero_point_nine_data[1] = {9000};
-
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_nine_data[1] = {90};
-
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_nine_data[1] = {9};
#endif
static NumericDigit const_zero_point_01_data[1] = {100};
static NumericVar const_zero_point_01 =
{1, -1, NUMERIC_POS, 2, NULL, const_zero_point_01_data};
-
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_01_data[1] = {1};
static NumericVar const_zero_point_01 =
{1, -1, NUMERIC_POS, 2, NULL, const_zero_point_01_data};
-
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_01_data[1] = {1};
static NumericVar const_zero_point_01 =
#if DEC_DIGITS == 4
static NumericDigit const_one_point_one_data[2] = {1, 1000};
-
#elif DEC_DIGITS == 2
static NumericDigit const_one_point_one_data[2] = {1, 10};
-
#elif DEC_DIGITS == 1
static NumericDigit const_one_point_one_data[2] = {1, 1};
#endif
#ifdef NUMERIC_DEBUG
static void dump_numeric(const char *str, Numeric num);
static void dump_var(const char *str, NumericVar *var);
-
#else
#define dump_numeric(s,n)
#define dump_var(s,v)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Use set_var_from_str() to parse the input string and return it in
- * the packed DB storage format
+ * Use set_var_from_str() to parse the input string and return it in the
+ * packed DB storage format
*/
init_var(&value);
set_var_from_str(str, &value);
/*
* Get the number in the variable format.
*
- * Even if we didn't need to change format, we'd still need to copy the
- * value to have a modifiable copy for rounding. set_var_from_num()
- * also guarantees there is extra digit space in case we produce a
- * carry out from rounding.
+ * Even if we didn't need to change format, we'd still need to copy the value
+ * to have a modifiable copy for rounding. set_var_from_num() also
+ * guarantees there is extra digit space in case we produce a carry out
+ * from rounding.
*/
init_var(&x);
set_var_from_num(num, &x);
numeric_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
if (d < 0 || d >= NBASE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid digit in external \"numeric\" value")));
+ errmsg("invalid digit in external \"numeric\" value")));
value.digits[i] = d;
}
* scale of the attribute have to be applied on the value.
*/
Datum
-numeric (PG_FUNCTION_ARGS)
+numeric(PG_FUNCTION_ARGS)
{
Numeric num = PG_GETARG_NUMERIC(0);
int32 typmod = PG_GETARG_INT32(1);
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * If the value isn't a valid type modifier, simply return a copy of
- * the input value
+ * If the value isn't a valid type modifier, simply return a copy of the
+ * input value
*/
if (typmod < (int32) (VARHDRSZ))
{
/*
* If the number is certainly in bounds and due to the target scale no
- * rounding could be necessary, just make a copy of the input and
- * modify its scale fields. (Note we assume the existing dscale is
- * honest...)
+ * rounding could be necessary, just make a copy of the input and modify
+ * its scale fields. (Note we assume the existing dscale is honest...)
*/
ddigits = (num->n_weight + 1) * DEC_DIGITS;
if (ddigits <= maxdigits && scale >= NUMERIC_DSCALE(num))
memcpy(res, num, num->varlen);
/*
- * The packed format is known to be totally zero digit trimmed always.
- * So we can identify a ZERO by the fact that there are no digits at
- * all. Do nothing to a zero.
+ * The packed format is known to be totally zero digit trimmed always. So
+ * we can identify a ZERO by the fact that there are no digits at all. Do
+ * nothing to a zero.
*/
if (num->varlen != NUMERIC_HDRSZ)
{
init_var(&result);
/*
- * The packed format is known to be totally zero digit trimmed always.
- * So we can identify a ZERO by the fact that there are no digits at
- * all.
+ * The packed format is known to be totally zero digit trimmed always. So
+ * we can identify a ZERO by the fact that there are no digits at all.
*/
if (num->varlen == NUMERIC_HDRSZ)
set_var_from_var(&const_zero, &result);
else
{
/*
- * And if there are some, we return a copy of ONE with the sign of
- * our argument
+ * And if there are some, we return a copy of ONE with the sign of our
+ * argument
*/
set_var_from_var(&const_one, &result);
result.sign = NUMERIC_SIGN(num);
if (count <= 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("count must be greater than zero")));
+ (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
+ errmsg("count must be greater than zero")));
init_var(&result_var);
init_var(&count_var);
{
case 0:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("lower bound cannot equal upper bound")));
+ (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
+ errmsg("lower bound cannot equal upper bound")));
/* bound1 < bound2 */
case -1:
int result;
/*
- * We consider all NANs to be equal and larger than any non-NAN. This
- * is somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * We consider all NANs to be equal and larger than any non-NAN. This is
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (NUMERIC_IS_NAN(num1))
{
/*
* Unpack the values, let mul_var() compute the result and return it.
- * Unlike add_var() and sub_var(), mul_var() will round its result. In
- * the case of numeric_mul(), which is invoked for the * operator on
- * numerics, we request exact representation for the product (rscale =
- * sum(dscale of arg1, dscale of arg2)).
+ * Unlike add_var() and sub_var(), mul_var() will round its result. In the
+ * case of numeric_mul(), which is invoked for the * operator on numerics,
+ * we request exact representation for the product (rscale = sum(dscale of
+ * arg1, dscale of arg2)).
*/
init_var(&arg1);
init_var(&arg2);
Numeric num2 = PG_GETARG_NUMERIC(1);
/*
- * Use cmp_numerics so that this will agree with the comparison
- * operators, particularly as regards comparisons involving NaN.
+ * Use cmp_numerics so that this will agree with the comparison operators,
+ * particularly as regards comparisons involving NaN.
*/
if (cmp_numerics(num1, num2) < 0)
PG_RETURN_NUMERIC(num1);
Numeric num2 = PG_GETARG_NUMERIC(1);
/*
- * Use cmp_numerics so that this will agree with the comparison
- * operators, particularly as regards comparisons involving NaN.
+ * Use cmp_numerics so that this will agree with the comparison operators,
+ * particularly as regards comparisons involving NaN.
*/
if (cmp_numerics(num1, num2) > 0)
PG_RETURN_NUMERIC(num1);
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a
- * scale to give at least NUMERIC_MIN_SIG_DIGITS significant digits;
- * but in any case not less than the input's dscale.
+ * Unpack the argument and determine the result scale. We choose a scale
+ * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
+ * case not less than the input's dscale.
*/
init_var(&arg);
init_var(&result);
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a
- * scale to give at least NUMERIC_MIN_SIG_DIGITS significant digits;
- * but in any case not less than the input's dscale.
+ * Unpack the argument and determine the result scale. We choose a scale
+ * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
+ * case not less than the input's dscale.
*/
init_var(&arg);
init_var(&result);
val = numericvar_to_double_no_overflow(&arg);
/*
- * log10(result) = num * log10(e), so this is approximately the
- * decimal weight of the result:
+ * log10(result) = num * log10(e), so this is approximately the decimal
+ * weight of the result:
*/
val *= 0.434294481903252;
set_var_from_num(num2, &arg2);
/*
- * Call log_var() to compute and return the result; note it handles
- * scale selection itself.
+ * Call log_var() to compute and return the result; note it handles scale
+ * selection itself.
*/
log_var(&arg1, &arg2, &result);
trunc_var(&arg2_trunc, 0);
/*
- * Return special SQLSTATE error codes for a few conditions mandated
- * by the standard.
+ * Return special SQLSTATE error codes for a few conditions mandated by
+ * the standard.
*/
if ((cmp_var(&arg1, &const_zero) == 0 &&
cmp_var(&arg2, &const_zero) < 0) ||
NumericGetDatum(newval));
sumX2 = DirectFunctionCall2(numeric_add, sumX2,
DirectFunctionCall2(numeric_mul,
- NumericGetDatum(newval),
- NumericGetDatum(newval)));
+ NumericGetDatum(newval),
+ NumericGetDatum(newval)));
transdatums[0] = N;
transdatums[1] = sumX;
{
mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
- div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
+ div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
res = make_result(&vsumX);
}
{
mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
- div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
+ div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
sqrt_var(&vsumX, &vsumX, rscale); /* stddev */
res = make_result(&vsumX);
/*
* If we're invoked by nodeAgg, we can cheat and modify out first
- * parameter in-place to avoid palloc overhead. If not, we need to
- * return the new value of the transition variable.
+ * parameter in-place to avoid palloc overhead. If not, we need to return
+ * the new value of the transition variable.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
- int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
+ int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
/* Leave the running sum unchanged in the new input is null */
if (!PG_ARGISNULL(1))
/*
* If we're invoked by nodeAgg, we can cheat and modify out first
- * parameter in-place to avoid palloc overhead. If not, we need to
- * return the new value of the transition variable.
+ * parameter in-place to avoid palloc overhead. If not, we need to return
+ * the new value of the transition variable.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
- int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
+ int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
/* Leave the running sum unchanged in the new input is null */
if (!PG_ARGISNULL(1))
}
/*
- * Note that we cannot special-case the nodeAgg case here, as we
- * do for int2_sum and int4_sum: numeric is of variable size, so
- * we cannot modify our first parameter in-place.
+ * Note that we cannot special-case the nodeAgg case here, as we do for
+ * int2_sum and int4_sum: numeric is of variable size, so we cannot modify
+ * our first parameter in-place.
*/
oldsum = PG_GETARG_NUMERIC(0);
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we need
- * to make a copy of it before scribbling on it.
+ * parameter in-place to reduce palloc overhead. Otherwise we need to make
+ * a copy of it before scribbling on it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
transarray = PG_GETARG_ARRAYTYPE_P(0);
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we need
- * to make a copy of it before scribbling on it.
+ * parameter in-place to reduce palloc overhead. Otherwise we need to make
+ * a copy of it before scribbling on it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
transarray = PG_GETARG_ARRAYTYPE_P(0);
NumericDigit *digits;
/*
- * We first parse the string to extract decimal digits and determine
- * the correct decimal weight. Then convert to NBASE representation.
+ * We first parse the string to extract decimal digits and determine the
+ * correct decimal weight. Then convert to NBASE representation.
*/
/* skip leading spaces */
if (!isdigit((unsigned char) *cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"", str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"", str)));
decdigits = (unsigned char *) palloc(strlen(cp) + DEC_DIGITS * 2);
if (have_dp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
have_dp = TRUE;
cp++;
}
if (endptr == cp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
cp = endptr;
if (exponent > NUMERIC_MAX_PRECISION ||
exponent < -NUMERIC_MAX_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
dweight += (int) exponent;
dscale -= (int) exponent;
if (dscale < 0)
if (!isspace((unsigned char) *cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
cp++;
}
/*
- * Okay, convert pure-decimal representation to base NBASE. First we
- * need to determine the converted weight and ndigits. offset is the
- * number of decimal zeroes to insert before the first given digit to
- * have a correctly aligned first NBASE digit.
+ * Okay, convert pure-decimal representation to base NBASE. First we need
+ * to determine the converted weight and ndigits. offset is the number of
+ * decimal zeroes to insert before the first given digit to have a
+ * correctly aligned first NBASE digit.
*/
if (dweight >= 0)
weight = (dweight + 1 + DEC_DIGITS - 1) / DEC_DIGITS - 1;
/*
* Allocate space for the result.
*
- * i is set to to # of decimal digits before decimal point. dscale is the
- * # of decimal digits we will print after decimal point. We may
- * generate as many as DEC_DIGITS-1 excess digits at the end, and in
- * addition we need room for sign, decimal point, null terminator.
+	 * i is set to # of decimal digits before decimal point.  dscale is the #
+ * of decimal digits we will print after decimal point. We may generate as
+ * many as DEC_DIGITS-1 excess digits at the end, and in addition we need
+ * room for sign, decimal point, null terminator.
*/
i = (var->weight + 1) * DEC_DIGITS;
if (i <= 0)
}
/*
- * If requested, output a decimal point and all the digits that follow
- * it. We initially put out a multiple of DEC_DIGITS digits, then
- * truncate if needed.
+ * If requested, output a decimal point and all the digits that follow it.
+ * We initially put out a multiple of DEC_DIGITS digits, then truncate if
+ * needed.
*/
if (dscale > 0)
{
/*
* Check for overflow - note we can't do this before rounding, because
- * rounding could raise the weight. Also note that the var's weight
- * could be inflated by leading zeroes, which will be stripped before
- * storage but perhaps might not have been yet. In any case, we must
- * recognize a true zero, whose weight doesn't mean anything.
+ * rounding could raise the weight. Also note that the var's weight could
+ * be inflated by leading zeroes, which will be stripped before storage
+ * but perhaps might not have been yet. In any case, we must recognize a
+ * true zero, whose weight doesn't mean anything.
*/
ddigits = (var->weight + 1) * DEC_DIGITS;
if (ddigits > maxdigits)
}
/*
- * For input like 10000000000, we must treat stripped digits as real.
- * So the loop assumes there are weight+1 digits before the decimal
- * point.
+ * For input like 10000000000, we must treat stripped digits as real. So
+ * the loop assumes there are weight+1 digits before the decimal point.
*/
weight = var->weight;
Assert(weight >= 0 && ndigits <= weight + 1);
/*
* The overflow check is a bit tricky because we want to accept
- * INT64_MIN, which will overflow the positive accumulator. We
- * can detect this case easily though because INT64_MIN is the
- * only nonzero value for which -val == val (on a two's complement
- * machine, anyway).
+ * INT64_MIN, which will overflow the positive accumulator. We can
+ * detect this case easily though because INT64_MIN is the only
+ * nonzero value for which -val == val (on a two's complement machine,
+ * anyway).
*/
if ((val / NBASE) != oldval) /* possible overflow? */
{
/* shouldn't happen ... */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- tmp)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ tmp)));
}
pfree(tmp);
/* shouldn't happen ... */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- tmp)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ tmp)));
}
pfree(tmp);
else
{
/*
- * var1 is positive, var2 is negative Must compare absolute
- * values
+ * var1 is positive, var2 is negative Must compare absolute values
*/
switch (cmp_abs(var1, var2))
{
/*
* Determine number of result digits to compute. If the exact result
- * would have more than rscale fractional digits, truncate the
- * computation with MUL_GUARD_DIGITS guard digits. We do that by
- * pretending that one or both inputs have fewer digits than they
- * really do.
+ * would have more than rscale fractional digits, truncate the computation
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
maxdigits = res_weight + 1 + (rscale * DEC_DIGITS) + MUL_GUARD_DIGITS;
/*
* We do the arithmetic in an array "dig[]" of signed int's. Since
- * INT_MAX is noticeably larger than NBASE*NBASE, this gives us
- * headroom to avoid normalizing carries immediately.
+ * INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
+ * to avoid normalizing carries immediately.
*
* maxdig tracks the maximum possible value of any dig[] entry; when this
- * threatens to exceed INT_MAX, we take the time to propagate carries.
- * To avoid overflow in maxdig itself, it actually represents the max
+ * threatens to exceed INT_MAX, we take the time to propagate carries. To
+ * avoid overflow in maxdig itself, it actually represents the max
* possible value divided by NBASE-1.
*/
dig = (int *) palloc0(res_ndigits * sizeof(int));
}
/*
- * Now we do a final carry propagation pass to normalize the result,
- * which we combine with storing the result digits into the output.
- * Note that this is still done at full precision w/guard digits.
+ * Now we do a final carry propagation pass to normalize the result, which
+ * we combine with storing the result digits into the output. Note that
+ * this is still done at full precision w/guard digits.
*/
alloc_var(result, res_ndigits);
res_digits = result->digits;
/*
* We do the arithmetic in an array "div[]" of signed int's. Since
- * INT_MAX is noticeably larger than NBASE*NBASE, this gives us
- * headroom to avoid normalizing carries immediately.
+ * INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
+ * to avoid normalizing carries immediately.
*
- * We start with div[] containing one zero digit followed by the
- * dividend's digits (plus appended zeroes to reach the desired
- * precision including guard digits). Each step of the main loop
- * computes an (approximate) quotient digit and stores it into div[],
- * removing one position of dividend space. A final pass of carry
- * propagation takes care of any mistaken quotient digits.
+ * We start with div[] containing one zero digit followed by the dividend's
+ * digits (plus appended zeroes to reach the desired precision including
+ * guard digits). Each step of the main loop computes an (approximate)
+ * quotient digit and stores it into div[], removing one position of
+ * dividend space. A final pass of carry propagation takes care of any
+ * mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
for (i = 0; i < var1ndigits; i++)
div[i + 1] = var1digits[i];
/*
- * We estimate each quotient digit using floating-point arithmetic,
- * taking the first four digits of the (current) dividend and divisor.
- * This must be float to avoid overflow.
+ * We estimate each quotient digit using floating-point arithmetic, taking
+ * the first four digits of the (current) dividend and divisor. This must
+ * be float to avoid overflow.
*/
fdivisor = (double) var2digits[0];
for (i = 1; i < 4; i++)
fdivisorinverse = 1.0 / fdivisor;
/*
- * maxdiv tracks the maximum possible absolute value of any div[]
- * entry; when this threatens to exceed INT_MAX, we take the time to
- * propagate carries. To avoid overflow in maxdiv itself, it actually
- * represents the max possible abs. value divided by NBASE-1.
+ * maxdiv tracks the maximum possible absolute value of any div[] entry;
+ * when this threatens to exceed INT_MAX, we take the time to propagate
+ * carries. To avoid overflow in maxdiv itself, it actually represents
+ * the max possible abs. value divided by NBASE-1.
*/
maxdiv = 1;
div[qi] = newdig;
/*
- * All the div[] digits except possibly div[qi] are now in
- * the range 0..NBASE-1.
+ * All the div[] digits except possibly div[qi] are now in the
+ * range 0..NBASE-1.
*/
maxdiv = Abs(newdig) / (NBASE - 1);
maxdiv = Max(maxdiv, 1);
/* Compute the (approximate) quotient digit */
fquotient = fdividend * fdivisorinverse;
qdigit = (fquotient >= 0.0) ? ((int) fquotient) :
- (((int) fquotient) - 1); /* truncate towards
- * -infinity */
+ (((int) fquotient) - 1); /* truncate towards -infinity */
maxdiv += Abs(qdigit);
}
}
/*
- * The dividend digit we are about to replace might still be
- * nonzero. Fold it into the next digit position. We don't need
- * to worry about overflow here since this should nearly cancel
- * with the subtraction of the divisor.
+ * The dividend digit we are about to replace might still be nonzero.
+ * Fold it into the next digit position. We don't need to worry about
+ * overflow here since this should nearly cancel with the subtraction
+ * of the divisor.
*/
div[qi + 1] += div[qi] * NBASE;
div[qi] = qdigit;
/*
- * Now we do a final carry propagation pass to normalize the result,
- * which we combine with storing the result digits into the output.
- * Note that this is still done at full precision w/guard digits.
+ * Now we do a final carry propagation pass to normalize the result, which
+ * we combine with storing the result digits into the output. Note that
+ * this is still done at full precision w/guard digits.
*/
alloc_var(result, div_ndigits + 1);
res_digits = result->digits;
round_var(result, rscale);
else
trunc_var(result, rscale);
-
+
/* Strip leading and trailing zeroes */
strip_var(result);
}
int rscale;
/*
- * The result scale of a division isn't specified in any SQL standard.
- * For PostgreSQL we select a result scale that will give at least
+ * The result scale of a division isn't specified in any SQL standard. For
+ * PostgreSQL we select a result scale that will give at least
* NUMERIC_MIN_SIG_DIGITS significant digits, so that numeric gives a
* result no less accurate than float8; but use a scale not less than
* either input's display scale.
}
/*
- * SQL2003 defines sqrt() in terms of power, so we need to emit the
- * right SQLSTATE error code if the operand is negative.
+ * SQL2003 defines sqrt() in terms of power, so we need to emit the right
+ * SQLSTATE error code if the operand is negative.
*/
if (stat < 0)
ereport(ERROR,
*
* exp(x) = 1 + x + x^2/2! + x^3/3! + ...
*
- * Given the limited range of x, this should converge reasonably quickly.
- * We run the series until the terms fall below the local_rscale
- * limit.
+ * Given the limited range of x, this should converge reasonably quickly. We
+ * run the series until the terms fall below the local_rscale limit.
*/
add_var(&const_one, &x, result);
set_var_from_var(&x, &xpow);
*
* z + z^3/3 + z^5/5 + ...
*
- * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048
- * due to the above range-reduction of x.
+ * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048 due
+ * to the above range-reduction of x.
*
- * The convergence of this is not as fast as one would like, but is
- * tolerable given that z is small.
+ * The convergence of this is not as fast as one would like, but is tolerable
+ * given that z is small.
*/
sub_var(&x, &const_one, result);
add_var(&x, &const_one, &elem);
val = numericvar_to_double_no_overflow(&ln_num);
/*
- * log10(result) = num * log10(e), so this is approximately the
- * weight:
+ * log10(result) = num * log10(e), so this is approximately the weight:
*/
val *= 0.434294481903252;
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra
- * precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
}
/*
- * At this point, we've run out of digits on one side or the other; so
- * any remaining nonzero digits imply that side is larger
+ * At this point, we've run out of digits on one side or the other; so any
+ * remaining nonzero digits imply that side is larger
*/
while (i1 < var1->ndigits)
{
di = (var->weight + 1) * DEC_DIGITS + rscale;
/*
- * If di = 0, the value loses all digits, but could round up to 1 if
- * its first extra digit is >= 5. If di < 0 the result must be 0.
+ * If di = 0, the value loses all digits, but could round up to 1 if its
+ * first extra digit is >= 5. If di < 0 the result must be 0.
*/
if (di < 0)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numutils.c,v 1.68 2005/01/09 21:03:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numutils.c,v 1.69 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *badp;
/*
- * Some versions of strtol treat the empty string as an error, but
- * some seem not to. Make an explicit test to be sure we catch it.
+ * Some versions of strtol treat the empty string as an error, but some
+ * seem not to. Make an explicit test to be sure we catch it.
*/
if (s == NULL)
elog(ERROR, "NULL pointer");
s)));
/*
- * Skip any trailing whitespace; if anything but whitespace remains
- * before the terminating character, bail out
+ * Skip any trailing whitespace; if anything but whitespace remains before
+ * the terminating character, bail out
*/
while (*badp && *badp != c && isspace((unsigned char) *badp))
badp++;
)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for type integer", s)));
+ errmsg("value \"%s\" is out of range for type integer", s)));
break;
case sizeof(int16):
if (errno == ERANGE || l < SHRT_MIN || l > SHRT_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for type smallint", s)));
+ errmsg("value \"%s\" is out of range for type smallint", s)));
break;
case sizeof(int8):
if (errno == ERANGE || l < SCHAR_MIN || l > SCHAR_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for 8-bit integer", s)));
+ errmsg("value \"%s\" is out of range for 8-bit integer", s)));
break;
default:
elog(ERROR, "unsupported result size: %d", size);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.63 2005/07/10 21:36:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.64 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
cvt = strtoul(s, &endptr, 10);
/*
- * strtoul() normally only sets ERANGE. On some systems it also may
- * set EINVAL, which simply means it couldn't parse the input string.
- * This is handled by the second "if" consistent across platforms.
+ * strtoul() normally only sets ERANGE. On some systems it also may set
+ * EINVAL, which simply means it couldn't parse the input string. This is
+ * handled by the second "if" consistent across platforms.
*/
if (errno && errno != ERANGE && errno != EINVAL)
ereport(ERROR,
result = (Oid) cvt;
/*
- * Cope with possibility that unsigned long is wider than Oid, in
- * which case strtoul will not raise an error for some values that are
- * out of the range of Oid.
+ * Cope with possibility that unsigned long is wider than Oid, in which
+ * case strtoul will not raise an error for some values that are out of
+ * the range of Oid.
*
- * For backwards compatibility, we want to accept inputs that are given
- * with a minus sign, so allow the input value if it matches after
- * either signed or unsigned extension to long.
+ * For backwards compatibility, we want to accept inputs that are given with
+ * a minus sign, so allow the input value if it matches after either
+ * signed or unsigned extension to long.
*
- * To ensure consistent results on 32-bit and 64-bit platforms, make sure
- * the error message is the same as if strtoul() had returned ERANGE.
+ * To ensure consistent results on 32-bit and 64-bit platforms, make sure the
+ * error message is the same as if strtoul() had returned ERANGE.
*/
#if OID_MAX != ULONG_MAX
if (cvt != (unsigned long) result &&
memcpy(result->values, oids, n * sizeof(Oid));
/*
- * Attach standard array header. For historical reasons, we set the
- * index lower bound to 0 not 1.
+ * Attach standard array header. For historical reasons, we set the index
+ * lower bound to 0 not 1.
*/
result->size = OidVectorSize(n);
result->ndim = 1;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.61 2005/08/24 17:50:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.62 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (ncodes == (size_t) -1)
{
/*
- * Invalid multibyte character encountered. We try to give a
- * useful error message by letting pg_verifymbstr check the
- * string. But it's possible that the string is OK to us, and not
- * OK to mbstowcs --- this suggests that the LC_CTYPE locale is
- * different from the database encoding. Give a generic error
- * message if verifymbstr can't find anything wrong.
+ * Invalid multibyte character encountered. We try to give a useful
+ * error message by letting pg_verifymbstr check the string. But it's
+ * possible that the string is OK to us, and not OK to mbstowcs ---
+ * this suggests that the LC_CTYPE locale is different from the
+ * database encoding. Give a generic error message if verifymbstr
+ * can't find anything wrong.
*/
pg_verifymbstr(workstr, nbytes, false);
ereport(ERROR,
{
int nbytes = VARSIZE(txt) - VARHDRSZ;
wchar_t *result;
- int r;
+ int r;
/* Overflow paranoia */
if (nbytes < 0 ||
- nbytes > (int) (INT_MAX / sizeof(wchar_t)) -1)
+ nbytes > (int) (INT_MAX / sizeof(wchar_t)) - 1)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
static text *
win32_utf8_wcstotext(const wchar_t *str)
{
- text *result;
- int nbytes;
- int r;
+ text *result;
+ int nbytes;
+ int r;
nbytes = WideCharToMultiByte(CP_UTF8, 0, str, -1, NULL, 0, NULL, NULL);
if (nbytes == 0) /* shouldn't happen */
errmsg("UTF16 to UTF8 translation failed: %lu",
GetLastError())));
- result = palloc(nbytes+VARHDRSZ);
+ result = palloc(nbytes + VARHDRSZ);
r = WideCharToMultiByte(CP_UTF8, 0, str, -1, VARDATA(result), nbytes,
NULL, NULL);
errmsg("UTF16 to UTF8 translation failed: %lu",
GetLastError())));
- VARATT_SIZEP(result) = nbytes + VARHDRSZ - 1; /* -1 to ignore null */
+ VARATT_SIZEP(result) = nbytes + VARHDRSZ - 1; /* -1 to ignore null */
return result;
}
#define texttowcs win32_texttowcs
#define wcstotext win32_wcstotext
-
-#endif /* WIN32 */
+#endif /* WIN32 */
/********************************************************************
lower(PG_FUNCTION_ARGS)
{
#ifdef USE_WIDE_UPPER_LOWER
+
/*
- * Use wide char code only when max encoding length > 1 and ctype != C.
- * Some operating systems fail with multi-byte encodings and a C locale.
- * Also, for a C locale there is no need to process as multibyte.
+ * Use wide char code only when max encoding length > 1 and ctype != C.
+ * Some operating systems fail with multi-byte encodings and a C locale.
+ * Also, for a C locale there is no need to process as multibyte.
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
int m;
/*
- * Since we copied the string, we can scribble directly on the
- * value
+ * Since we copied the string, we can scribble directly on the value
*/
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
upper(PG_FUNCTION_ARGS)
{
#ifdef USE_WIDE_UPPER_LOWER
+
/*
- * Use wide char code only when max encoding length > 1 and ctype != C.
- * Some operating systems fail with multi-byte encodings and a C locale.
- * Also, for a C locale there is no need to process as multibyte.
+ * Use wide char code only when max encoding length > 1 and ctype != C.
+ * Some operating systems fail with multi-byte encodings and a C locale.
+ * Also, for a C locale there is no need to process as multibyte.
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
int m;
/*
- * Since we copied the string, we can scribble directly on the
- * value
+ * Since we copied the string, we can scribble directly on the value
*/
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
initcap(PG_FUNCTION_ARGS)
{
#ifdef USE_WIDE_UPPER_LOWER
+
/*
- * Use wide char code only when max encoding length > 1 and ctype != C.
- * Some operating systems fail with multi-byte encodings and a C locale.
- * Also, for a C locale there is no need to process as multibyte.
+ * Use wide char code only when max encoding length > 1 and ctype != C.
+ * Some operating systems fail with multi-byte encodings and a C locale.
+ * Also, for a C locale there is no need to process as multibyte.
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
int m;
/*
- * Since we copied the string, we can scribble directly on the
- * value
+ * Since we copied the string, we can scribble directly on the value
*/
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
{
/*
* In the multibyte-encoding case, build arrays of pointers to
- * character starts, so that we can avoid inefficient checks
- * in the inner loops.
+ * character starts, so that we can avoid inefficient checks in
+ * the inner loops.
*/
const char **stringchars;
const char **setchars;
else
{
/*
- * In the single-byte-encoding case, we don't need such
- * overhead.
+ * In the single-byte-encoding case, we don't need such overhead.
*/
if (doltrim)
{
VARATT_SIZEP(result) = retlen + VARHDRSZ;
/*
- * There may be some wasted space in the result if deletions occurred,
- * but it's not worth reallocating it; the function result probably
- * won't live long anyway.
+ * There may be some wasted space in the result if deletions occurred, but
+ * it's not worth reallocating it; the function result probably won't live
+ * long anyway.
*/
PG_RETURN_TEXT_P(result);
*
* Portions Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.31 2005/03/16 00:02:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.32 2005/10/15 02:49:29 momjian Exp $
*
*-----------------------------------------------------------------------
*/
locale_messages_assign(const char *value, bool doit, GucSource source)
{
#ifndef WIN32
+
/*
- * LC_MESSAGES category does not exist everywhere, but accept it
- * anyway
+ * LC_MESSAGES category does not exist everywhere, but accept it anyway
*/
#ifdef LC_MESSAGES
if (doit)
value = locale_xxx_assign(LC_MESSAGES, value, false, source);
#endif /* LC_MESSAGES */
return value;
-
-#else /* WIN32 */
+#else /* WIN32 */
/*
* Win32 does not have working setlocale() for LC_MESSAGES. We can only
- * use environment variables to change it (per gettext FAQ). This
- * means we can't actually check the supplied value, so always assume
- * it's good. Also, ignore attempts to set to "", which really means
- * "keep using the old value". (Actually it means "use the environment
- * value", but we are too lazy to try to implement that exactly.)
+ * use environment variables to change it (per gettext FAQ). This means
+ * we can't actually check the supplied value, so always assume it's good.
+ * Also, ignore attempts to set to "", which really means "keep using the
+ * old value". (Actually it means "use the environment value", but we are
+ * too lazy to try to implement that exactly.)
*/
if (doit && value[0])
{
if (!SetEnvironmentVariable("LC_MESSAGES", value))
return NULL;
- snprintf(env, sizeof(env)-1, "LC_MESSAGES=%s", value);
+ snprintf(env, sizeof(env) - 1, "LC_MESSAGES=%s", value);
if (_putenv(env))
return NULL;
}
return value;
-#endif /* WIN32 */
+#endif /* WIN32 */
}
extlconv = localeconv();
/*
- * Must copy all values since restoring internal settings may
- * overwrite localeconv()'s results.
+ * Must copy all values since restoring internal settings may overwrite
+ * localeconv()'s results.
*/
CurrentLocaleConv = *extlconv;
CurrentLocaleConv.currency_symbol = strdup(extlconv->currency_symbol);
/* ----------
* pg_lzcompress.c -
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.19 2005/05/25 21:40:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.20 2005/10/15 02:49:29 momjian Exp $
*
* This is an implementation of LZ compression for PostgreSQL.
* It uses a simple history table and generates 2-3 byte tags
6144, /* Data chunks greater equal 6K force
* compression */
/* except compressed result is greater uncompressed data */
- 20, /* Compression rates below 20% mean
- * fallback to uncompressed */
+ 20, /* Compression rates below 20% mean fallback
+ * to uncompressed */
/* storage except compression is forced by previous parameter */
- 128, /* Stop history lookup if a match of 128
- * bytes is found */
+ 128, /* Stop history lookup if a match of 128 bytes
+ * is found */
10 /* Lower good match size by 10% at every
* lookup loop iteration. */
};
static PGLZ_Strategy strategy_always_data = {
0, /* Chunks of any size are compressed */
0, /* */
- 0, /* We want to save at least one single
- * byte */
- 128, /* Stop history lookup if a match of 128
- * bytes is found */
+ 0, /* We want to save at least one single byte */
+ 128, /* Stop history lookup if a match of 128 bytes
+ * is found */
6 /* Look harder for a good match. */
};
PGLZ_Strategy *PGLZ_strategy_always = &strategy_always_data;
0, /* */
0, /* */
0, /* */
- 0, /* Zero indicates "store uncompressed
- * always" */
+ 0, /* Zero indicates "store uncompressed always" */
0 /* */
};
PGLZ_Strategy *PGLZ_strategy_never = &strategy_never_data;
int32 off = 0;
/*
- * Traverse the linked history list until a good enough match is
- * found.
+ * Traverse the linked history list until a good enough match is found.
*/
hent = hstart[pglz_hist_idx(input, end)];
while (hent)
break;
/*
- * Determine length of match. A better match must be larger than
- * the best so far. And if we already have a match of 16 or more
- * bytes, it's worth the call overhead to use memcmp() to check if
- * this match is equal for the same size. After that we must
- * fallback to character by character comparison to know the exact
- * position where the diff occurred.
+ * Determine length of match. A better match must be larger than the
+ * best so far. And if we already have a match of 16 or more bytes,
+ * it's worth the call overhead to use memcmp() to check if this match
+ * is equal for the same size. After that we must fallback to
+ * character by character comparison to know the exact position where
+ * the diff occurred.
*/
thislen = 0;
if (len >= 16)
hent = hent->next;
/*
- * Be happy with lesser good matches the more entries we visited.
- * But no point in doing calculation if we're at end of list.
+ * Be happy with lesser good matches the more entries we visited. But
+ * no point in doing calculation if we're at end of list.
*/
if (hent)
{
memset((void *) hist_start, 0, sizeof(hist_start));
/*
- * Compute the maximum result size allowed by the strategy. If the
- * input size exceeds force_input_size, the max result size is the
- * input size itself. Otherwise, it is the input size minus the
- * minimum wanted compression rate.
+ * Compute the maximum result size allowed by the strategy. If the input
+ * size exceeds force_input_size, the max result size is the input size
+ * itself. Otherwise, it is the input size minus the minimum wanted
+ * compression rate.
*/
if (slen >= strategy->force_input_size)
result_max = slen;
while (dp < dend)
{
/*
- * If we already exceeded the maximum result size, set no
- * compression flag and stop this. But don't check too often.
+ * If we already exceeded the maximum result size, set no compression
+ * flag and stop this. But don't check too often.
*/
if (bp - bstart >= result_max)
{
}
/*
- * If we are still in compressing mode, write out the last control
- * byte and determine if the compression gained the rate requested by
- * the strategy.
+ * If we are still in compressing mode, write out the last control byte
+ * and determine if the compression gained the rate requested by the
+ * strategy.
*/
if (do_compress)
{
/*
* Done - if we successfully compressed and matched the strategy's
- * constraints, return the compressed result. Otherwise copy the
- * original source over it and return the original length.
+ * constraints, return the compressed result. Otherwise copy the original
+ * source over it and return the original length.
*/
if (do_compress)
{
/*
* Otherwise it contains the match length minus 3 and the
* upper 4 bits of the offset. The next following byte
- * contains the lower 8 bits of the offset. If the length
- * is coded as 18, another extension tag byte tells how
- * much longer the match really was (0-255).
+ * contains the lower 8 bits of the offset. If the length is
+ * coded as 18, another extension tag byte tells how much
+ * longer the match really was (0-255).
*/
len = (dp[0] & 0x0f) + 3;
off = ((dp[0] & 0xf0) << 4) | dp[1];
len += *dp++;
/*
- * Now we copy the bytes specified by the tag from OUTPUT
- * to OUTPUT. It is dangerous and platform dependent to
- * use memcpy() here, because the copied areas could
- * overlap extremely!
+ * Now we copy the bytes specified by the tag from OUTPUT to
+ * OUTPUT. It is dangerous and platform dependent to use
+ * memcpy() here, because the copied areas could overlap
+ * extremely!
*/
while (len--)
{
else
{
/*
- * An unset control bit means LITERAL BYTE. So we just
- * copy one from INPUT to OUTPUT.
+ * An unset control bit means LITERAL BYTE. So we just copy
+ * one from INPUT to OUTPUT.
*/
*bp++ = *dp++;
}
if (dstate->tocopy > 0)
{
/*
- * Copy one byte from output to output until we did it for the
- * length specified by the last tag. Return that byte.
+ * Copy one byte from output to output until we did it for the length
+ * specified by the last tag. Return that byte.
*/
dstate->tocopy--;
return (*(dstate->cp_out++) = *(dstate->cp_copy++));
if (dstate->ctrl_count == 0)
{
/*
- * Get the next control byte if we need to, but check for EOF
- * before.
+ * Get the next control byte if we need to, but check for EOF before.
*/
if (dstate->cp_in == dstate->cp_end)
return EOF;
/*
* This decompression method saves time only, if we stop near the
- * beginning of the data (maybe because we're called by a
- * comparison function and a difference occurs early). Otherwise,
- * all the checks, needed here, cause too much overhead.
+ * beginning of the data (maybe because we're called by a comparison
+ * function and a difference occurs early). Otherwise, all the checks,
+ * needed here, cause too much overhead.
*
- * Thus we decompress the entire rest at once into the temporary
- * buffer and change the decomp state to return the prepared data
- * from the buffer by the more simple calls to
+ * Thus we decompress the entire rest at once into the temporary buffer
+ * and change the decomp state to return the prepared data from the
+ * buffer by the more simple calls to
* pglz_get_next_decomp_char_from_plain().
*/
if (dstate->cp_out - dstate->temp_buf >= 256)
if (dstate->ctrl & 0x01)
{
/*
- * Bit is set, so tag is following. Setup copy information and do
- * the copy for the first byte as above.
+ * Bit is set, so tag is following. Setup copy information and do the
+ * copy for the first byte as above.
*/
int off;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.24 2005/06/29 22:51:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.25 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
result = beentry->activity_start_timestamp;
/*
- * No time recorded for start of current query -- this is the case if
- * the user hasn't enabled query-level stats collection.
+ * No time recorded for start of current query -- this is the case if the
+ * user hasn't enabled query-level stats collection.
*/
if (result == 0)
PG_RETURN_NULL();
Datum
pg_stat_get_backend_start(PG_FUNCTION_ARGS)
{
- int32 beid = PG_GETARG_INT32(0);
+ int32 beid = PG_GETARG_INT32(0);
TimestampTz result;
PgStat_StatBeEntry *beentry;
pg_stat_get_backend_client_addr(PG_FUNCTION_ARGS)
{
PgStat_StatBeEntry *beentry;
- int32 beid;
+ int32 beid;
char remote_host[NI_MAXHOST];
int ret;
pg_stat_get_backend_client_port(PG_FUNCTION_ARGS)
{
PgStat_StatBeEntry *beentry;
- int32 beid;
+ int32 beid;
char remote_port[NI_MAXSERV];
int ret;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/quote.c,v 1.16 2005/07/02 17:01:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/quote.c,v 1.17 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
cp1 = VARDATA(t);
cp2 = VARDATA(result);
- for(; len-- > 0; cp1++)
+ for (; len-- > 0; cp1++)
if (*cp1 == '\\')
{
*cp2++ = ESCAPE_STRING_SYNTAX;
break;
}
-
+
len = VARSIZE(t) - VARHDRSZ;
cp1 = VARDATA(t);
*cp2++ = '\'';
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.58 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.59 2005/10/15 02:49:29 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
*
* Returns regex_t
*
- * text_re --- the pattern, expressed as an *untoasted* TEXT object
- * cflags --- compile options for the pattern
+ * text_re --- the pattern, expressed as an *untoasted* TEXT object
+ * cflags --- compile options for the pattern
*
* Pattern is given in the database encoding. We internally convert to
* array of pg_wchar which is what Spencer's regex package wants.
/*
* Look for a match among previously compiled REs. Since the data
- * structure is self-organizing with most-used entries at the front,
- * our search strategy can just be to scan from the front.
+ * structure is self-organizing with most-used entries at the front, our
+ * search strategy can just be to scan from the front.
*/
for (i = 0; i < num_res; i++)
{
re_temp.cre_flags = cflags;
/*
- * Okay, we have a valid new item in re_temp; insert it into the
- * storage array. Discard last entry if needed.
+ * Okay, we have a valid new item in re_temp; insert it into the storage
+ * array. Discard last entry if needed.
*/
if (num_res >= MAX_CACHED_RES)
{
size_t data_len;
int regexec_result;
regex_t re;
- char errMsg[100];
+ char errMsg[100];
/* Convert data string to wide characters */
data = (pg_wchar *) palloc((dat_len + 1) * sizeof(pg_wchar));
regmatch_t pmatch[2];
/*
- * We pass two regmatch_t structs to get info about the overall match
- * and the match for the first parenthesized subexpression (if any).
- * If there is a parenthesized subexpression, we return what it
- * matched; else return what the whole regexp matched.
+ * We pass two regmatch_t structs to get info about the overall match and
+ * the match for the first parenthesized subexpression (if any). If there
+ * is a parenthesized subexpression, we return what it matched; else
+ * return what the whole regexp matched.
*/
match = RE_compile_and_execute(p,
VARDATA(s),
}
return DirectFunctionCall3(text_substr,
- PointerGetDatum(s),
- Int32GetDatum(so + 1),
- Int32GetDatum(eo - so));
+ PointerGetDatum(s),
+ Int32GetDatum(so + 1),
+ Int32GetDatum(eo - so));
}
PG_RETURN_NULL();
/*
* textregexreplace_noopt()
- * Return a replace string matched by a regular expression.
+ * Return a replace string matched by a regular expression.
* This function is a version that doesn't specify the option of
* textregexreplace. This is case sensitive, replace the first
* instance only.
re = RE_compile_and_cache(p, regex_flavor);
return DirectFunctionCall4(replace_text_regexp,
- PointerGetDatum(s),
- PointerGetDatum(&re),
- PointerGetDatum(r),
- BoolGetDatum(false));
+ PointerGetDatum(s),
+ PointerGetDatum(&re),
+ PointerGetDatum(r),
+ BoolGetDatum(false));
}
/*
* textregexreplace()
- * Return a replace string matched by a regular expression.
+ * Return a replace string matched by a regular expression.
*/
Datum
textregexreplace(PG_FUNCTION_ARGS)
char *opt_p = VARDATA(opt);
int opt_len = (VARSIZE(opt) - VARHDRSZ);
int i;
- bool global = false;
+ bool global = false;
bool ignorecase = false;
regex_t re;
break;
case 'g':
global = true;
+
break;
default:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid option of regexp_replace: %c",
- opt_p[i])));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid option of regexp_replace: %c",
+ opt_p[i])));
break;
}
}
re = RE_compile_and_cache(p, regex_flavor);
return DirectFunctionCall4(replace_text_regexp,
- PointerGetDatum(s),
- PointerGetDatum(&re),
- PointerGetDatum(r),
- BoolGetDatum(global));
+ PointerGetDatum(s),
+ PointerGetDatum(&re),
+ PointerGetDatum(r),
+ BoolGetDatum(global));
}
/* similar_escape()
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
}
/* We need room for ^, $, and up to 2 output bytes per input byte */
while (plen > 0)
{
- char pchar = *p;
+ char pchar = *p;
if (afterescape)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.95 2005/10/02 23:50:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.96 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
strspn(pro_name_or_oid, "0123456789") == strlen(pro_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(pro_name_or_oid)));
+ CStringGetDatum(pro_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a name, possibly schema-qualified */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_proc for a unique match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_proc for a unique match. This is needed for
+ * initializing other system catalogs (pg_namespace may not exist yet, and
+ * certainly there are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
}
/*
- * Normal case: parse the name into components and see if it matches
- * any pg_proc entries in the current search path.
+ * Normal case: parse the name into components and see if it matches any
+ * pg_proc entries in the current search path.
*/
names = stringToQualifiedNameList(pro_name_or_oid, "regprocin");
clist = FuncnameGetCandidates(names, -1);
if (clist == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
else if (clist->next != NULL)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
char *proname = NameStr(procform->proname);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the proc name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the proc name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
result = pstrdup(proname);
strspn(pro_name_or_oid, "0123456789") == strlen(pro_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(pro_name_or_oid)));
+ CStringGetDatum(pro_name_or_oid)));
PG_RETURN_OID(result);
}
/*
- * Else it's a name and arguments. Parse the name and arguments, look
- * up potential matches in the current namespace search list, and scan
- * to see which one exactly matches the given argument types. (There
- * will not be more than one match.)
+ * Else it's a name and arguments. Parse the name and arguments, look up
+ * potential matches in the current namespace search list, and scan to see
+ * which one exactly matches the given argument types. (There will not be
+ * more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
* datatype cannot be used for any system column that needs to receive
if (clist == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
result = clist->oid;
initStringInfo(&buf);
/*
- * Would this proc be found (given the right args) by
- * regprocedurein? If not, we need to qualify it.
+ * Would this proc be found (given the right args) by regprocedurein?
+ * If not, we need to qualify it.
*/
if (FunctionIsVisible(procedure_oid))
nspname = NULL;
strspn(opr_name_or_oid, "0123456789") == strlen(opr_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(opr_name_or_oid)));
+ CStringGetDatum(opr_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a name, possibly schema-qualified */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_operator for a unique match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_operator for a unique match. This is needed for
+ * initializing other system catalogs (pg_namespace may not exist yet, and
+ * certainly there are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("operator does not exist: %s", opr_name_or_oid)));
+ errmsg("operator does not exist: %s", opr_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
}
/*
- * Normal case: parse the name into components and see if it matches
- * any pg_operator entries in the current search path.
+ * Normal case: parse the name into components and see if it matches any
+ * pg_operator entries in the current search path.
*/
names = stringToQualifiedNameList(opr_name_or_oid, "regoperin");
clist = OpernameGetCandidates(names, '\0');
char *oprname = NameStr(operform->oprname);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the oper name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the oper name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
result = pstrdup(oprname);
else
{
/*
- * If OID doesn't match any pg_operator entry, return it
- * numerically
+ * If OID doesn't match any pg_operator entry, return it numerically
*/
result = (char *) palloc(NAMEDATALEN);
snprintf(result, NAMEDATALEN, "%u", oprid);
strspn(opr_name_or_oid, "0123456789") == strlen(opr_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(opr_name_or_oid)));
+ CStringGetDatum(opr_name_or_oid)));
PG_RETURN_OID(result);
}
/*
- * Else it's a name and arguments. Parse the name and arguments, look
- * up potential matches in the current namespace search list, and scan
- * to see which one exactly matches the given argument types. (There
- * will not be more than one match.)
+ * Else it's a name and arguments. Parse the name and arguments, look up
+ * potential matches in the current namespace search list, and scan to see
+ * which one exactly matches the given argument types. (There will not be
+ * more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
* datatype cannot be used for any system column that needs to receive
initStringInfo(&buf);
/*
- * Would this oper be found (given the right args) by
- * regoperatorin? If not, we need to qualify it.
+ * Would this oper be found (given the right args) by regoperatorin?
+ * If not, we need to qualify it.
*/
if (!OperatorIsVisible(operator_oid))
{
else
{
/*
- * If OID doesn't match any pg_operator entry, return it
- * numerically
+ * If OID doesn't match any pg_operator entry, return it numerically
*/
result = (char *) palloc(NAMEDATALEN);
snprintf(result, NAMEDATALEN, "%u", operator_oid);
/* Numeric OID? */
if (class_name_or_oid[0] >= '0' &&
class_name_or_oid[0] <= '9' &&
- strspn(class_name_or_oid, "0123456789") == strlen(class_name_or_oid))
+ strspn(class_name_or_oid, "0123456789") == strlen(class_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(class_name_or_oid)));
+ CStringGetDatum(class_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a name, possibly schema-qualified */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_class for a match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_class for a match. This is needed for initializing
+ * other system catalogs (pg_namespace may not exist yet, and certainly
+ * there are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation \"%s\" does not exist", class_name_or_oid)));
+ errmsg("relation \"%s\" does not exist", class_name_or_oid)));
/* We assume there can be only one match */
}
/*
- * Normal case: parse the name into components and see if it matches
- * any pg_class entries in the current search path.
+ * Normal case: parse the name into components and see if it matches any
+ * pg_class entries in the current search path.
*/
names = stringToQualifiedNameList(class_name_or_oid, "regclassin");
char *classname = NameStr(classform->relname);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the class name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the class name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
result = pstrdup(classname);
char *nspname;
/*
- * Would this class be found by regclassin? If not, qualify
- * it.
+ * Would this class be found by regclassin? If not, qualify it.
*/
if (RelationIsVisible(classid))
nspname = NULL;
strspn(typ_name_or_oid, "0123456789") == strlen(typ_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(typ_name_or_oid)));
+ CStringGetDatum(typ_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a type name, possibly schema-qualified or decorated */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_type for a match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_type for a match. This is needed for initializing other
+ * system catalogs (pg_namespace may not exist yet, and certainly there
+ * are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("type \"%s\" does not exist", typ_name_or_oid)));
+ errmsg("type \"%s\" does not exist", typ_name_or_oid)));
/* We assume there can be only one match */
}
/*
- * Normal case: invoke the full parser to deal with special cases such
- * as array syntax.
+ * Normal case: invoke the full parser to deal with special cases such as
+ * array syntax.
*/
parseTypeString(typ_name_or_oid, &result, &typmod);
Form_pg_type typeform = (Form_pg_type) GETSTRUCT(typetup);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the type name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the type name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
{
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.80 2005/06/28 05:09:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.81 2005/10/15 02:49:29 momjian Exp $
*
* ----------
*/
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_check", RI_TRIGTYPE_INUP);
tgargs = trigdata->tg_trigger->tgargs;
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the new tuple.
*
* pk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
}
/*
- * We should not even consider checking the row if it is no longer
- * valid since it was either deleted (doesn't matter) or updated (in
- * which case it'll be checked with its final values).
+ * We should not even consider checking the row if it is no longer valid
+ * since it was either deleted (doesn't matter) or updated (in which case
+ * it'll be checked with its final values).
*/
Assert(new_row_buf != InvalidBuffer);
if (!HeapTupleSatisfiesItself(new_row->t_data, new_row_buf))
case RI_KEYS_ALL_NULL:
/*
- * No check - if NULLs are allowed at all is already checked
- * by NOT NULL constraint.
+ * No check - if NULLs are allowed at all is already checked by
+ * NOT NULL constraint.
*
* This is true for MATCH FULL, MATCH PARTIAL, and MATCH
*
case RI_KEYS_SOME_NULL:
/*
- * This is the only case that differs between the three kinds
- * of MATCH.
+ * This is the only case that differs between the three kinds of
+ * MATCH.
*/
switch (match_type)
{
case RI_MATCH_TYPE_FULL:
/*
- * Not allowed - MATCH FULL says either all or none of
- * the attributes can be NULLs
+ * Not allowed - MATCH FULL says either all or none of the
+ * attributes can be NULLs
*/
ereport(ERROR,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
- RelationGetRelationName(trigdata->tg_relation),
+ RelationGetRelationName(trigdata->tg_relation),
tgargs[RI_CONSTRAINT_NAME_ARGNO]),
errdetail("MATCH FULL does not allow mixing of null and nonnull key values.")));
heap_close(pk_rel, RowShareLock);
case RI_MATCH_TYPE_UNSPECIFIED:
/*
- * MATCH - if ANY column is null, we
- * have a match.
+ * MATCH - if ANY column is null, we have a
+ * match.
*/
heap_close(pk_rel, RowShareLock);
return PointerGetDatum(NULL);
case RI_MATCH_TYPE_PARTIAL:
/*
- * MATCH PARTIAL - all non-null columns must match.
- * (not implemented, can be done by modifying the
- * query below to only include non-null columns, or by
- * writing a special version here)
+ * MATCH PARTIAL - all non-null columns must match. (not
+ * implemented, can be done by modifying the query below
+ * to only include non-null columns, or by writing a
+ * special version here)
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("MATCH PARTIAL not yet implemented")));
+ errmsg("MATCH PARTIAL not yet implemented")));
heap_close(pk_rel, RowShareLock);
return PointerGetDatum(NULL);
}
case RI_KEYS_NONE_NULL:
/*
- * Have a full qualified key - continue below for all three
- * kinds of MATCH.
+ * Have a full qualified key - continue below for all three kinds
+ * of MATCH.
*/
break;
}
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
for (i = 0; i < qkey.nkeypairs; i++)
{
quoteOneName(attname,
- tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
+ tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), " %s %s = $%d",
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(fk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_FK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_FK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
case RI_KEYS_ALL_NULL:
/*
- * No check - nothing could have been referencing this row
- * anyway.
+ * No check - nothing could have been referencing this row anyway.
*/
return true;
case RI_KEYS_SOME_NULL:
/*
- * This is the only case that differs between the three kinds
- * of MATCH.
+ * This is the only case that differs between the three kinds of
+ * MATCH.
*/
switch (match_type)
{
case RI_MATCH_TYPE_UNSPECIFIED:
/*
- * MATCH /FULL - if ANY column is null,
- * we can't be matching to this row already.
+ * MATCH /FULL - if ANY column is null, we
+ * can't be matching to this row already.
*/
return true;
case RI_MATCH_TYPE_PARTIAL:
/*
- * MATCH PARTIAL - all non-null columns must match.
- * (not implemented, can be done by modifying the
- * query below to only include non-null columns, or by
- * writing a special version here)
+ * MATCH PARTIAL - all non-null columns must match. (not
+ * implemented, can be done by modifying the query below
+ * to only include non-null columns, or by writing a
+ * special version here)
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("MATCH PARTIAL not yet implemented")));
+ errmsg("MATCH PARTIAL not yet implemented")));
break;
}
case RI_KEYS_NONE_NULL:
/*
- * Have a full qualified key - continue below for all three
- * kinds of MATCH.
+ * Have a full qualified key - continue below for all three kinds
+ * of MATCH.
*/
break;
}
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
for (i = 0; i < qkey.nkeypairs; i++)
{
quoteOneName(attname,
- tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
+ tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), " %s %s = $%d",
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_noaction_del", RI_TRIGTYPE_DELETE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict delete
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the restrict delete lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_noaction_upd", RI_TRIGTYPE_UPDATE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * and old tuple.
+ * Get the relation descriptors of the FK and PK tables and the new and
+ * old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
match_type, tgnargs, tgargs))
{
/*
- * There's either another row, or no row could match this
- * one. In either case, we don't need to do the check.
+ * There's either another row, or no row could match this one.
+ * In either case, we don't need to do the check.
*/
heap_close(fk_rel, RowShareLock);
return PointerGetDatum(NULL);
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the noaction update
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the noaction update lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_cascade_del", RI_TRIGTYPE_DELETE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual DELETE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * DELETE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
/* Prepare and save the plan */
}
/*
- * We have a plan now. Build up the arguments from the key
- * values in the deleted PK tuple and delete the referencing
- * rows
+ * We have a plan now. Build up the arguments from the key values
+ * in the deleted PK tuple and delete the referencing rows
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
int j;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_cascade_upd", RI_TRIGTYPE_UPDATE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * and old tuple.
+ * Get the relation descriptors of the FK and PK tables and the new and
+ * old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
querysep = ",";
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
queryoids[j] = queryoids[i];
}
strcat(querystr, qualstr);
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_restrict_del", RI_TRIGTYPE_DELETE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict delete
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the restrict delete lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_restrict_upd", RI_TRIGTYPE_UPDATE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * and old tuple.
+ * Get the relation descriptors of the FK and PK tables and the new and
+ * old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict update
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the restrict update lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setnull_del", RI_TRIGTYPE_DELETE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the set null delete
- * operation
+ * Fetch or prepare a saved plan for the set null delete operation
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
querysep = ",";
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
bool use_cached_query;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setnull_upd", RI_TRIGTYPE_UPDATE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
elog(ERROR, "SPI_connect failed");
/*
- * "MATCH " only changes columns corresponding to
- * the referenced columns that have changed in pk_rel. This
- * means the "SET attrn=NULL [, attrn=NULL]" string will be
- * change as well. In this case, we need to build a temporary
- * plan rather than use our cached plan, unless the update
- * happens to change all columns in the key. Fortunately, for
- * the most common case of a single-column foreign key, this
- * will be true.
+ * "MATCH " only changes columns corresponding to the
+ * referenced columns that have changed in pk_rel. This means the
+ * "SET attrn=NULL [, attrn=NULL]" string will be change as well.
+ * In this case, we need to build a temporary plan rather than use
+ * our cached plan, unless the update happens to change all
+ * columns in the key. Fortunately, for the most common case of a
+ * single-column foreign key, this will be true.
*
- * In case you're wondering, the inequality check works because
- * we know that the old key value has no NULLs (see above).
+ * In case you're wondering, the inequality check works because we
+ * know that the old key value has no NULLs (see above).
*/
use_cached_query = match_type == RI_MATCH_TYPE_FULL ||
&qkey, RI_KEYPAIR_PK_IDX);
/*
- * Fetch or prepare a saved plan for the set null update
- * operation if possible, or build a temporary plan if not.
+ * Fetch or prepare a saved plan for the set null update operation
+ * if possible, or build a temporary plan if not.
*/
if (!use_cached_query ||
(qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_FK_IDX]);
/*
- * MATCH - only change columns
- * corresponding to changed columns in pk_rel's key
+ * MATCH - only change columns corresponding
+ * to changed columns in pk_rel's key
*/
if (match_type == RI_MATCH_TYPE_FULL ||
!ri_OneKeyEqual(pk_rel, i, old_row, new_row, &qkey,
qualsep, attname, i + 1);
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
void *qplan;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setdefault_del", RI_TRIGTYPE_DELETE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
/*
* Prepare a plan for the set default delete operation.
- * Unfortunately we need to do it on every invocation because
- * the default value could potentially change between calls.
+ * Unfortunately we need to do it on every invocation because the
+ * default value could potentially change between calls.
*/
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
querysep = ",";
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
heap_close(fk_rel, RowExclusiveLock);
/*
- * In the case we delete the row who's key is equal to the
- * default values AND a referencing row in the foreign key
- * table exists, we would just have updated it to the same
- * values. We need to do another lookup now and in case a
- * reference exists, abort the operation. That is already
- * implemented in the NO ACTION trigger.
+ * In the case we delete the row who's key is equal to the default
+ * values AND a referencing row in the foreign key table exists,
+ * we would just have updated it to the same values. We need to do
+ * another lookup now and in case a reference exists, abort the
+ * operation. That is already implemented in the NO ACTION
+ * trigger.
*/
RI_FKey_noaction_del(fcinfo);
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setdefault_upd", RI_TRIGTYPE_UPDATE);
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
/*
* Prepare a plan for the set default delete operation.
- * Unfortunately we need to do it on every invocation because
- * the default value could potentially change between calls.
+ * Unfortunately we need to do it on every invocation because the
+ * default value could potentially change between calls.
*/
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_FK_IDX]);
/*
- * MATCH - only change columns
- * corresponding to changed columns in pk_rel's key
+ * MATCH - only change columns corresponding
+ * to changed columns in pk_rel's key
*/
if (match_type == RI_MATCH_TYPE_FULL ||
!ri_OneKeyEqual(pk_rel, i, old_row,
- new_row, &qkey, RI_KEYPAIR_PK_IDX))
+ new_row, &qkey, RI_KEYPAIR_PK_IDX))
{
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), "%s %s = DEFAULT",
querysep, attname);
qualsep, attname, i + 1);
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
/*
* In the case we updated the row who's key was equal to the
- * default values AND a referencing row in the foreign key
- * table exists, we would just have updated it to the same
- * values. We need to do another lookup now and in case a
- * reference exists, abort the operation. That is already
- * implemented in the NO ACTION trigger.
+ * default values AND a referencing row in the foreign key table
+ * exists, we would just have updated it to the same values. We
+ * need to do another lookup now and in case a reference exists,
+ * abort the operation. That is already implemented in the NO
+ * ACTION trigger.
*/
RI_FKey_noaction_upd(fcinfo);
if (!OidIsValid(trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigger->tgname,
- RelationGetRelationName(pk_rel)),
- errhint("Remove this referential integrity trigger and its mates, "
- "then do ALTER TABLE ADD CONSTRAINT.")));
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigger->tgname,
+ RelationGetRelationName(pk_rel)),
+ errhint("Remove this referential integrity trigger and its mates, "
+ "then do ALTER TABLE ADD CONSTRAINT.")));
fk_rel = heap_open(trigger->tgconstrrelid, AccessShareLock);
return ri_KeysEqual(pk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_PK_IDX);
- /* Handle MATCH PARTIAL set null delete. */
+ /* Handle MATCH PARTIAL set null delete. */
case RI_MATCH_TYPE_PARTIAL:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
if (!OidIsValid(trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigger->tgname,
- RelationGetRelationName(fk_rel)),
- errhint("Remove this referential integrity trigger and its mates, "
- "then do ALTER TABLE ADD CONSTRAINT.")));
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigger->tgname,
+ RelationGetRelationName(fk_rel)),
+ errhint("Remove this referential integrity trigger and its mates, "
+ "then do ALTER TABLE ADD CONSTRAINT.")));
pk_rel = heap_open(trigger->tgconstrrelid, AccessShareLock);
return ri_KeysEqual(fk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_FK_IDX);
- /* Handle MATCH PARTIAL set null delete. */
+ /* Handle MATCH PARTIAL set null delete. */
case RI_MATCH_TYPE_PARTIAL:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
{
const char *constrname = fkconstraint->constr_name;
char querystr[MAX_QUOTED_REL_NAME_LEN * 2 + 250 +
- (MAX_QUOTED_NAME_LEN + 32) * ((RI_MAX_NUMKEYS * 4) + 1)];
+ (MAX_QUOTED_NAME_LEN + 32) * ((RI_MAX_NUMKEYS * 4) + 1)];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char relname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
void *qplan;
/*
- * Check to make sure current user has enough permissions to do the
- * test query. (If not, caller can fall back to the trigger method,
- * which works because it changes user IDs on the fly.)
+ * Check to make sure current user has enough permissions to do the test
+ * query. (If not, caller can fall back to the trigger method, which
+ * works because it changes user IDs on the fly.)
*
* XXX are there any other show-stopper conditions to check?
*/
}
/*
- * It's sufficient to test any one pk attribute for null to detect a
- * join failure.
+ * It's sufficient to test any one pk attribute for null to detect a join
+ * failure.
*/
quoteOneName(attname, strVal(linitial(fkconstraint->pk_attrs)));
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr),
")");
/*
- * Temporarily increase work_mem so that the check query can be
- * executed more efficiently. It seems okay to do this because the
- * query is simple enough to not use a multiple of work_mem, and one
- * typically would not have many large foreign-key validations
- * happening concurrently. So this seems to meet the criteria for
- * being considered a "maintenance" operation, and accordingly we use
- * maintenance_work_mem.
+ * Temporarily increase work_mem so that the check query can be executed
+ * more efficiently. It seems okay to do this because the query is simple
+ * enough to not use a multiple of work_mem, and one typically would not
+ * have many large foreign-key validations happening concurrently. So
+ * this seems to meet the criteria for being considered a "maintenance"
+ * operation, and accordingly we use maintenance_work_mem.
*
* We do the equivalent of "SET LOCAL work_mem" so that transaction abort
* will restore the old value if we lose control due to an error.
elog(ERROR, "SPI_prepare returned %d for %s", SPI_result, querystr);
/*
- * Run the plan. For safety we force a current snapshot to be used.
- * (In serializable mode, this arguably violates serializability, but we
+ * Run the plan. For safety we force a current snapshot to be used. (In
+ * serializable mode, this arguably violates serializability, but we
* really haven't got much choice.) We need at most one tuple returned,
* so pass limit = 1.
*/
/*
* If it's MATCH FULL, and there are any nulls in the FK keys,
- * complain about that rather than the lack of a match. MATCH
- * FULL disallows partially-null FK rows.
+ * complain about that rather than the lack of a match. MATCH FULL
+ * disallows partially-null FK rows.
*/
if (fkconstraint->fk_matchtype == FKCONSTR_MATCH_FULL)
{
}
/*
- * Although we didn't cache the query, we need to set up a fake
- * query key to pass to ri_ReportViolation.
+ * Although we didn't cache the query, we need to set up a fake query
+ * key to pass to ri_ReportViolation.
*/
MemSet(&qkey, 0, sizeof(qkey));
qkey.constr_queryno = RI_PLAN_CHECK_LOOKUPPK;
elog(ERROR, "SPI_finish failed");
/*
- * Restore work_mem for the remainder of the current transaction. This
- * is another SET LOCAL, so it won't affect the session value, nor any
+ * Restore work_mem for the remainder of the current transaction. This is
+ * another SET LOCAL, so it won't affect the session value, nor any
* tentative value if there is one.
*/
snprintf(workmembuf, sizeof(workmembuf), "%d", old_work_mem);
key->nkeypairs = (argc - RI_FIRST_ATTNAME_ARGNO) / 2;
/*
- * Lookup the attribute numbers of the arguments to the trigger call
- * and fill in the keypairs.
+ * Lookup the attribute numbers of the arguments to the trigger call and
+ * fill in the keypairs.
*/
for (i = 0, j = RI_FIRST_ATTNAME_ARGNO; j < argc; i++, j += 2)
{
!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired AFTER ROW", funcname)));
+ errmsg("function \"%s\" must be fired AFTER ROW", funcname)));
switch (tgkind)
{
case RI_TRIGTYPE_INSERT:
if (!TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for INSERT", funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for INSERT", funcname)));
break;
case RI_TRIGTYPE_UPDATE:
if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for UPDATE", funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for UPDATE", funcname)));
break;
case RI_TRIGTYPE_INUP:
if (!TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) &&
!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for INSERT or UPDATE",
- funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for INSERT or UPDATE",
+ funcname)));
break;
case RI_TRIGTYPE_DELETE:
if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for DELETE", funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for DELETE", funcname)));
break;
}
funcname)));
/*
- * Check that tgconstrrelid is known. We need to check here because
- * of ancient pg_dump bug; see notes in CreateTrigger().
+ * Check that tgconstrrelid is known. We need to check here because of
+ * ancient pg_dump bug; see notes in CreateTrigger().
*/
if (!OidIsValid(trigdata->tg_trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigdata->tg_trigger->tgname,
- RelationGetRelationName(trigdata->tg_relation)),
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigdata->tg_trigger->tgname,
+ RelationGetRelationName(trigdata->tg_relation)),
errhint("Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT.")));
}
query_rel = fk_rel;
/*
- * The values for the query are taken from the table on which the
- * trigger is called - it is normally the other one with respect to
- * query_rel. An exception is ri_Check_Pk_Match(), which uses the PK
- * table for both (the case when constrname == NULL)
+ * The values for the query are taken from the table on which the trigger
+ * is called - it is normally the other one with respect to query_rel. An
+ * exception is ri_Check_Pk_Match(), which uses the PK table for both (the
+ * case when constrname == NULL)
*/
if (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK && constrname != NULL)
{
vals, nulls);
if (old_tuple)
ri_ExtractValues(qkey, key_idx, source_rel, old_tuple,
- vals + qkey->nkeypairs, nulls + qkey->nkeypairs);
+ vals + qkey->nkeypairs, nulls + qkey->nkeypairs);
}
else
{
/*
* In READ COMMITTED mode, we just need to use an up-to-date regular
- * snapshot, and we will see all rows that could be interesting.
- * But in SERIALIZABLE mode, we can't change the transaction snapshot.
- * If the caller passes detectNewRows == false then it's okay to do the
- * query with the transaction snapshot; otherwise we use a current
- * snapshot, and tell the executor to error out if it finds any rows under
- * the current snapshot that wouldn't be visible per the transaction
- * snapshot.
+ * snapshot, and we will see all rows that could be interesting. But in
+ * SERIALIZABLE mode, we can't change the transaction snapshot. If the
+ * caller passes detectNewRows == false then it's okay to do the query
+ * with the transaction snapshot; otherwise we use a current snapshot, and
+ * tell the executor to error out if it finds any rows under the current
+ * snapshot that wouldn't be visible per the transaction snapshot.
*/
if (IsXactIsoLevelSerializable && detectNewRows)
{
- CommandCounterIncrement(); /* be sure all my own work is visible */
+ CommandCounterIncrement(); /* be sure all my own work is visible */
test_snapshot = CopySnapshot(GetLatestSnapshot());
crosscheck_snapshot = CopySnapshot(GetTransactionSnapshot());
}
/*
* If this is a select query (e.g., for a 'no action' or 'restrict'
- * trigger), we only need to see if there is a single row in the
- * table, matching the key. Otherwise, limit = 0 - because we want
- * the query to affect ALL the matching rows.
+ * trigger), we only need to see if there is a single row in the table,
+ * matching the key. Otherwise, limit = 0 - because we want the query to
+ * affect ALL the matching rows.
*/
limit = (expect_OK == SPI_OK_SELECT) ? 1 : 0;
/* XXX wouldn't it be clearer to do this part at the caller? */
if (constrname && expect_OK == SPI_OK_SELECT &&
- (SPI_processed == 0) == (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK))
+ (SPI_processed == 0) == (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK))
ri_ReportViolation(qkey, constrname,
pk_rel, fk_rel,
new_tuple ? new_tuple : old_tuple,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't
- * passed by caller, assume the violator tuple came from there.
+ * Determine which relation to complain about. If tupdesc wasn't passed
+ * by caller, assume the violator tuple came from there.
*/
onfk = (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK);
if (onfk)
/*
* Special case - if there are no keys at all, this is a 'no column'
- * constraint - no need to try to extract the values, and the message
- * in this case looks different.
+ * constraint - no need to try to extract the values, and the message in
+ * this case looks different.
*/
if (qkey->nkeypairs == 0)
{
val = "null";
/*
- * Go to "..." if name or value doesn't fit in buffer. We reserve
- * 5 bytes to ensure we can add comma, "...", null.
+ * Go to "..." if name or value doesn't fit in buffer. We reserve 5
+ * bytes to ensure we can add comma, "...", null.
*/
if (strlen(name) >= (key_names + BUFLENGTH - 5) - name_ptr ||
strlen(val) >= (key_values + BUFLENGTH - 5) - val_ptr)
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
RelationGetRelationName(fk_rel), constrname),
- errdetail("Key (%s)=(%s) is not present in table \"%s\".",
- key_names, key_values,
- RelationGetRelationName(pk_rel))));
+ errdetail("Key (%s)=(%s) is not present in table \"%s\".",
+ key_names, key_values,
+ RelationGetRelationName(pk_rel))));
else
ereport(ERROR,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("update or delete on \"%s\" violates foreign key constraint \"%s\" on \"%s\"",
RelationGetRelationName(pk_rel),
constrname, RelationGetRelationName(fk_rel)),
- errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
- key_names, key_values,
- RelationGetRelationName(fk_rel))));
+ errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
+ key_names, key_values,
+ RelationGetRelationName(fk_rel))));
}
/* ----------
key->nkeypairs = (argc - RI_FIRST_ATTNAME_ARGNO) / 2;
/*
- * Lookup the attribute numbers of the arguments to the trigger call
- * and fill in the keypairs.
+ * Lookup the attribute numbers of the arguments to the trigger call and
+ * fill in the keypairs.
*/
for (i = 0, j = RI_FIRST_ATTNAME_ARGNO + RI_KEYPAIR_PK_IDX; j < argc; i++, j += 2)
{
return false;
/*
- * Get the attribute's type OID and call the '=' operator to
- * compare the values.
+ * Get the attribute's type OID and call the '=' operator to compare
+ * the values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[i][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
continue;
/*
- * Get the attributes type OID and call the '=' operator to
- * compare the values.
+ * Get the attributes type OID and call the '=' operator to compare
+ * the values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[i][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
return false;
/*
- * Get the attributes type OID and call the '=' operator to compare
- * the values.
+ * Get the attributes type OID and call the '=' operator to compare the
+ * values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[column][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(typeid))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(typeid))));
/*
* Call the type specific '=' function
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.12 2005/07/10 21:13:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.13 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
char *string = PG_GETARG_CSTRING(0);
Oid tupType = PG_GETARG_OID(1);
+
#ifdef NOT_USED
int32 typmod = PG_GETARG_INT32(2);
#endif
/*
* Use the passed type unless it's RECORD; we can't support input of
- * anonymous types, mainly because there's no good way to figure out
- * which anonymous type is wanted. Note that for RECORD, what we'll
- * probably actually get is RECORD's typelem, ie, zero.
+ * anonymous types, mainly because there's no good way to figure out which
+ * anonymous type is wanted. Note that for RECORD, what we'll probably
+ * actually get is RECORD's typelem, ie, zero.
*/
if (tupType == InvalidOid || tupType == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("input of anonymous composite types is not implemented")));
+ errmsg("input of anonymous composite types is not implemented")));
tupTypmod = -1; /* for all non-anonymous types */
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
/* *ptr must be ')' */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed record literal: \"%s\"", string),
+ errmsg("malformed record literal: \"%s\"", string),
errdetail("Too few columns.")));
}
{
if (*ptr == '\0')
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed record literal: \"%s\"",
- string),
- errdetail("Unexpected end of input.")));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed record literal: \"%s\"",
+ string),
+ errdetail("Unexpected end of input.")));
appendStringInfoChar(&buf, *ptr++);
}
else if (ch == '\"')
values[i] = FunctionCall3(&column_info->proc,
CStringGetDatum(buf.data),
- ObjectIdGetDatum(column_info->typioparam),
- Int32GetDatum(tupdesc->attrs[i]->atttypmod));
+ ObjectIdGetDatum(column_info->typioparam),
+ Int32GetDatum(tupdesc->attrs[i]->atttypmod));
nulls[i] = ' ';
}
tuple = heap_formtuple(tupdesc, values, nulls);
/*
- * We cannot return tuple->t_data because heap_formtuple allocates it
- * as part of a larger chunk, and our caller may expect to be able to
- * pfree our result. So must copy the info into a new palloc chunk.
+ * We cannot return tuple->t_data because heap_formtuple allocates it as
+ * part of a larger chunk, and our caller may expect to be able to pfree
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
Oid tupType = PG_GETARG_OID(1);
+
#ifdef NOT_USED
int32 typmod = PG_GETARG_INT32(2);
#endif
/*
* Use the passed type unless it's RECORD; we can't support input of
- * anonymous types, mainly because there's no good way to figure out
- * which anonymous type is wanted. Note that for RECORD, what we'll
- * probably actually get is RECORD's typelem, ie, zero.
+ * anonymous types, mainly because there's no good way to figure out which
+ * anonymous type is wanted. Note that for RECORD, what we'll probably
+ * actually get is RECORD's typelem, ie, zero.
*/
if (tupType == InvalidOid || tupType == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("input of anonymous composite types is not implemented")));
+ errmsg("input of anonymous composite types is not implemented")));
tupTypmod = -1; /* for all non-anonymous types */
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
{
/*
* Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the input
- * buffer. We assume we can scribble on the input buffer so as
- * to maintain the convention that StringInfos have a trailing
- * null.
+ * StringInfo pointing to the correct portion of the input buffer.
+ * We assume we can scribble on the input buffer so as to maintain
+ * the convention that StringInfos have a trailing null.
*/
StringInfoData item_buf;
char csave;
values[i] = FunctionCall3(&column_info->proc,
PointerGetDatum(&item_buf),
- ObjectIdGetDatum(column_info->typioparam),
- Int32GetDatum(tupdesc->attrs[i]->atttypmod));
+ ObjectIdGetDatum(column_info->typioparam),
+ Int32GetDatum(tupdesc->attrs[i]->atttypmod));
nulls[i] = ' ';
/* Trouble if it didn't eat the whole buffer */
if (item_buf.cursor != itemlen)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("improper binary format in record column %d",
- i + 1)));
+ errmsg("improper binary format in record column %d",
+ i + 1)));
buf->data[buf->cursor] = csave;
}
tuple = heap_formtuple(tupdesc, values, nulls);
/*
- * We cannot return tuple->t_data because heap_formtuple allocates it
- * as part of a larger chunk, and our caller may expect to be able to
- * pfree our result. So must copy the info into a new palloc chunk.
+ * We cannot return tuple->t_data because heap_formtuple allocates it as
+ * part of a larger chunk, and our caller may expect to be able to pfree
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
* back to source text
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.206 2005/10/06 19:51:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.207 2005/10/15 02:49:29 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
static void get_const_expr(Const *constval, deparse_context *context);
static void get_sublink_expr(SubLink *sublink, deparse_context *context);
static void get_from_clause(Query *query, const char *prefix,
- deparse_context *context);
+ deparse_context *context);
static void get_from_clause_item(Node *jtnode, Query *query,
deparse_context *context);
static void get_from_clause_alias(Alias *alias, RangeTblEntry *rte,
- deparse_context *context);
+ deparse_context *context);
static void get_from_clause_coldeflist(List *coldeflist,
deparse_context *context);
static void get_opclass_name(Oid opclass, Oid actual_datatype,
trigrec = (Form_pg_trigger) GETSTRUCT(ht_trig);
/*
- * Start the trigger definition. Note that the trigger's name should
- * never be schema-qualified, but the trigger rel's name may be.
+ * Start the trigger definition. Note that the trigger's name should never
+ * be schema-qualified, but the trigger rel's name may be.
*/
initStringInfo(&buf);
{
if (trigrec->tgconstrrelid != InvalidOid)
appendStringInfo(&buf, "FROM %s ",
- generate_relation_name(trigrec->tgconstrrelid));
+ generate_relation_name(trigrec->tgconstrrelid));
if (!trigrec->tgdeferrable)
appendStringInfo(&buf, "NOT ");
appendStringInfo(&buf, "DEFERRABLE INITIALLY ");
amrec = (Form_pg_am) GETSTRUCT(ht_am);
/*
- * Get the index expressions, if any. (NOTE: we do not use the
- * relcache versions of the expressions and predicate, because we want
- * to display non-const-folded expressions.)
+ * Get the index expressions, if any. (NOTE: we do not use the relcache
+ * versions of the expressions and predicate, because we want to display
+ * non-const-folded expressions.)
*/
if (!heap_attisnull(ht_idx, Anum_pg_index_indexprs))
{
context = deparse_context_for(get_rel_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should
- * never be schema-qualified, but the indexed rel's name may be.
+ * Start the index definition. Note that the index's name should never be
+ * schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
{
/* Need parens if it's not a bare function call */
if (indexkey && IsA(indexkey, FuncExpr) &&
- ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL)
+ ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL)
appendStringInfoString(&buf, str);
else
appendStringInfo(&buf, "(%s)", str);
Oid constraintId = PG_GETARG_OID(0);
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
- false, 0)));
+ false, 0)));
}
Datum
prettyFlags = pretty ? PRETTYFLAG_PAREN | PRETTYFLAG_INDENT : 0;
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
- false, prettyFlags)));
+ false, prettyFlags)));
}
/* Internal version that returns a palloc'd C string */
Form_pg_constraint conForm;
/*
- * Fetch the pg_constraint row. There's no syscache for pg_constraint
- * so we must do it the hard way.
+ * Fetch the pg_constraint row. There's no syscache for pg_constraint so
+ * we must do it the hard way.
*/
conDesc = heap_open(ConstraintRelationId, AccessShareLock);
/* add foreign relation name */
appendStringInfo(&buf, ") REFERENCES %s(",
- generate_relation_name(conForm->confrelid));
+ generate_relation_name(conForm->confrelid));
/* Fetch and build referenced-column list */
val = heap_getattr(tup, Anum_pg_constraint_confkey,
prettyFlags, 0);
/*
- * Now emit the constraint definition. There are cases
- * where the constraint expression will be fully
- * parenthesized and we don't need the outer parens ...
- * but there are other cases where we do need 'em. Be
- * conservative for now.
+ * Now emit the constraint definition. There are cases where
+ * the constraint expression will be fully parenthesized and
+ * we don't need the outer parens ... but there are other
+ * cases where we do need 'em. Be conservative for now.
*
* Note that simply checking for leading '(' and trailing ')'
- * would NOT be good enough, consider "(x > 0) AND (y >
- * 0)".
+ * would NOT be good enough, consider "(x > 0) AND (y > 0)".
*/
appendStringInfo(&buf, "CHECK (%s)", consrc);
/* Get the number of the column */
column = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(columnname)));
+ PointerGetDatum(columnname)));
attnum = get_attnum(tableOid, column);
if (attnum == InvalidAttrNumber)
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
/*
- * We assume any internal dependency of a relation on a column
- * must be what we are looking for.
+ * We assume any internal dependency of a relation on a column must be
+ * what we are looking for.
*/
if (deprec->classid == RelationRelationId &&
deprec->objsubid == 0 &&
if (var->varnoold > 0 && var->varnoold <= rtablelength)
{
RangeTblEntry *varrte = rt_fetch(var->varnoold, rtable);
- AttrNumber varattnum = var->varoattno;
+ AttrNumber varattnum = var->varoattno;
/* need this test in case it's referencing a resjunk col */
if (varattnum <= list_length(varrte->eref->colnames))
appendStringInfo(buf, " TO %s", generate_relation_name(ev_class));
if (ev_attr > 0)
appendStringInfo(buf, ".%s",
- quote_identifier(get_relid_attribute_name(ev_class,
- ev_attr)));
+ quote_identifier(get_relid_attribute_name(ev_class,
+ ev_attr)));
/* If the rule has an event qualification, add it */
if (ev_qual == NULL)
/*
* We need to make a context for recognizing any Vars in the qual
- * (which can only be references to OLD and NEW). Use the rtable
- * of the first query in the action list for this purpose.
+ * (which can only be references to OLD and NEW). Use the rtable of
+ * the first query in the action list for this purpose.
*/
query = (Query *) linitial(actions);
/*
* If the action is INSERT...SELECT, OLD/NEW have been pushed down
- * into the SELECT, and that's what we need to look at. (Ugly
- * kluge ... try to fix this when we redesign querytrees.)
+ * into the SELECT, and that's what we need to look at. (Ugly kluge
+ * ... try to fix this when we redesign querytrees.)
*/
query = getInsertSelectQuery(query, NULL);
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the
- * passed querytree!
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
+ * querytree!
*/
AcquireRewriteLocks(query);
ListCell *l;
/*
- * If the Query node has a setOperations tree, then it's the top level
- * of a UNION/INTERSECT/EXCEPT query; only the ORDER BY and LIMIT
- * fields are interesting in the top query itself.
+ * If the Query node has a setOperations tree, then it's the top level of
+ * a UNION/INTERSECT/EXCEPT query; only the ORDER BY and LIMIT fields are
+ * interesting in the top query itself.
*/
if (query->setOperations)
{
sortcoltype = exprType(sortexpr);
/* See whether operator is default < or > for datatype */
typentry = lookup_type_cache(sortcoltype,
- TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
+ TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
if (srt->sortop == typentry->lt_opr)
/* ASC is default, so emit nothing */ ;
else if (srt->sortop == typentry->gt_opr)
get_rule_expr((Node *) tle->expr, context, true);
/*
- * Figure out what the result column should be called. In the
- * context of a view, use the view's tuple descriptor (so as to
- * pick up the effects of any column RENAME that's been done on
- * the view). Otherwise, just use what we can find in the TLE.
+ * Figure out what the result column should be called. In the context
+ * of a view, use the view's tuple descriptor (so as to pick up the
+ * effects of any column RENAME that's been done on the view).
+ * Otherwise, just use what we can find in the TLE.
*/
if (resultDesc && colno <= resultDesc->natts)
colname = NameStr(resultDesc->attrs[colno - 1]->attname);
SetOperationStmt *op = (SetOperationStmt *) setOp;
/*
- * We force parens whenever nesting two SetOperationStmts. There
- * are some cases in which parens are needed around a leaf query
- * too, but those are more easily handled at the next level down
- * (see code above).
+ * We force parens whenever nesting two SetOperationStmts. There are
+ * some cases in which parens are needed around a leaf query too, but
+ * those are more easily handled at the next level down (see code
+ * above).
*/
need_paren = !IsA(op->larg, RangeTblRef);
List *strippedexprs;
/*
- * If it's an INSERT ... SELECT there will be a single subquery RTE
- * for the SELECT.
+ * If it's an INSERT ... SELECT there will be a single subquery RTE for
+ * the SELECT.
*/
foreach(l, query->rtable)
{
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
- quote_identifier(get_relid_attribute_name(rte->relid,
- tle->resno)));
+ quote_identifier(get_relid_attribute_name(rte->relid,
+ tle->resno)));
/*
- * Print any indirection needed (subfields or subscripts), and
- * strip off the top-level nodes representing the indirection
- * assignments.
+ * Print any indirection needed (subfields or subscripts), and strip
+ * off the top-level nodes representing the indirection assignments.
*/
strippedexprs = lappend(strippedexprs,
processIndirection((Node *) tle->expr,
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
- quote_identifier(get_relid_attribute_name(rte->relid,
- tle->resno)));
+ quote_identifier(get_relid_attribute_name(rte->relid,
+ tle->resno)));
/*
- * Print any indirection needed (subfields or subscripts), and
- * strip off the top-level nodes representing the indirection
- * assignments.
+ * Print any indirection needed (subfields or subscripts), and strip
+ * off the top-level nodes representing the indirection assignments.
*/
expr = processIndirection((Node *) tle->expr, context);
appendContextKeyword(context, "",
0, PRETTYINDENT_STD, 1);
appendStringInfo(buf, "NOTIFY %s",
- quote_qualified_identifier(stmt->relation->schemaname,
- stmt->relation->relname));
+ quote_qualified_identifier(stmt->relation->schemaname,
+ stmt->relation->relname));
}
else
{
if (rte->rtekind == RTE_RELATION)
{
/*
- * It's possible that use of the bare refname would find
- * another more-closely-nested RTE, or be ambiguous, in which
- * case we need to specify the schemaname to avoid these
- * errors.
+ * It's possible that use of the bare refname would find another
+ * more-closely-nested RTE, or be ambiguous, in which case we need
+ * to specify the schemaname to avoid these errors.
*/
if (find_rte_by_refname(rte->eref->aliasname, context) != rte)
*schemaname =
{
/*
* If it's an unnamed join, look at the expansion of the alias
- * variable. If it's a simple reference to one of the input
- * vars then recursively find the name of that var, instead.
- * (This allows correct decompiling of cases where there are
- * identically named columns on both sides of the join.)
- * When it's not a simple reference, we have to just return
- * the unqualified variable name (this can only happen with
- * columns that were merged by USING or NATURAL clauses).
+ * variable. If it's a simple reference to one of the input vars
+ * then recursively find the name of that var, instead. (This
+ * allows correct decompiling of cases where there are identically
+ * named columns on both sides of the join.) When it's not a
+ * simple reference, we have to just return the unqualified
+ * variable name (this can only happen with columns that were
+ * merged by USING or NATURAL clauses).
*/
if (var->varattno > 0)
{
- Var *aliasvar;
+ Var *aliasvar;
aliasvar = (Var *) list_nth(rte->joinaliasvars,
- var->varattno-1);
+ var->varattno - 1);
if (IsA(aliasvar, Var))
{
get_names_for_var(aliasvar,
* Get the name of a field of a Var of type RECORD.
*
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
- * the field name from it. We ereport if we can't determine the name.
+ * the field name from it. We ereport if we can't determine the name.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
*
{
case RTE_RELATION:
case RTE_SPECIAL:
+
/*
* This case should not occur: a column of a table shouldn't have
* type RECORD. Fall through and fail (most likely) at the
{
/*
* Recurse into the sub-select to see what its Var refers
- * to. We have to build an additional level of namespace
+ * to. We have to build an additional level of namespace
* to keep in step with varlevelsup in the subselect.
*/
deparse_namespace mydpns;
/* else fall through to inspect the expression */
break;
case RTE_FUNCTION:
+
/*
- * We couldn't get here unless a function is declared with one
- * of its result columns as RECORD, which is not allowed.
+ * We couldn't get here unless a function is declared with one of
+ * its result columns as RECORD, which is not allowed.
*/
break;
}
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass
- * to lookup_rowtype_tupdesc() which will probably fail, but will
- * give an appropriate error message while failing.
+ * get_expr_result_type() can do anything with it. If not, pass to
+ * lookup_rowtype_tupdesc() which will probably fail, but will give an
+ * appropriate error message while failing.
*/
if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
tupleDesc = lookup_rowtype_tupdesc(exprType(expr), exprTypmod(expr));
return false;
/*
- * Operators are same priority --- can skip parens
- * only if we have (a - b) - c, not a - (b - c).
+ * Operators are same priority --- can skip parens only if
+ * we have (a - b) - c, not a - (b - c).
*/
if (node == (Node *) linitial(((OpExpr *) parentNode)->args))
return true;
case T_BoolExpr: /* lower precedence */
case T_ArrayRef: /* other separators */
case T_ArrayExpr: /* other separators */
- case T_RowExpr: /* other separators */
+ case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
- case T_Aggref: /* own parentheses */
+ case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
}
case T_ArrayRef: /* other separators */
case T_ArrayExpr: /* other separators */
- case T_RowExpr: /* other separators */
+ case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
- case T_Aggref: /* own parentheses */
+ case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
/*
* Each level of get_rule_expr must emit an indivisible term
- * (parenthesized if necessary) to ensure result is reparsed into the
- * same expression tree. The only exception is that when the input
- * is a List, we emit the component items comma-separated with no
- * surrounding decoration; this is convenient for most callers.
+ * (parenthesized if necessary) to ensure result is reparsed into the same
+ * expression tree. The only exception is that when the input is a List,
+ * we emit the component items comma-separated with no surrounding
+ * decoration; this is convenient for most callers.
*
* There might be some work left here to support additional node types.
*/
/*
* Parenthesize the argument unless it's a simple Var or a
- * FieldSelect. (In particular, if it's another ArrayRef,
- * we *must* parenthesize to avoid confusion.)
+ * FieldSelect. (In particular, if it's another ArrayRef, we
+ * *must* parenthesize to avoid confusion.)
*/
need_parens = !IsA(aref->refexpr, Var) &&
!IsA(aref->refexpr, FieldSelect);
appendStringInfo(buf, " %s %s (",
generate_operator_name(expr->opno,
exprType(arg1),
- get_element_type(exprType(arg2))),
+ get_element_type(exprType(arg2))),
expr->useOr ? "ANY" : "ALL");
get_rule_expr_paren(arg2, context, true, node);
appendStringInfoChar(buf, ')');
case T_SubPlan:
{
/*
- * We cannot see an already-planned subplan in rule
- * deparsing, only while EXPLAINing a query plan. For now,
- * just punt.
+ * We cannot see an already-planned subplan in rule deparsing,
+ * only while EXPLAINing a query plan. For now, just punt.
*/
if (((SubPlan *) node)->useHashTable)
appendStringInfo(buf, "(hashed subplan)");
/*
* Parenthesize the argument unless it's an ArrayRef or
- * another FieldSelect. Note in particular that it would
- * be WRONG to not parenthesize a Var argument; simplicity
- * is not the issue here, having the right number of names
- * is.
+ * another FieldSelect. Note in particular that it would be
+ * WRONG to not parenthesize a Var argument; simplicity is not
+ * the issue here, having the right number of names is.
*/
- need_parens = !IsA(arg, ArrayRef) && !IsA(arg, FieldSelect);
+ need_parens = !IsA(arg, ArrayRef) &&!IsA(arg, FieldSelect);
if (need_parens)
appendStringInfoChar(buf, '(');
get_rule_expr(arg, context, true);
/*
* If it's a Var of type RECORD, we have to find what the Var
- * refers to; otherwise we can use get_expr_result_type.
- * If that fails, we try lookup_rowtype_tupdesc, which will
+ * refers to; otherwise we can use get_expr_result_type. If
+ * that fails, we try lookup_rowtype_tupdesc, which will
* probably fail too, but will ereport an acceptable message.
*/
if (IsA(arg, Var) &&
case T_FieldStore:
/*
- * We shouldn't see FieldStore here; it should have been
- * stripped off by processIndirection().
+ * We shouldn't see FieldStore here; it should have been stripped
+ * off by processIndirection().
*/
elog(ERROR, "unexpected FieldStore");
break;
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(relabel->resulttype,
- relabel->resulttypmod));
+ format_type_with_typemod(relabel->resulttype,
+ relabel->resulttypmod));
}
}
break;
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(convert->resulttype, -1));
+ format_type_with_typemod(convert->resulttype, -1));
}
}
break;
char *sep;
/*
- * If it's a named type and not RECORD, we may have to
- * skip dropped columns and/or claim there are NULLs for
- * added columns.
+ * If it's a named type and not RECORD, we may have to skip
+ * dropped columns and/or claim there are NULLs for added
+ * columns.
*/
if (rowexpr->row_typeid != RECORDOID)
{
}
/*
- * SQL99 allows "ROW" to be omitted when there is more
- * than one column, but for simplicity we always print it.
+ * SQL99 allows "ROW" to be omitted when there is more than
+ * one column, but for simplicity we always print it.
*/
appendStringInfo(buf, "ROW(");
sep = "";
appendStringInfo(buf, ")");
if (rowexpr->row_format == COERCE_EXPLICIT_CAST)
appendStringInfo(buf, "::%s",
- format_type_with_typemod(rowexpr->row_typeid, -1));
+ format_type_with_typemod(rowexpr->row_typeid, -1));
}
break;
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(ctest->resulttype,
- ctest->resulttypmod));
+ format_type_with_typemod(ctest->resulttype,
+ ctest->resulttypmod));
}
}
break;
ListCell *l;
/*
- * If the function call came from an implicit coercion, then just show
- * the first argument --- unless caller wants to see implicit
- * coercions.
+ * If the function call came from an implicit coercion, then just show the
+ * first argument --- unless caller wants to see implicit coercions.
*/
if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit)
{
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(rettype, coercedTypmod));
+ format_type_with_typemod(rettype, coercedTypmod));
return;
}
/*
- * Normal function: display as proname(args). First we need to
- * extract the argument datatypes.
+ * Normal function: display as proname(args). First we need to extract
+ * the argument datatypes.
*/
nargs = 0;
foreach(l, expr->args)
Oid argtype = exprType((Node *) aggref->target);
appendStringInfo(buf, "%s(%s",
- generate_function_name(aggref->aggfnoid, 1, &argtype),
+ generate_function_name(aggref->aggfnoid, 1, &argtype),
aggref->aggdistinct ? "DISTINCT " : "");
if (aggref->aggstar)
appendStringInfo(buf, "*");
if (constval->constisnull)
{
/*
- * Always label the type of a NULL constant to prevent
- * misdecisions about type when reparsing.
+ * Always label the type of a NULL constant to prevent misdecisions
+ * about type when reparsing.
*/
appendStringInfo(buf, "NULL::%s",
- format_type_with_typemod(constval->consttype, -1));
+ format_type_with_typemod(constval->consttype, -1));
return;
}
case NUMERICOID:
{
/*
- * These types are printed without quotes unless they
- * contain values that aren't accepted by the scanner
- * unquoted (e.g., 'NaN'). Note that strtod() and friends
- * might accept NaN, so we can't use that to test.
+ * These types are printed without quotes unless they contain
+ * values that aren't accepted by the scanner unquoted (e.g.,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
+ * so we can't use that to test.
*
- * In reality we only need to defend against infinity and
- * NaN, so we need not get too crazy about pattern
- * matching here.
+ * In reality we only need to defend against infinity and NaN, so
+ * we need not get too crazy about pattern matching here.
*/
if (strspn(extval, "0123456789+-eE.") == strlen(extval))
{
break;
default:
+
/*
* We must quote any funny characters in the constant's
* representation. XXX Any MULTIBYTE considerations here?
*/
for (valptr = extval; *valptr; valptr++)
if (*valptr == '\\' ||
- (unsigned char)*valptr < (unsigned char)' ')
+ (unsigned char) *valptr < (unsigned char) ' ')
{
appendStringInfoChar(buf, ESCAPE_STRING_SYNTAX);
break;
appendStringInfoChar(buf, ch);
appendStringInfoChar(buf, ch);
}
- else if ((unsigned char)ch < (unsigned char)' ')
+ else if ((unsigned char) ch < (unsigned char) ' ')
appendStringInfo(buf, "\\%03o", (int) ch);
else
appendStringInfoChar(buf, ch);
pfree(extval);
/*
- * Append ::typename unless the constant will be implicitly typed as
- * the right type when it is read in. XXX this code has to be kept in
- * sync with the behavior of the parser, especially make_const.
+ * Append ::typename unless the constant will be implicitly typed as the
+ * right type when it is read in. XXX this code has to be kept in sync
+ * with the behavior of the parser, especially make_const.
*/
switch (constval->consttype)
{
}
if (needlabel)
appendStringInfo(buf, "::%s",
- format_type_with_typemod(constval->consttype, -1));
+ format_type_with_typemod(constval->consttype, -1));
}
need_paren = true;
/*
- * XXX we regurgitate the originally given operator name, with or
- * without schema qualification. This is not necessarily 100% right
- * but it's the best we can do, since the operators actually used
- * might not all be in the same schema.
+ * XXX we regurgitate the originally given operator name, with or without
+ * schema qualification. This is not necessarily 100% right but it's the
+ * best we can do, since the operators actually used might not all be in
+ * the same schema.
*/
switch (sublink->subLinkType)
{
ListCell *l;
/*
- * We use the query's jointree as a guide to what to print. However,
- * we must ignore auto-added RTEs that are marked not inFromCl. (These
- * can only appear at the top level of the jointree, so it's
- * sufficient to check here.) This check also ensures we ignore
- * the rule pseudo-RTEs for NEW and OLD.
+ * We use the query's jointree as a guide to what to print. However, we
+ * must ignore auto-added RTEs that are marked not inFromCl. (These can
+ * only appear at the top level of the jointree, so it's sufficient to
+ * check here.) This check also ensures we ignore the rule pseudo-RTEs
+ * for NEW and OLD.
*/
foreach(l, query->jointree->fromlist)
{
strcmp(rte->eref->aliasname, get_rel_name(rte->relid)) != 0)
{
/*
- * Apparently the rel has been renamed since the rule was
- * made. Emit a fake alias clause so that variable references
- * will still work. This is not a 100% solution but should
- * work in most reasonable situations.
+ * Apparently the rel has been renamed since the rule was made.
+ * Emit a fake alias clause so that variable references will still
+ * work. This is not a 100% solution but should work in most
+ * reasonable situations.
*/
appendStringInfo(buf, " %s",
quote_identifier(rte->eref->aliasname));
else if (rte->rtekind == RTE_FUNCTION)
{
/*
- * For a function RTE, always give an alias.
- * This covers possible renaming of the function and/or
- * instability of the FigureColname rules for things that
- * aren't simple functions.
+ * For a function RTE, always give an alias. This covers possible
+ * renaming of the function and/or instability of the
+ * FigureColname rules for things that aren't simple functions.
*/
appendStringInfo(buf, " %s",
quote_identifier(rte->eref->aliasname));
need_paren_on_right = PRETTY_PAREN(context) &&
!IsA(j->rarg, RangeTblRef) &&
- !(IsA(j->rarg, JoinExpr) && ((JoinExpr*) j->rarg)->alias != NULL);
+ !(IsA(j->rarg, JoinExpr) &&((JoinExpr *) j->rarg)->alias != NULL);
if (!PRETTY_PAREN(context) || j->alias != NULL)
appendStringInfoChar(buf, '(');
if (col != list_head(j->using))
appendStringInfo(buf, ", ");
appendStringInfoString(buf,
- quote_identifier(strVal(lfirst(col))));
+ quote_identifier(strVal(lfirst(col))));
}
appendStringInfoChar(buf, ')');
}
opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc);
/*
- * Special case for ARRAY_OPS: pretend it is default for any array
- * type
+ * Special case for ARRAY_OPS: pretend it is default for any array type
*/
if (OidIsValid(actual_datatype))
{
format_type_be(fstore->resulttype));
/*
- * Get the field name. Note we assume here that there's only
- * one field being assigned to. This is okay in stored rules
- * but could be wrong in executable target lists. Presently
- * no problem since explain.c doesn't print plan targetlists,
- * but someday may have to think of something ...
+ * Get the field name. Note we assume here that there's only one
+ * field being assigned to. This is okay in stored rules but
+ * could be wrong in executable target lists. Presently no
+ * problem since explain.c doesn't print plan targetlists, but
+ * someday may have to think of something ...
*/
fieldname = get_relid_attribute_name(typrelid,
- linitial_int(fstore->fieldnums));
+ linitial_int(fstore->fieldnums));
appendStringInfo(buf, ".%s", quote_identifier(fieldname));
/*
- * We ignore arg since it should be an uninteresting reference
- * to the target column or subcolumn.
+ * We ignore arg since it should be an uninteresting reference to
+ * the target column or subcolumn.
*/
node = (Node *) linitial(fstore->newvals);
}
printSubscripts(aref, context);
/*
- * We ignore refexpr since it should be an uninteresting
- * reference to the target column or subcolumn.
+ * We ignore refexpr since it should be an uninteresting reference
+ * to the target column or subcolumn.
*/
node = (Node *) aref->refassgnexpr;
}
quote_identifier(const char *ident)
{
/*
- * Can avoid quoting if ident starts with a lowercase letter or
- * underscore and contains only lowercase letters, digits, and
- * underscores, *and* is not any SQL keyword. Otherwise, supply
- * quotes.
+ * Can avoid quoting if ident starts with a lowercase letter or underscore
+ * and contains only lowercase letters, digits, and underscores, *and* is
+ * not any SQL keyword. Otherwise, supply quotes.
*/
int nquotes = 0;
bool safe;
char *optr;
/*
- * would like to use macros here, but they might yield
- * unwanted locale-specific results...
+ * would like to use macros here, but they might yield unwanted
+ * locale-specific results...
*/
safe = ((ident[0] >= 'a' && ident[0] <= 'z') || ident[0] == '_');
if (safe)
{
/*
- * Check for keyword. This test is overly strong, since many of
- * the "keywords" known to the parser are usable as column names,
- * but the parser doesn't provide any easy way to test for whether
- * an identifier is safe or not... so be safe not sorry.
+ * Check for keyword. This test is overly strong, since many of the
+ * "keywords" known to the parser are usable as column names, but the
+ * parser doesn't provide any easy way to test for whether an
+ * identifier is safe or not... so be safe not sorry.
*
- * Note: ScanKeywordLookup() does case-insensitive comparison, but
- * that's fine, since we already know we have all-lower-case.
+ * Note: ScanKeywordLookup() does case-insensitive comparison, but that's
+ * fine, since we already know we have all-lower-case.
*/
if (ScanKeywordLookup(ident) != NULL)
safe = false;
/*
* The idea here is to schema-qualify only if the parser would fail to
- * resolve the correct function given the unqualified func name with
- * the specified argtypes.
+ * resolve the correct function given the unqualified func name with the
+ * specified argtypes.
*/
p_result = func_get_detail(list_make1(makeString(proname)),
NIL, nargs, argtypes,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.190 2005/10/11 17:27:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.191 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
double selec;
/*
- * If expression is not variable = something or something = variable,
- * then punt and return a default estimate.
+ * If expression is not variable = something or something = variable, then
+ * punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
int i;
/*
- * Is the constant "=" to any of the column's most common
- * values? (Although the given operator may not really be
- * "=", we will assume that seeing whether it returns TRUE is
- * an appropriate test. If you don't like this, maybe you
- * shouldn't be using eqsel for your operator...)
+ * Is the constant "=" to any of the column's most common values?
+ * (Although the given operator may not really be "=", we will
+ * assume that seeing whether it returns TRUE is an appropriate
+ * test. If you don't like this, maybe you shouldn't be using
+ * eqsel for your operator...)
*/
if (get_attstatsslot(vardata.statsTuple,
vardata.atttype, vardata.atttypmod,
if (match)
{
/*
- * Constant is "=" to this common value. We know
- * selectivity exactly (or as exactly as VACUUM could
- * calculate it, anyway).
+ * Constant is "=" to this common value. We know selectivity
+ * exactly (or as exactly as VACUUM could calculate it,
+ * anyway).
*/
selec = numbers[i];
}
else
{
/*
- * Comparison is against a constant that is neither NULL
- * nor any of the common values. Its selectivity cannot
- * be more than this:
+ * Comparison is against a constant that is neither NULL nor
+ * any of the common values. Its selectivity cannot be more
+ * than this:
*/
double sumcommon = 0.0;
double otherdistinct;
CLAMP_PROBABILITY(selec);
/*
- * and in fact it's probably a good deal less. We
- * approximate that all the not-common values share this
- * remaining fraction equally, so we divide by the number
- * of other distinct values.
+ * and in fact it's probably a good deal less. We approximate
+ * that all the not-common values share this remaining
+ * fraction equally, so we divide by the number of other
+ * distinct values.
*/
otherdistinct = get_variable_numdistinct(&vardata)
- nnumbers;
selec /= otherdistinct;
/*
- * Another cross-check: selectivity shouldn't be estimated
- * as more than the least common "most common value".
+ * Another cross-check: selectivity shouldn't be estimated as
+ * more than the least common "most common value".
*/
if (nnumbers > 0 && selec > numbers[nnumbers - 1])
selec = numbers[nnumbers - 1];
double ndistinct;
/*
- * Search is for a value that we do not know a priori, but we
- * will assume it is not NULL. Estimate the selectivity as
- * non-null fraction divided by number of distinct values, so
- * that we get a result averaged over all possible values
- * whether common or uncommon. (Essentially, we are assuming
- * that the not-yet-known comparison value is equally likely
- * to be any of the possible values, regardless of their
- * frequency in the table. Is that a good idea?)
+ * Search is for a value that we do not know a priori, but we will
+ * assume it is not NULL. Estimate the selectivity as non-null
+ * fraction divided by number of distinct values, so that we get a
+ * result averaged over all possible values whether common or
+ * uncommon. (Essentially, we are assuming that the not-yet-known
+ * comparison value is equally likely to be any of the possible
+ * values, regardless of their frequency in the table. Is that a
+ * good idea?)
*/
selec = 1.0 - stats->stanullfrac;
ndistinct = get_variable_numdistinct(&vardata);
selec /= ndistinct;
/*
- * Cross-check: selectivity should never be estimated as more
- * than the most common value's.
+ * Cross-check: selectivity should never be estimated as more than
+ * the most common value's.
*/
if (get_attstatsslot(vardata.statsTuple,
vardata.atttype, vardata.atttypmod,
else
{
/*
- * No VACUUM ANALYZE stats available, so make a guess using
- * estimated number of distinct values and assuming they are
- * equally common. (The guess is unlikely to be very good, but we
- * do know a few special cases.)
+ * No VACUUM ANALYZE stats available, so make a guess using estimated
+ * number of distinct values and assuming they are equally common.
+ * (The guess is unlikely to be very good, but we do know a few
+ * special cases.)
*/
selec = 1.0 / get_variable_numdistinct(&vardata);
}
float8 result;
/*
- * We want 1 - eqsel() where the equality operator is the one
- * associated with this != operator, that is, its negator.
+ * We want 1 - eqsel() where the equality operator is the one associated
+ * with this != operator, that is, its negator.
*/
eqop = get_negator(operator);
if (eqop)
{
result = DatumGetFloat8(DirectFunctionCall4(eqsel,
PointerGetDatum(root),
- ObjectIdGetDatum(eqop),
+ ObjectIdGetDatum(eqop),
PointerGetDatum(args),
- Int32GetDatum(varRelid)));
+ Int32GetDatum(varRelid)));
}
else
{
/*
* If we have most-common-values info, add up the fractions of the MCV
- * entries that satisfy MCV OP CONST. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
- * represented by MCV entries.
+ * entries that satisfy MCV OP CONST. These fractions contribute directly
+ * to the result selectivity. Also add up the total fraction represented
+ * by MCV entries.
*/
mcv_selec = 0.0;
sumcommon = 0.0;
}
/*
- * If there is a histogram, determine which bin the constant falls in,
- * and compute the resulting contribution to selectivity.
+ * If there is a histogram, determine which bin the constant falls in, and
+ * compute the resulting contribution to selectivity.
*
* Someday, VACUUM might store more than one histogram per rel/att,
- * corresponding to more than one possible sort ordering defined for
- * the column type. However, to make that work we will need to figure
- * out which staop to search for --- it's not necessarily the one we
- * have at hand! (For example, we might have a '<=' operator rather
- * than the '<' operator that will appear in staop.) For now, assume
- * that whatever appears in pg_statistic is sorted the same way our
- * operator sorts, or the reverse way if isgt is TRUE.
+ * corresponding to more than one possible sort ordering defined for the
+ * column type. However, to make that work we will need to figure out
+ * which staop to search for --- it's not necessarily the one we have at
+ * hand! (For example, we might have a '<=' operator rather than the '<'
+ * operator that will appear in staop.) For now, assume that whatever
+ * appears in pg_statistic is sorted the same way our operator sorts, or
+ * the reverse way if isgt is TRUE.
*/
hist_selec = 0.0;
else
{
/*
- * Scan to find proper location. This could be made
- * faster by using a binary-search method, but it's
- * probably not worth the trouble for typical histogram
- * sizes.
+ * Scan to find proper location. This could be made faster by
+ * using a binary-search method, but it's probably not worth
+ * the trouble for typical histogram sizes.
*/
for (i = 1; i < nvalues; i++)
{
* We have values[i-1] < constant < values[i].
*
* Convert the constant and the two nearest bin boundary
- * values to a uniform comparison scale, and do a
- * linear interpolation within this bin.
+ * values to a uniform comparison scale, and do a linear
+ * interpolation within this bin.
*/
if (convert_to_scalar(constval, consttype, &val,
values[i - 1], values[i],
binfrac = (val - low) / (high - low);
/*
- * Watch out for the possibility that we got a
- * NaN or Infinity from the division. This
- * can happen despite the previous checks, if
- * for example "low" is -Infinity.
+ * Watch out for the possibility that we got a NaN
+ * or Infinity from the division. This can happen
+ * despite the previous checks, if for example
+ * "low" is -Infinity.
*/
if (isnan(binfrac) ||
binfrac < 0.0 || binfrac > 1.0)
else
{
/*
- * Ideally we'd produce an error here, on the
- * grounds that the given operator shouldn't have
- * scalarXXsel registered as its selectivity func
- * unless we can deal with its operand types. But
- * currently, all manner of stuff is invoking
- * scalarXXsel, so give a default estimate until
- * that can be fixed.
+ * Ideally we'd produce an error here, on the grounds
+ * that the given operator shouldn't have scalarXXsel
+ * registered as its selectivity func unless we can
+ * deal with its operand types. But currently, all
+ * manner of stuff is invoking scalarXXsel, so give a
+ * default estimate until that can be fixed.
*/
binfrac = 0.5;
}
/*
- * Now, compute the overall selectivity across the
- * values represented by the histogram. We have i-1
- * full bins and binfrac partial bin below the
- * constant.
+ * Now, compute the overall selectivity across the values
+ * represented by the histogram. We have i-1 full bins
+ * and binfrac partial bin below the constant.
*/
histfrac = (double) (i - 1) + binfrac;
histfrac /= (double) (nvalues - 1);
hist_selec = isgt ? (1.0 - histfrac) : histfrac;
/*
- * The histogram boundaries are only approximate to begin
- * with, and may well be out of date anyway. Therefore, don't
- * believe extremely small or large selectivity estimates.
+ * The histogram boundaries are only approximate to begin with,
+ * and may well be out of date anyway. Therefore, don't believe
+ * extremely small or large selectivity estimates.
*/
if (hist_selec < 0.0001)
hist_selec = 0.0001;
/*
* Now merge the results from the MCV and histogram calculations,
- * realizing that the histogram covers only the non-null values that
- * are not listed in MCV.
+ * realizing that the histogram covers only the non-null values that are
+ * not listed in MCV.
*/
selec = 1.0 - stats->stanullfrac - sumcommon;
double selec;
/*
- * If expression is not variable op something or something op
- * variable, then punt and return a default estimate.
+ * If expression is not variable op something or something op variable,
+ * then punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/*
- * Can't do anything useful if the something is not a constant,
- * either.
+ * Can't do anything useful if the something is not a constant, either.
*/
if (!IsA(other, Const))
{
}
/*
- * If the constant is NULL, assume operator is strict and return zero,
- * ie, operator will never return TRUE.
+ * If the constant is NULL, assume operator is strict and return zero, ie,
+ * operator will never return TRUE.
*/
if (((Const *) other)->constisnull)
{
double selec;
/*
- * If expression is not variable op something or something op
- * variable, then punt and return a default estimate.
+ * If expression is not variable op something or something op variable,
+ * then punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/*
- * Can't do anything useful if the something is not a constant,
- * either.
+ * Can't do anything useful if the something is not a constant, either.
*/
if (!IsA(other, Const))
{
}
/*
- * If the constant is NULL, assume operator is strict and return zero,
- * ie, operator will never return TRUE.
+ * If the constant is NULL, assume operator is strict and return zero, ie,
+ * operator will never return TRUE.
*/
if (((Const *) other)->constisnull)
{
variable = (Node *) linitial(args);
/*
- * If the constant is NULL, assume operator is strict and return zero,
- * ie, operator will never return TRUE.
+ * If the constant is NULL, assume operator is strict and return zero, ie,
+ * operator will never return TRUE.
*/
if (((Const *) other)->constisnull)
{
consttype = ((Const *) other)->consttype;
/*
- * The right-hand const is type text or bytea for all supported
- * operators. We do not expect to see binary-compatible types here,
- * since const-folding should have relabeled the const to exactly
- * match the operator's declared type.
+ * The right-hand const is type text or bytea for all supported operators.
+ * We do not expect to see binary-compatible types here, since
+ * const-folding should have relabeled the const to exactly match the
+ * operator's declared type.
*/
if (consttype != TEXTOID && consttype != BYTEAOID)
{
}
/*
- * Similarly, the exposed type of the left-hand side should be one
- * of those we know. (Do not look at vardata.atttype, which might be
- * something binary-compatible but different.) We can use it to choose
+ * Similarly, the exposed type of the left-hand side should be one of
+ * those we know. (Do not look at vardata.atttype, which might be
+ * something binary-compatible but different.) We can use it to choose
* the index opclass from which we must draw the comparison operators.
*
* NOTE: It would be more correct to use the PATTERN opclasses than the
- * simple ones, but at the moment ANALYZE will not generate statistics
- * for the PATTERN operators. But our results are so approximate
- * anyway that it probably hardly matters.
+ * simple ones, but at the moment ANALYZE will not generate statistics for
+ * the PATTERN operators. But our results are so approximate anyway that
+ * it probably hardly matters.
*/
vartype = vardata.vartype;
pstatus = pattern_fixed_prefix(patt, ptype, &prefix, &rest);
/*
- * If necessary, coerce the prefix constant to the right type. (The
- * "rest" constant need not be changed.)
+ * If necessary, coerce the prefix constant to the right type. (The "rest"
+ * constant need not be changed.)
*/
if (prefix && prefix->consttype != vartype)
{
{
case TEXTOID:
prefixstr = DatumGetCString(DirectFunctionCall1(textout,
- prefix->constvalue));
+ prefix->constvalue));
break;
case BYTEAOID:
prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
- prefix->constvalue));
+ prefix->constvalue));
break;
default:
elog(ERROR, "unrecognized consttype: %u",
eqargs = list_make2(variable, prefix);
result = DatumGetFloat8(DirectFunctionCall4(eqsel,
PointerGetDatum(root),
- ObjectIdGetDatum(eqopr),
- PointerGetDatum(eqargs),
- Int32GetDatum(varRelid)));
+ ObjectIdGetDatum(eqopr),
+ PointerGetDatum(eqargs),
+ Int32GetDatum(varRelid)));
}
else
{
/*
* Not exact-match pattern. We estimate selectivity of the fixed
- * prefix and remainder of pattern separately, then combine the
- * two.
+ * prefix and remainder of pattern separately, then combine the two.
*/
Selectivity prefixsel;
Selectivity restsel;
freq_true = 1.0 - numbers[0] - freq_null;
/*
- * Next derive frequency for false. Then use these as
- * appropriate to derive frequency for each case.
+ * Next derive frequency for false. Then use these as appropriate
+ * to derive frequency for each case.
*/
freq_false = 1.0 - freq_true - freq_null;
else
{
/*
- * No most-common-value info available. Still have null
- * fraction information, so use it for IS [NOT] UNKNOWN.
- * Otherwise adjust for null fraction and assume an even split
- * for boolean tests.
+ * No most-common-value info available. Still have null fraction
+ * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
+ * for null fraction and assume an even split for boolean tests.
*/
switch (booltesttype)
{
case IS_NOT_UNKNOWN:
/*
- * Select not unknown (not null) values. Calculate
- * from freq_null.
+ * Select not unknown (not null) values. Calculate from
+ * freq_null.
*/
selec = 1.0 - freq_null;
break;
/*
* If we can't get variable statistics for the argument, perhaps
* clause_selectivity can do something with it. We ignore the
- * possibility of a NULL value when using clause_selectivity, and
- * just assume the value is either TRUE or FALSE.
+ * possibility of a NULL value when using clause_selectivity, and just
+ * assume the value is either TRUE or FALSE.
*/
switch (booltesttype)
{
case IS_FALSE:
case IS_NOT_TRUE:
selec = 1.0 - (double) clause_selectivity(root, arg,
- varRelid, jointype);
+ varRelid, jointype);
break;
default:
elog(ERROR, "unrecognized booltesttype: %d",
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run
- * through the lists to see which MCVs actually join to each other
- * with the given operator. This allows us to determine the exact
- * join selectivity for the portion of the relations represented
- * by the MCV lists. We still have to estimate for the remaining
- * population, but in a skewed distribution this gives us a big
- * leg up in accuracy. For motivation see the analysis in Y.
- * Ioannidis and S. Christodoulakis, "On the propagation of errors
- * in the size of join results", Technical Report 1018, Computer
- * Science Dept., University of Wisconsin, Madison, March 1991
- * (available from ftp.cs.wisc.edu).
+ * We have most-common-value lists for both relations. Run through
+ * the lists to see which MCVs actually join to each other with the
+ * given operator. This allows us to determine the exact join
+ * selectivity for the portion of the relations represented by the MCV
+ * lists. We still have to estimate for the remaining population, but
+ * in a skewed distribution this gives us a big leg up in accuracy.
+ * For motivation see the analysis in Y. Ioannidis and S.
+ * Christodoulakis, "On the propagation of errors in the size of join
+ * results", Technical Report 1018, Computer Science Dept., University
+ * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
*/
FmgrInfo eqproc;
bool *hasmatch1;
hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
/*
- * If we are doing any variant of JOIN_IN, pretend all the values
- * of the righthand relation are unique (ie, act as if it's been
+ * If we are doing any variant of JOIN_IN, pretend all the values of
+ * the righthand relation are unique (ie, act as if it's been
* DISTINCT'd).
*
- * NOTE: it might seem that we should unique-ify the lefthand input
- * when considering JOIN_REVERSE_IN. But this is not so, because
- * the join clause we've been handed has not been commuted from
- * the way the parser originally wrote it. We know that the
- * unique side of the IN clause is *always* on the right.
+ * NOTE: it might seem that we should unique-ify the lefthand input when
+ * considering JOIN_REVERSE_IN. But this is not so, because the join
+ * clause we've been handed has not been commuted from the way the
+ * parser originally wrote it. We know that the unique side of the IN
+ * clause is *always* on the right.
*
* NOTE: it would be dangerous to try to be smart about JOIN_LEFT or
* JOIN_RIGHT here, because we do not have enough information to
- * determine which var is really on which side of the join.
- * Perhaps someday we should pass in more information.
+ * determine which var is really on which side of the join. Perhaps
+ * someday we should pass in more information.
*/
if (jointype == JOIN_IN ||
jointype == JOIN_REVERSE_IN ||
}
/*
- * Note we assume that each MCV will match at most one member of
- * the other MCV list. If the operator isn't really equality,
- * there could be multiple matches --- but we don't look for them,
- * both for speed and because the math wouldn't add up...
+ * Note we assume that each MCV will match at most one member of the
+ * other MCV list. If the operator isn't really equality, there could
+ * be multiple matches --- but we don't look for them, both for speed
+ * and because the math wouldn't add up...
*/
matchprodfreq = 0.0;
nmatches = 0;
pfree(hasmatch2);
/*
- * Compute total frequency of non-null values that are not in the
- * MCV lists.
+ * Compute total frequency of non-null values that are not in the MCV
+ * lists.
*/
otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
/*
* We can estimate the total selectivity from the point of view of
* relation 1 as: the known selectivity for matched MCVs, plus
- * unmatched MCVs that are assumed to match against random members
- * of relation 2's non-MCV population, plus non-MCV values that
- * are assumed to match against random members of relation 2's
- * unmatched MCVs plus non-MCV values.
+ * unmatched MCVs that are assumed to match against random members of
+ * relation 2's non-MCV population, plus non-MCV values that are
+ * assumed to match against random members of relation 2's unmatched
+ * MCVs plus non-MCV values.
*/
totalsel1 = matchprodfreq;
if (nd2 > nvalues2)
/*
* Use the smaller of the two estimates. This can be justified in
- * essentially the same terms as given below for the no-stats
- * case: to a first approximation, we are estimating from the
- * point of view of the relation with smaller nd.
+ * essentially the same terms as given below for the no-stats case: to
+ * a first approximation, we are estimating from the point of view of
+ * the relation with smaller nd.
*/
selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
}
{
/*
* We do not have MCV lists for both sides. Estimate the join
- * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2).
- * This is plausible if we assume that the join operator is strict
- * and the non-null values are about equally distributed: a given
- * non-null tuple of rel1 will join to either zero or
- * N2*(1-nullfrac2)/nd2 rows of rel2, so total join rows are at
- * most N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join
- * selectivity of not more than (1-nullfrac1)*(1-nullfrac2)/nd2.
- * By the same logic it is not more than
- * (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression with MIN()
- * is an upper bound. Using the MIN() means we estimate from the
- * point of view of the relation with smaller nd (since the larger
- * nd is determining the MIN). It is reasonable to assume that
- * most tuples in this rel will have join partners, so the bound
- * is probably reasonably tight and should be taken as-is.
+ * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
+ * is plausible if we assume that the join operator is strict and the
+ * non-null values are about equally distributed: a given non-null
+ * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
+ * of rel2, so total join rows are at most
+ * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
+ * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
+ * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
+ * with MIN() is an upper bound. Using the MIN() means we estimate
+ * from the point of view of the relation with smaller nd (since the
+ * larger nd is determining the MIN). It is reasonable to assume that
+ * most tuples in this rel will have join partners, so the bound is
+ * probably reasonably tight and should be taken as-is.
*
* XXX Can we be smarter if we have an MCV list for just one side? It
- * seems that if we assume equal distribution for the other side,
- * we end up with the same answer anyway.
+ * seems that if we assume equal distribution for the other side, we
+ * end up with the same answer anyway.
*/
double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
{
result = DatumGetFloat8(DirectFunctionCall4(eqjoinsel,
PointerGetDatum(root),
- ObjectIdGetDatum(eqop),
+ ObjectIdGetDatum(eqop),
PointerGetDatum(args),
- Int16GetDatum(jointype)));
+ Int16GetDatum(jointype)));
}
else
{
*rightscan = selec;
/*
- * Only one of the two fractions can really be less than 1.0; believe
- * the smaller estimate and reset the other one to exactly 1.0. If we
- * get exactly equal estimates (as can easily happen with self-joins),
- * believe neither.
+ * Only one of the two fractions can really be less than 1.0; believe the
+ * smaller estimate and reset the other one to exactly 1.0. If we get
+ * exactly equal estimates (as can easily happen with self-joins), believe
+ * neither.
*/
if (*leftscan > *rightscan)
*leftscan = 1.0;
*/
typedef struct
{
- Node *var; /* might be an expression, not just a Var */
- RelOptInfo *rel; /* relation it belongs to */
- double ndistinct; /* # distinct values */
+ Node *var; /* might be an expression, not just a Var */
+ RelOptInfo *rel; /* relation it belongs to */
+ double ndistinct; /* # distinct values */
} GroupVarInfo;
static List *
/*
* If we find any variable-free GROUP BY item, then either it is a
- * constant (and we can ignore it) or it contains a volatile
- * function; in the latter case we punt and assume that each input
- * row will yield a distinct group.
+ * constant (and we can ignore it) or it contains a volatile function;
+ * in the latter case we punt and assume that each input row will
+ * yield a distinct group.
*/
if (varshere == NIL)
{
* Steps 3/4: group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove
- * these Vars from the newvarinfos list for the next iteration. This
- * is the easiest way to group Vars of same rel together.
+ * varinfos, plus all other Vars in the same relation. We remove these
+ * Vars from the newvarinfos list for the next iteration. This is the
+ * easiest way to group Vars of same rel together.
*/
numdistinct = 1.0;
if (rel->tuples > 0)
{
/*
- * Clamp to size of rel, or size of rel / 10 if multiple Vars.
- * The fudge factor is because the Vars are probably correlated
- * but we don't know by how much. We should never clamp to less
- * than the largest ndistinct value for any of the Vars, though,
- * since there will surely be at least that many groups.
+ * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
+ * fudge factor is because the Vars are probably correlated but we
+ * don't know by how much. We should never clamp to less than the
+ * largest ndistinct value for any of the Vars, though, since
+ * there will surely be at least that many groups.
*/
double clamp = rel->tuples;
else
{
/*
- * Believe a default ndistinct only if it came from stats.
- * Otherwise punt and return 0.1, per comments above.
+ * Believe a default ndistinct only if it came from stats. Otherwise
+ * punt and return 0.1, per comments above.
*/
if (ndistinct == DEFAULT_NUM_DISTINCT)
{
avgfreq = (1.0 - stanullfrac) / ndistinct;
/*
- * Adjust ndistinct to account for restriction clauses. Observe we
- * are assuming that the data distribution is affected uniformly by
- * the restriction clauses!
+ * Adjust ndistinct to account for restriction clauses. Observe we are
+ * assuming that the data distribution is affected uniformly by the
+ * restriction clauses!
*
- * XXX Possibly better way, but much more expensive: multiply by
- * selectivity of rel's restriction clauses that mention the target
- * Var.
+ * XXX Possibly better way, but much more expensive: multiply by selectivity
+ * of rel's restriction clauses that mention the target Var.
*/
if (vardata.rel)
ndistinct *= vardata.rel->rows / vardata.rel->tuples;
/*
- * Initial estimate of bucketsize fraction is 1/nbuckets as long as
- * the number of buckets is less than the expected number of distinct
- * values; otherwise it is 1/ndistinct.
+ * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
+ * number of buckets is less than the expected number of distinct values;
+ * otherwise it is 1/ndistinct.
*/
if (ndistinct > nbuckets)
estfract = 1.0 / nbuckets;
}
/*
- * Adjust estimated bucketsize upward to account for skewed
- * distribution.
+ * Adjust estimated bucketsize upward to account for skewed distribution.
*/
if (avgfreq > 0.0 && mcvfreq > avgfreq)
estfract *= mcvfreq / avgfreq;
/*
* Clamp bucketsize to sane range (the above adjustment could easily
- * produce an out-of-range result). We set the lower bound a little
- * above zero, since zero isn't a very sane result.
+ * produce an out-of-range result). We set the lower bound a little above
+ * zero, since zero isn't a very sane result.
*/
if (estfract < 1.0e-6)
estfract = 1.0e-6;
double *scaledlobound, double *scaledhibound)
{
/*
- * Both the valuetypid and the boundstypid should exactly match
- * the declared input type(s) of the operator we are invoked for,
- * so we just error out if either is not recognized.
+ * Both the valuetypid and the boundstypid should exactly match the
+ * declared input type(s) of the operator we are invoked for, so we just
+ * error out if either is not recognized.
*
- * XXX The histogram we are interpolating between points of could belong
- * to a column that's only binary-compatible with the declared type.
- * In essence we are assuming that the semantics of binary-compatible
- * types are enough alike that we can use a histogram generated with one
- * type's operators to estimate selectivity for the other's. This is
- * outright wrong in some cases --- in particular signed versus unsigned
+ * XXX The histogram we are interpolating between points of could belong to a
+ * column that's only binary-compatible with the declared type. In essence
+ * we are assuming that the semantics of binary-compatible types are
+ * enough alike that we can use a histogram generated with one type's
+ * operators to estimate selectivity for the other's. This is outright
+ * wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
case TEXTOID:
case NAMEOID:
{
- char *valstr = convert_string_datum(value, valuetypid);
- char *lostr = convert_string_datum(lobound, boundstypid);
- char *histr = convert_string_datum(hibound, boundstypid);
+ char *valstr = convert_string_datum(value, valuetypid);
+ char *lostr = convert_string_datum(lobound, boundstypid);
+ char *histr = convert_string_datum(hibound, boundstypid);
convert_string_to_scalar(valstr, scaledvalue,
lostr, scaledlobound,
}
/*
- * Can't get here unless someone tries to use scalarltsel/scalargtsel
- * on an operator with one numeric and one non-numeric operand.
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel on
+ * an operator with one numeric and one non-numeric operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return 0;
return 0.0; /* empty string has scalar value 0 */
/*
- * Since base is at least 10, need not consider more than about 20
- * chars
+ * Since base is at least 10, need not consider more than about 20 chars
*/
if (slen > 20)
slen = 20;
default:
/*
- * Can't get here unless someone tries to use scalarltsel on
- * an operator with one string and one non-string operand.
+ * Can't get here unless someone tries to use scalarltsel on an
+ * operator with one string and one non-string operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return NULL;
size_t xfrmlen2;
/*
- * Note: originally we guessed at a suitable output buffer size,
- * and only needed to call strxfrm twice if our guess was too
- * small. However, it seems that some versions of Solaris have
- * buggy strxfrm that can write past the specified buffer length
- * in that scenario. So, do it the dumb way for portability.
+ * Note: originally we guessed at a suitable output buffer size, and
+ * only needed to call strxfrm twice if our guess was too small.
+ * However, it seems that some versions of Solaris have buggy strxfrm
+ * that can write past the specified buffer length in that scenario.
+ * So, do it the dumb way for portability.
*
- * Yet other systems (e.g., glibc) sometimes return a smaller value
- * from the second call than the first; thus the Assert must be <=
- * not == as you'd expect. Can't any of these people program
- * their way out of a paper bag?
+ * Yet other systems (e.g., glibc) sometimes return a smaller value from
+ * the second call than the first; thus the Assert must be <= not ==
+ * as you'd expect. Can't any of these people program their way out
+ * of a paper bag?
*/
xfrmlen = strxfrm(NULL, val, 0);
xfrmstr = (char *) palloc(xfrmlen + 1);
Interval *interval = DatumGetIntervalP(value);
/*
- * Convert the month part of Interval to days using
- * assumed average month length of 365.25/12.0 days. Not
- * too accurate, but plenty good enough for our purposes.
+ * Convert the month part of Interval to days using assumed
+ * average month length of 365.25/12.0 days. Not too
+ * accurate, but plenty good enough for our purposes.
*/
#ifdef HAVE_INT64_TIMESTAMP
- return interval->time + interval->day * (double)USECS_PER_DAY +
- interval->month * ((DAYS_PER_YEAR / (double)MONTHS_PER_YEAR) * USECS_PER_DAY);
+ return interval->time + interval->day * (double) USECS_PER_DAY +
+ interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
#else
return interval->time + interval->day * SECS_PER_DAY +
- interval->month * ((DAYS_PER_YEAR / (double)MONTHS_PER_YEAR) * (double)SECS_PER_DAY);
+ interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * (double) SECS_PER_DAY);
#endif
}
case RELTIMEOID:
}
/*
- * Can't get here unless someone tries to use scalarltsel/scalargtsel
- * on an operator with one timevalue and one non-timevalue operand.
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel on
+ * an operator with one timevalue and one non-timevalue operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return 0;
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of
- * other relations will be treated as pseudoconstants.
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
examine_variable(root, right, varRelid, &rdata);
{
vardata->statsTuple = SearchSysCache(STATRELATT,
ObjectIdGetDatum(relid),
- Int16GetDatum(var->varattno),
+ Int16GetDatum(var->varattno),
0, 0);
}
else
{
/*
- * XXX This means the Var comes from a JOIN or sub-SELECT.
- * Later add code to dig down into the join etc and see if we
- * can trace the variable to something with stats. (But
- * beware of sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps
- * there are no cases where this would really be useful,
- * because we'd have flattened the subselect if it is??)
+ * XXX This means the Var comes from a JOIN or sub-SELECT. Later
+ * add code to dig down into the join etc and see if we can trace
+ * the variable to something with stats. (But beware of
+ * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no
+ * cases where this would really be useful, because we'd have
+ * flattened the subselect if it is??)
*/
}
if (varRelid == 0 || bms_is_member(varRelid, varnos))
{
onerel = find_base_rel(root,
- (varRelid ? varRelid : bms_singleton_member(varnos)));
+ (varRelid ? varRelid : bms_singleton_member(varnos)));
vardata->rel = onerel;
- node = basenode; /* strip any relabeling */
+ node = basenode; /* strip any relabeling */
}
/* else treat it as a constant */
break;
{
/* treat it as a variable of a join relation */
vardata->rel = find_join_rel(root, varnos);
- node = basenode; /* strip any relabeling */
+ node = basenode; /* strip any relabeling */
}
else if (bms_is_member(varRelid, varnos))
{
/* ignore the vars belonging to other relations */
vardata->rel = find_base_rel(root, varRelid);
- node = basenode; /* strip any relabeling */
+ node = basenode; /* strip any relabeling */
/* note: no point in expressional-index search here */
}
/* else treat it as a constant */
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to
- * match it to expressional index columns, in hopes of finding
- * some statistics.
+ * We have an expression in vars of a single relation. Try to match
+ * it to expressional index columns, in hopes of finding some
+ * statistics.
*
- * XXX it's conceivable that there are multiple matches with
- * different index opclasses; if so, we need to pick one that
- * matches the operator we are estimating for. FIXME later.
+ * XXX it's conceivable that there are multiple matches with different
+ * index opclasses; if so, we need to pick one that matches the
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
if (equal(node, indexkey))
{
/*
- * Found a match ... is it a unique index? Tests
- * here should match has_unique_index().
+ * Found a match ... is it a unique index? Tests here
+ * should match has_unique_index().
*/
if (index->unique &&
index->ncolumns == 1 &&
vardata->isunique = true;
/* Has it got stats? */
vardata->statsTuple = SearchSysCache(STATRELATT,
- ObjectIdGetDatum(index->indexoid),
- Int16GetDatum(pos + 1),
+ ObjectIdGetDatum(index->indexoid),
+ Int16GetDatum(pos + 1),
0, 0);
if (vardata->statsTuple)
break;
double ntuples;
/*
- * Determine the stadistinct value to use. There are cases where we
- * can get an estimate even without a pg_statistic entry, or can get a
- * better value than is in pg_statistic.
+ * Determine the stadistinct value to use. There are cases where we can
+ * get an estimate even without a pg_statistic entry, or can get a better
+ * value than is in pg_statistic.
*/
if (HeapTupleIsValid(vardata->statsTuple))
{
/*
* Special-case boolean columns: presumably, two distinct values.
*
- * Are there any other datatypes we should wire in special estimates
- * for?
+ * Are there any other datatypes we should wire in special estimates for?
*/
stadistinct = 2.0;
}
else
{
/*
- * We don't keep statistics for system columns, but in some cases
- * we can infer distinctness anyway.
+ * We don't keep statistics for system columns, but in some cases we
+ * can infer distinctness anyway.
*/
if (vardata->var && IsA(vardata->var, Var))
{
/*
* If there is a unique index for the variable, assume it is unique no
- * matter what pg_statistic says (the statistics could be out of
- * date). Can skip search if we already think it's unique.
+ * matter what pg_statistic says (the statistics could be out of date).
+ * Can skip search if we already think it's unique.
*/
if (stadistinct != -1.0)
{
return floor((-stadistinct * ntuples) + 0.5);
/*
- * With no data, estimate ndistinct = ntuples if the table is small,
- * else use default.
+ * With no data, estimate ndistinct = ntuples if the table is small, else
+ * use default.
*/
if (ntuples < DEFAULT_NUM_DISTINCT)
return ntuples;
get_typlenbyval(vardata->atttype, &typLen, &typByVal);
/*
- * If there is a histogram, grab the last or first value as
- * appropriate.
+ * If there is a histogram, grab the last or first value as appropriate.
*
- * If there is a histogram that is sorted with some other operator than
- * the one we want, fail --- this suggests that there is data we can't
- * use.
+ * If there is a histogram that is sorted with some other operator than the
+ * one we want, fail --- this suggests that there is data we can't use.
*/
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
/*
* If we have most-common-values info, look for a large MCV. This is
- * needed even if we also have a histogram, since the histogram
- * excludes the MCVs. However, usually the MCVs will not be the
- * extreme values, so avoid unnecessary data copying.
+ * needed even if we also have a histogram, since the histogram excludes
+ * the MCVs. However, usually the MCVs will not be the extreme values, so
+ * avoid unnecessary data copying.
*/
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
if (typeid == BYTEAOID && case_insensitive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("case insensitive matching not supported on type bytea")));
+ errmsg("case insensitive matching not supported on type bytea")));
if (typeid != BYTEAOID)
{
}
/*
- * XXX I suspect isalpha() is not an adequately locale-sensitive
- * test for characters that can vary under case folding?
+ * XXX I suspect isalpha() is not an adequately locale-sensitive test
+ * for characters that can vary under case folding?
*/
if (case_insensitive && isalpha((unsigned char) patt[pos]))
break;
/*
* NOTE: this code used to think that %% meant a literal %, but
- * textlike() itself does not think that, and the SQL92 spec
- * doesn't say any such thing either.
+ * textlike() itself does not think that, and the SQL92 spec doesn't
+ * say any such thing either.
*/
match[match_pos++] = patt[pos];
}
/* in LIKE, an empty pattern is an exact match! */
if (pos == pattlen)
- return Pattern_Prefix_Exact; /* reached end of pattern, so
- * exact */
+ return Pattern_Prefix_Exact; /* reached end of pattern, so exact */
if (match_pos > 0)
return Pattern_Prefix_Partial;
Oid typeid = patt_const->consttype;
/*
- * Should be unnecessary, there are no bytea regex operators defined.
- * As such, it should be noted that the rest of this function has *not*
- * been made safe for binary (possibly NULL containing) strings.
+ * Should be unnecessary, there are no bytea regex operators defined. As
+ * such, it should be noted that the rest of this function has *not* been
+ * made safe for binary (possibly NULL containing) strings.
*/
if (typeid == BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("regular-expression matching not supported on type bytea")));
+ errmsg("regular-expression matching not supported on type bytea")));
/* the right-hand const is type text for all of these */
patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
}
/*
- * If unquoted | is present at paren level 0 in pattern, then there
- * are multiple alternatives for the start of the string.
+ * If unquoted | is present at paren level 0 in pattern, then there are
+ * multiple alternatives for the start of the string.
*/
paren_depth = 0;
for (pos = 1; patt[pos]; pos++)
prev_match_pos = match_pos = 0;
/* note start at pos 1 to skip leading ^ */
- for (prev_pos = pos = 1; patt[pos]; )
+ for (prev_pos = pos = 1; patt[pos];)
{
- int len;
+ int len;
/*
- * Check for characters that indicate multiple possible matches
- * here. XXX I suspect isalpha() is not an adequately
- * locale-sensitive test for characters that can vary under case
- * folding?
+ * Check for characters that indicate multiple possible matches here.
+ * XXX I suspect isalpha() is not an adequately locale-sensitive test
+ * for characters that can vary under case folding?
*/
if (patt[pos] == '.' ||
patt[pos] == '(' ||
break;
/*
- * In AREs, backslash followed by alphanumeric is an escape, not
- * a quoted character. Must treat it as having multiple possible
+ * In AREs, backslash followed by alphanumeric is an escape, not a
+ * quoted character. Must treat it as having multiple possible
* matches.
*/
if (patt[pos] == '\\' && isalnum((unsigned char) patt[pos + 1]))
/*
* Check for quantifiers. Except for +, this means the preceding
- * character is optional, so we must remove it from the prefix
- * too!
+ * character is optional, so we must remove it from the prefix too!
*/
if (patt[pos] == '*' ||
patt[pos] == '?' ||
/* Assume scalargtsel is appropriate for all supported types */
prefixsel = DatumGetFloat8(DirectFunctionCall4(scalargtsel,
PointerGetDatum(root),
- ObjectIdGetDatum(cmpopr),
- PointerGetDatum(cmpargs),
+ ObjectIdGetDatum(cmpopr),
+ PointerGetDatum(cmpargs),
Int32GetDatum(0)));
/*-------
/* Assume scalarltsel is appropriate for all supported types */
topsel = DatumGetFloat8(DirectFunctionCall4(scalarltsel,
PointerGetDatum(root),
- ObjectIdGetDatum(cmpopr),
- PointerGetDatum(cmpargs),
+ ObjectIdGetDatum(cmpopr),
+ PointerGetDatum(cmpargs),
Int32GetDatum(0)));
/*
- * Merge the two selectivities in the same way as for a range
- * query (see clauselist_selectivity()).
+ * Merge the two selectivities in the same way as for a range query
+ * (see clauselist_selectivity()).
*/
prefixsel = topsel + prefixsel - 1.0;
prefixsel += nulltestsel(root, IS_NULL, variable, 0);
/*
- * A zero or slightly negative prefixsel should be converted into
- * a small positive value; we probably are dealing with a very
- * tight range and got a bogus result due to roundoff errors.
- * However, if prefixsel is very negative, then we probably have
- * default selectivity estimates on one or both sides of the
- * range. In that case, insert a not-so-wildly-optimistic default
- * estimate.
+ * A zero or slightly negative prefixsel should be converted into a
+ * small positive value; we probably are dealing with a very tight
+ * range and got a bogus result due to roundoff errors. However, if
+ * prefixsel is very negative, then we probably have default
+ * selectivity estimates on one or both sides of the range. In that
+ * case, insert a not-so-wildly-optimistic default estimate.
*/
if (prefixsel <= 0.0)
{
if (prefixsel < -0.01)
{
/*
- * No data available --- use a default estimate that is
- * small, but not real small.
+ * No data available --- use a default estimate that is small,
+ * but not real small.
*/
prefixsel = 0.005;
}
#define FIXED_CHAR_SEL 0.20 /* about 1/5 */
#define CHAR_RANGE_SEL 0.25
-#define ANY_CHAR_SEL 0.9 /* not 1, since it won't match
- * end-of-string */
+#define ANY_CHAR_SEL 0.9 /* not 1, since it won't match end-of-string */
#define FULL_WILDCARD_SEL 5.0
#define PARTIAL_WILDCARD_SEL 2.0
if (typeid == BYTEAOID && case_insensitive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("case insensitive matching not supported on type bytea")));
+ errmsg("case insensitive matching not supported on type bytea")));
if (typeid != BYTEAOID)
{
else if (patt[pos] == '|' && paren_depth == 0)
{
/*
- * If unquoted | is present at paren level 0 in pattern, we
- * have multiple alternatives; sum their probabilities.
+ * If unquoted | is present at paren level 0 in pattern, we have
+ * multiple alternatives; sum their probabilities.
*/
sel += regex_selectivity_sub(patt + (pos + 1),
pattlen - (pos + 1),
Oid typeid = patt_const->consttype;
/*
- * Should be unnecessary, there are no bytea regex operators defined.
- * As such, it should be noted that the rest of this function has *not*
- * been made safe for binary (possibly NULL containing) strings.
+ * Should be unnecessary, there are no bytea regex operators defined. As
+ * such, it should be noted that the rest of this function has *not* been
+ * made safe for binary (possibly NULL containing) strings.
*/
if (typeid == BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("regular-expression matching not supported on type bytea")));
+ errmsg("regular-expression matching not supported on type bytea")));
/* the right-hand const is type text for all of these */
patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
if (datatype == NAMEOID)
{
workstr = DatumGetCString(DirectFunctionCall1(nameout,
- str_const->constvalue));
+ str_const->constvalue));
len = strlen(workstr);
}
else if (datatype == BYTEAOID)
else
{
workstr = DatumGetCString(DirectFunctionCall1(textout,
- str_const->constvalue));
+ str_const->constvalue));
len = strlen(workstr);
}
*lastchar = savelastchar;
/*
- * Truncate off the last character, which might be more than 1
- * byte, depending on the character encoding.
+ * Truncate off the last character, which might be more than 1 byte,
+ * depending on the character encoding.
*/
if (datatype != BYTEAOID && pg_database_encoding_max_length() > 1)
len = pg_mbcliplen(workstr, len, len - 1);
List *selectivityQuals;
/*
- * If the index is partial, AND the index predicate with the
- * explicitly given indexquals to produce a more accurate idea of the
- * index selectivity. This may produce redundant clauses. We get rid
- * of exact duplicates in the code below. We expect that most cases
- * of partial redundancy (such as "x < 4" from the qual and "x < 5"
- * from the predicate) will be recognized and handled correctly by
- * clauselist_selectivity(). This assumption is somewhat fragile,
- * since it depends on predicate_implied_by() and clauselist_selectivity()
+ * If the index is partial, AND the index predicate with the explicitly
+ * given indexquals to produce a more accurate idea of the index
+ * selectivity. This may produce redundant clauses. We get rid of exact
+ * duplicates in the code below. We expect that most cases of partial
+ * redundancy (such as "x < 4" from the qual and "x < 5" from the
+ * predicate) will be recognized and handled correctly by
+ * clauselist_selectivity(). This assumption is somewhat fragile, since
+ * it depends on predicate_implied_by() and clauselist_selectivity()
* having similar capabilities, and there are certainly many cases where
- * we will end up with a too-low selectivity estimate. This will bias the
+ * we will end up with a too-low selectivity estimate. This will bias the
* system in favor of using partial indexes where possible, which is not
* necessarily a bad thing. But it'd be nice to do better someday.
*
- * Note that index->indpred and indexQuals are both in implicit-AND form,
- * so ANDing them together just takes merging the lists. However,
- * eliminating duplicates is a bit trickier because indexQuals
- * contains RestrictInfo nodes and the indpred does not. It is okay
- * to pass a mixed list to clauselist_selectivity, but we have to work
- * a bit to generate a list without logical duplicates. (We could
- * just list_union indpred and strippedQuals, but then we'd not get
- * caching of per-qual selectivity estimates.)
+ * Note that index->indpred and indexQuals are both in implicit-AND form, so
+ * ANDing them together just takes merging the lists. However,
+ * eliminating duplicates is a bit trickier because indexQuals contains
+ * RestrictInfo nodes and the indpred does not. It is okay to pass a
+ * mixed list to clauselist_selectivity, but we have to work a bit to
+ * generate a list without logical duplicates. (We could just list_union
+ * indpred and strippedQuals, but then we'd not get caching of per-qual
+ * selectivity estimates.)
*/
if (index->indpred != NIL)
{
numIndexTuples = *indexSelectivity * index->rel->tuples;
/*
- * We can bound the number of tuples by the index size in any case.
- * Also, always estimate at least one tuple is touched, even when
+ * We can bound the number of tuples by the index size in any case. Also,
+ * always estimate at least one tuple is touched, even when
* indexSelectivity estimate is tiny.
*/
if (numIndexTuples > index->tuples)
/*
* Estimate the number of index pages that will be retrieved.
*
- * For all currently-supported index types, the first page of the index
- * is a metadata page, and we should figure on fetching that plus a
- * pro-rated fraction of the remaining pages.
+ * For all currently-supported index types, the first page of the index is a
+ * metadata page, and we should figure on fetching that plus a pro-rated
+ * fraction of the remaining pages.
*/
if (index->pages > 1 && index->tuples > 0)
{
/*
* CPU cost: any complex expressions in the indexquals will need to be
- * evaluated once at the start of the scan to reduce them to runtime
- * keys to pass to the index AM (see nodeIndexscan.c). We model the
- * per-tuple CPU costs as cpu_index_tuple_cost plus one
- * cpu_operator_cost per indexqual operator.
+ * evaluated once at the start of the scan to reduce them to runtime keys
+ * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
+ * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
+ * indexqual operator.
*
- * Note: this neglects the possible costs of rechecking lossy operators
- * and OR-clause expressions. Detecting that that might be needed
- * seems more expensive than it's worth, though, considering all the
- * other inaccuracies here ...
+ * Note: this neglects the possible costs of rechecking lossy operators and
+ * OR-clause expressions. Detecting that that might be needed seems more
+ * expensive than it's worth, though, considering all the other
+ * inaccuracies here ...
*/
cost_qual_eval(&index_qual_cost, indexQuals);
qual_op_cost = cpu_operator_cost * list_length(indexQuals);
ListCell *l;
/*
- * For a btree scan, only leading '=' quals plus inequality quals
- * for the immediately next attribute contribute to index selectivity
- * (these are the "boundary quals" that determine the starting and
- * stopping points of the index scan). Additional quals can suppress
- * visits to the heap, so it's OK to count them in indexSelectivity,
- * but they should not count for estimating numIndexTuples. So we must
- * examine the given indexQuals to find out which ones count as boundary
- * quals. We rely on the knowledge that they are given in index column
- * order.
+ * For a btree scan, only leading '=' quals plus inequality quals for the
+ * immediately next attribute contribute to index selectivity (these are
+ * the "boundary quals" that determine the starting and stopping points of
+ * the index scan). Additional quals can suppress visits to the heap, so
+ * it's OK to count them in indexSelectivity, but they should not count
+ * for estimating numIndexTuples. So we must examine the given indexQuals
+ * to find out which ones count as boundary quals. We rely on the
+ * knowledge that they are given in index column order.
*/
indexBoundQuals = NIL;
indexcol = 0;
foreach(l, indexQuals)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Expr *clause;
- Oid clause_op;
- int op_strategy;
+ Expr *clause;
+ Oid clause_op;
+ int op_strategy;
Assert(IsA(rinfo, RestrictInfo));
clause = rinfo->clause;
}
op_strategy = get_op_opclass_strategy(clause_op,
index->classlist[indexcol]);
- Assert(op_strategy != 0); /* not a member of opclass?? */
+ Assert(op_strategy != 0); /* not a member of opclass?? */
if (op_strategy == BTEqualStrategyNumber)
eqQualHere = true;
indexBoundQuals = lappend(indexBoundQuals, rinfo);
}
/*
- * If index is unique and we found an '=' clause for each column,
- * we can just assume numIndexTuples = 1 and skip the expensive
+ * If index is unique and we found an '=' clause for each column, we can
+ * just assume numIndexTuples = 1 and skip the expensive
* clauselist_selectivity calculations.
*/
if (index->unique && indexcol == index->ncolumns - 1 && eqQualHere)
indexSelectivity, indexCorrelation);
/*
- * If we can get an estimate of the first column's ordering
- * correlation C from pg_statistic, estimate the index correlation as
- * C for a single-column index, or C * 0.75 for multiple columns.
- * (The idea here is that multiple columns dilute the importance of
- * the first column's ordering, but don't negate it entirely. Before
- * 8.0 we divided the correlation by the number of columns, but that
- * seems too strong.)
+ * If we can get an estimate of the first column's ordering correlation C
+ * from pg_statistic, estimate the index correlation as C for a
+ * single-column index, or C * 0.75 for multiple columns. (The idea here
+ * is that multiple columns dilute the importance of the first column's
+ * ordering, but don't negate it entirely. Before 8.0 we divided the
+ * correlation by the number of columns, but that seems too strong.)
*/
if (index->indexkeys[0] != 0)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.154 2005/10/09 17:21:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.155 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
case DTK_INVALID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"%s\" is no longer supported", str)));
+ errmsg("date/time value \"%s\" is no longer supported", str)));
TIMESTAMP_NOEND(result);
break;
Datum
timestamp_out(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
char *result;
struct pg_tm tt,
*tm = &tt;
timestamp_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
int32 typmod = PG_GETARG_INT32(2);
- Timestamp timestamp;
+ Timestamp timestamp;
struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
Datum
timestamp_send(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
StringInfoData buf;
pq_begintypsend(&buf);
Datum
timestamp_scale(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
int32 typmod = PG_GETARG_INT32(1);
Timestamp result;
INT64CONST(5),
INT64CONST(0)
};
-
#else
static const double TimestampScales[MAX_TIMESTAMP_PRECISION + 1] = {
1,
if (typmod < 0 || typmod > MAX_TIMESTAMP_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp(%d) precision must be between %d and %d",
- typmod, 0, MAX_TIMESTAMP_PRECISION)));
+ errmsg("timestamp(%d) precision must be between %d and %d",
+ typmod, 0, MAX_TIMESTAMP_PRECISION)));
/*
- * Note: this round-to-nearest code is not completely consistent
- * about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
- * round-to-nearest-even, but the integer code always rounds up
- * (away from zero). Is it worth trying to be consistent?
+ * Note: this round-to-nearest code is not completely consistent about
+ * rounding values that are exactly halfway between integral values.
+ * On most platforms, rint() will implement round-to-nearest-even, but
+ * the integer code always rounds up (away from zero). Is it worth
+ * trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (*time >= INT64CONST(0))
{
*time = ((*time + TimestampOffsets[typmod]) / TimestampScales[typmod]) *
- TimestampScales[typmod];
+ TimestampScales[typmod];
}
else
{
* TimestampScales[typmod]);
}
#else
- *time = rint((double)*time * TimestampScales[typmod]) / TimestampScales[typmod];
+ *time = rint((double) *time * TimestampScales[typmod]) / TimestampScales[typmod];
#endif
}
}
case DTK_INVALID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"%s\" is no longer supported", str)));
+ errmsg("date/time value \"%s\" is no longer supported", str)));
TIMESTAMP_NOEND(result);
break;
timestamptz_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
case DTK_INVALID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"%s\" is no longer supported", str)));
+ errmsg("date/time value \"%s\" is no longer supported", str)));
break;
default:
interval_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
PG_RETURN_INTERVAL_P(result);
}
+
/*
* Adjust interval for specified precision, in both YEAR to SECOND
* range and sub-second precision.
INT64CONST(5),
INT64CONST(0)
};
-
#else
static const double IntervalScales[MAX_INTERVAL_PRECISION + 1] = {
1,
#endif
/*
- * Unspecified range and precision? Then not necessary to adjust.
- * Setting typmod to -1 is the convention for all types.
+ * Unspecified range and precision? Then not necessary to adjust. Setting
+ * typmod to -1 is the convention for all types.
*/
if (typmod != -1)
{
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_HOUR) *
- USECS_PER_HOUR;
+ USECS_PER_HOUR;
#else
- interval->time = ((int)(interval->time / SECS_PER_HOUR)) * (double)SECS_PER_HOUR;
+ interval->time = ((int) (interval->time / SECS_PER_HOUR)) * (double) SECS_PER_HOUR;
#endif
}
else if (range == INTERVAL_MASK(MINUTE))
hour = interval->time / USECS_PER_HOUR;
interval->time -= hour * USECS_PER_HOUR;
interval->time = (interval->time / USECS_PER_MINUTE) *
- USECS_PER_MINUTE;
+ USECS_PER_MINUTE;
#else
- TMODULO(interval->time, hour, (double)SECS_PER_HOUR);
- interval->time = ((int)(interval->time / SECS_PER_MINUTE)) * (double)SECS_PER_MINUTE;
+ TMODULO(interval->time, hour, (double) SECS_PER_HOUR);
+ interval->time = ((int) (interval->time / SECS_PER_MINUTE)) * (double) SECS_PER_MINUTE;
#endif
}
else if (range == INTERVAL_MASK(SECOND))
minute = interval->time / USECS_PER_MINUTE;
interval->time -= minute * USECS_PER_MINUTE;
#else
- TMODULO(interval->time, minute, (double)SECS_PER_MINUTE);
+ TMODULO(interval->time, minute, (double) SECS_PER_MINUTE);
/* return subseconds too */
#endif
}
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_HOUR) *
- USECS_PER_HOUR;
+ USECS_PER_HOUR;
#else
- interval->time = ((int) (interval->time / SECS_PER_HOUR)) * (double)SECS_PER_HOUR;
+ interval->time = ((int) (interval->time / SECS_PER_HOUR)) * (double) SECS_PER_HOUR;
#endif
}
/* DAY TO MINUTE */
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_MINUTE) *
- USECS_PER_MINUTE;
+ USECS_PER_MINUTE;
#else
- interval->time = ((int)(interval->time / SECS_PER_MINUTE)) * (double)SECS_PER_MINUTE;
+ interval->time = ((int) (interval->time / SECS_PER_MINUTE)) * (double) SECS_PER_MINUTE;
#endif
}
/* DAY TO SECOND */
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_MINUTE) *
- USECS_PER_MINUTE;
+ USECS_PER_MINUTE;
#else
- interval->time = ((int)(interval->time / SECS_PER_MINUTE)) * (double)SECS_PER_MINUTE;
+ interval->time = ((int) (interval->time / SECS_PER_MINUTE)) * (double) SECS_PER_MINUTE;
#endif
}
/* HOUR TO SECOND */
{
#ifdef HAVE_INT64_TIMESTAMP
int64 hour;
-
#else
double hour;
#endif
hour = interval->time / USECS_PER_HOUR;
interval->time -= hour * USECS_PER_HOUR;
#else
- TMODULO(interval->time, hour, (double)SECS_PER_HOUR);
+ TMODULO(interval->time, hour, (double) SECS_PER_HOUR);
#endif
}
else
if (precision < 0 || precision > MAX_INTERVAL_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval(%d) precision must be between %d and %d",
- precision, 0, MAX_INTERVAL_PRECISION)));
+ errmsg("interval(%d) precision must be between %d and %d",
+ precision, 0, MAX_INTERVAL_PRECISION)));
/*
- * Note: this round-to-nearest code is not completely
- * consistent about rounding values that are exactly halfway
- * between integral values. On most platforms, rint() will
- * implement round-to-nearest-even, but the integer code
- * always rounds up (away from zero). Is it worth trying to
- * be consistent?
+ * Note: this round-to-nearest code is not completely consistent
+ * about rounding values that are exactly halfway between integral
+ * values. On most platforms, rint() will implement
+ * round-to-nearest-even, but the integer code always rounds up
+ * (away from zero). Is it worth trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (interval->time >= INT64CONST(0))
{
interval->time = ((interval->time +
- IntervalOffsets[precision]) /
- IntervalScales[precision]) *
- IntervalScales[precision];
+ IntervalOffsets[precision]) /
+ IntervalScales[precision]) *
+ IntervalScales[precision];
}
else
{
interval->time = -(((-interval->time +
- IntervalOffsets[precision]) /
+ IntervalOffsets[precision]) /
IntervalScales[precision]) *
- IntervalScales[precision]);
+ IntervalScales[precision]);
}
#else
interval->time = rint(((double) interval->time) *
- IntervalScales[precision]) /
- IntervalScales[precision];
+ IntervalScales[precision]) /
+ IntervalScales[precision];
#endif
}
}
* timezone) will be used.
*/
int
-timestamp2tm(Timestamp dt, int *tzp, struct pg_tm *tm, fsec_t *fsec, char **tzn, pg_tz *attimezone)
+timestamp2tm(Timestamp dt, int *tzp, struct pg_tm * tm, fsec_t *fsec, char **tzn, pg_tz *attimezone)
{
- Timestamp date;
+ Timestamp date;
Timestamp time;
pg_time_t utime;
/*
- * If HasCTZSet is true then we have a brute force time zone
- * specified. Go ahead and rotate to the local time zone since we will
- * later bypass any calls which adjust the tm fields.
+ * If HasCTZSet is true then we have a brute force time zone specified. Go
+ * ahead and rotate to the local time zone since we will later bypass any
+ * calls which adjust the tm fields.
*/
if (attimezone == NULL && HasCTZSet && tzp != NULL)
{
dt2time(time, &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#else
time = dt;
- TMODULO(time, date, (double)SECS_PER_DAY);
+ TMODULO(time, date, (double) SECS_PER_DAY);
if (time < 0)
{
if (*fsec >= 1.0)
{
time = ceil(time);
- if (time >= (double)SECS_PER_DAY)
+ if (time >= (double) SECS_PER_DAY)
{
time = 0;
date += 1;
}
/*
- * We have a brute force time zone per SQL99? Then use it without
- * change since we have already rotated to the time zone.
+ * We have a brute force time zone per SQL99? Then use it without change
+ * since we have already rotated to the time zone.
*/
if (attimezone == NULL && HasCTZSet)
{
}
/*
- * If the time falls within the range of pg_time_t, use pg_localtime()
- * to rotate to the local time zone.
+ * If the time falls within the range of pg_time_t, use pg_localtime() to
+ * rotate to the local time zone.
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss.
- * This coding avoids hardwiring any assumptions about the width of
- * pg_time_t, so it should behave sanely on machines without int64.
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * coding avoids hardwiring any assumptions about the width of pg_time_t,
+ * so it should behave sanely on machines without int64.
*/
#ifdef HAVE_INT64_TIMESTAMP
dt = (dt - *fsec) / USECS_PER_SEC +
if ((Timestamp) utime == dt)
{
struct pg_tm *tx = pg_localtime(&utime,
- attimezone ? attimezone : global_timezone);
+ attimezone ? attimezone : global_timezone);
tm->tm_year = tx->tm_year + 1900;
tm->tm_mon = tx->tm_mon + 1;
* Returns -1 on failure (value out of range).
*/
int
-tm2timestamp(struct pg_tm *tm, fsec_t fsec, int *tzp, Timestamp *result)
+tm2timestamp(struct pg_tm * tm, fsec_t fsec, int *tzp, Timestamp *result)
{
#ifdef HAVE_INT64_TIMESTAMP
- int date;
+ int date;
int64 time;
#else
- double date,
+ double date,
time;
#endif
* Convert a interval data type to a tm structure.
*/
int
-interval2tm(Interval span, struct pg_tm *tm, fsec_t *fsec)
+interval2tm(Interval span, struct pg_tm * tm, fsec_t *fsec)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 time;
*fsec = time - (tm->tm_sec * USECS_PER_SEC);
#else
recalc:
- TMODULO(time, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(time, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(time, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(time, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(time, tm->tm_sec, 1.0);
time = TSROUND(time);
/* roundoff may need to propagate to higher-order fields */
}
int
-tm2interval(struct pg_tm *tm, fsec_t fsec, Interval *span)
+tm2interval(struct pg_tm * tm, fsec_t fsec, Interval *span)
{
span->month = tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
- span->day = tm->tm_mday;
+ span->day = tm->tm_mday;
#ifdef HAVE_INT64_TIMESTAMP
span->time = (((((tm->tm_hour * INT64CONST(60)) +
- tm->tm_min) * INT64CONST(60)) +
- tm->tm_sec) * USECS_PER_SEC) + fsec;
+ tm->tm_min) * INT64CONST(60)) +
+ tm->tm_sec) * USECS_PER_SEC) + fsec;
#else
- span->time = (((tm->tm_hour * (double)MINS_PER_HOUR) +
- tm->tm_min) * (double)SECS_PER_MINUTE) +
- tm->tm_sec + fsec;
+ span->time = (((tm->tm_hour * (double) MINS_PER_HOUR) +
+ tm->tm_min) * (double) SECS_PER_MINUTE) +
+ tm->tm_sec + fsec;
#endif
return 0;
{
return (((((hour * MINS_PER_HOUR) + min) * SECS_PER_MINUTE) + sec) * USECS_PER_SEC) + fsec;
} /* time2t() */
-
#else
static double
time2t(const int hour, const int min, const int sec, const fsec_t fsec)
Datum
timestamp_finite(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
PG_RETURN_BOOL(!TIMESTAMP_NOT_FINITE(timestamp));
}
*---------------------------------------------------------*/
void
-GetEpochTime(struct pg_tm *tm)
+GetEpochTime(struct pg_tm * tm)
{
struct pg_tm *t0;
pg_time_t epoch = 0;
* When using float representation, we have to be wary of NaNs.
*
* We consider all NANs to be equal and larger than any non-NAN. This is
- * somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (isnan(dt1))
{
span2 += interval2->month * INT64CONST(30) * USECS_PER_DAY;
span2 += interval2->day * INT64CONST(24) * USECS_PER_HOUR;
#else
- span1 += interval1->month * ((double)DAYS_PER_MONTH * SECS_PER_DAY);
- span1 += interval1->day * ((double)HOURS_PER_DAY * SECS_PER_HOUR);
- span2 += interval2->month * ((double)DAYS_PER_MONTH * SECS_PER_DAY);
- span2 += interval2->day * ((double)HOURS_PER_DAY * SECS_PER_HOUR);
+ span1 += interval1->month * ((double) DAYS_PER_MONTH * SECS_PER_DAY);
+ span1 += interval1->day * ((double) HOURS_PER_DAY * SECS_PER_HOUR);
+ span2 += interval2->month * ((double) DAYS_PER_MONTH * SECS_PER_DAY);
+ span2 += interval2->day * ((double) HOURS_PER_DAY * SECS_PER_HOUR);
#endif
return ((span1 < span2) ? -1 : (span1 > span2) ? 1 : 0);
/*
* Specify hash length as sizeof(double) + sizeof(int4), not as
- * sizeof(Interval), so that any garbage pad bytes in the structure
- * won't be included in the hash!
+ * sizeof(Interval), so that any garbage pad bytes in the structure won't
+ * be included in the hash!
*/
- return hash_any((unsigned char *) key,
- sizeof(key->time) + sizeof(key->day) + sizeof(key->month));
+ return hash_any((unsigned char *) key,
+ sizeof(key->time) + sizeof(key->day) + sizeof(key->month));
}
/* overlaps_timestamp() --- implements the SQL92 OVERLAPS operator.
overlaps_timestamp(PG_FUNCTION_ARGS)
{
/*
- * The arguments are Timestamps, but we leave them as generic Datums
- * to avoid unnecessary conversions between value and reference forms
- * --- not to mention possible dereferences of null pointers.
+ * The arguments are Timestamps, but we leave them as generic Datums to
+ * avoid unnecessary conversions between value and reference forms --- not
+ * to mention possible dereferences of null pointers.
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
DatumGetBool(DirectFunctionCall2(timestamp_lt,t1,t2))
/*
- * If both endpoints of interval 1 are null, the result is null
- * (unknown). If just one endpoint is null, take ts1 as the non-null
- * one. Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null (unknown).
+ * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
+ * take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
if (TIMESTAMP_GT(ts1, ts2))
{
/*
- * This case is ts1 < te2 OR te1 < te2, which may look redundant
- * but in the presence of nulls it's not quite completely so.
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant but
+ * in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
PG_RETURN_NULL();
PG_RETURN_NULL();
/*
- * If te1 is not null then we had ts1 <= te1 above, and we just
- * found ts1 >= te2, hence te1 >= te2.
+ * If te1 is not null then we had ts1 <= te1 above, and we just found
+ * ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
}
PG_RETURN_NULL();
/*
- * If te2 is not null then we had ts2 <= te2 above, and we just
- * found ts2 >= te1, hence te2 >= te1.
+ * If te2 is not null then we had ts2 <= te2 above, and we just found
+ * ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
{
/*
* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else
- * null".
+ * rather silly way of saying "true if both are nonnull, else null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
result->day = 0;
result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
- IntervalPGetDatum(result)));
+ IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
}
/* interval_justify_hours()
* Adjust interval so 'time' contains less than a whole day, and
- * 'day' contains an integral number of days. This is useful for
+ * 'day' contains an integral number of days. This is useful for
* situations (such as non-TZ) where '1 day' = '24 hours' is valid,
* e.g. interval subtraction and division. The SQL standard requires
* such conversion in these cases, but not the conversion of days to months.
Datum
interval_justify_hours(PG_FUNCTION_ARGS)
{
- Interval *span = PG_GETARG_INTERVAL_P(0);
- Interval *result;
+ Interval *span = PG_GETARG_INTERVAL_P(0);
+ Interval *result;
result = (Interval *) palloc(sizeof(Interval));
result->month = span->month;
result->time += span->day * USECS_PER_DAY;
TMODULO(result->time, result->day, USECS_PER_DAY);
#else
- result->time += span->day * (double)SECS_PER_DAY;
- TMODULO(result->time, result->day, (double)SECS_PER_DAY);
+ result->time += span->day * (double) SECS_PER_DAY;
+ TMODULO(result->time, result->day, (double) SECS_PER_DAY);
#endif
PG_RETURN_INTERVAL_P(result);
Datum
interval_justify_days(PG_FUNCTION_ARGS)
{
- Interval *span = PG_GETARG_INTERVAL_P(0);
- Interval *result;
+ Interval *span = PG_GETARG_INTERVAL_P(0);
+ Interval *result;
result = (Interval *) palloc(sizeof(Interval));
result->day = span->day;
Datum
timestamp_pl_interval(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
Interval *span = PG_GETARG_INTERVAL_P(1);
Timestamp result;
*tm = &tt;
fsec_t fsec;
int julian;
-
+
if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) != 0)
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
Datum
timestamp_mi_interval(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
Interval *span = PG_GETARG_INTERVAL_P(1);
Interval tspan;
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder, day_remainder, month_remainder_days;
+ double month_remainder,
+ day_remainder,
+ month_remainder_days;
Interval *result;
result = (Interval *) palloc(sizeof(Interval));
#endif
result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
- IntervalPGetDatum(result)));
+ IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
}
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder, day_remainder, month_remainder_days;
+ double month_remainder,
+ day_remainder,
+ month_remainder_days;
Interval *result;
result = (Interval *) palloc(sizeof(Interval));
#endif
result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
- IntervalPGetDatum(result)));
+ IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
}
elog(ERROR, "expected 2-element interval array");
/*
- * XXX memcpy, instead of just extracting a pointer, to work around
- * buggy array code: it won't ensure proper alignment of Interval
- * objects on machines where double requires 8-byte alignment. That
- * should be fixed, but in the meantime...
+ * XXX memcpy, instead of just extracting a pointer, to work around buggy
+ * array code: it won't ensure proper alignment of Interval objects on
+ * machines where double requires 8-byte alignment. That should be fixed,
+ * but in the meantime...
*
* Note: must use DatumGetPointer here, not DatumGetIntervalP, else some
* compilers optimize into double-aligned load/store anyway.
memcpy((void *) &N, DatumGetPointer(transdatums[1]), sizeof(Interval));
newsum = DatumGetIntervalP(DirectFunctionCall2(interval_pl,
- IntervalPGetDatum(&sumX),
- IntervalPGetDatum(newval)));
+ IntervalPGetDatum(&sumX),
+ IntervalPGetDatum(newval)));
N.time += 1;
transdatums[0] = IntervalPGetDatum(newsum);
elog(ERROR, "expected 2-element interval array");
/*
- * XXX memcpy, instead of just extracting a pointer, to work around
- * buggy array code: it won't ensure proper alignment of Interval
- * objects on machines where double requires 8-byte alignment. That
- * should be fixed, but in the meantime...
+ * XXX memcpy, instead of just extracting a pointer, to work around buggy
+ * array code: it won't ensure proper alignment of Interval objects on
+ * machines where double requires 8-byte alignment. That should be fixed,
+ * but in the meantime...
*
* Note: must use DatumGetPointer here, not DatumGetIntervalP, else some
* compilers optimize into double-aligned load/store anyway.
timestamp_text(PG_FUNCTION_ARGS)
{
/* Input is a Timestamp, but may as well leave it in Datum form */
- Datum timestamp = PG_GETARG_DATUM(0);
+ Datum timestamp = PG_GETARG_DATUM(0);
text *result;
char *str;
int len;
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type timestamp: \"%s\"",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(str))))));
+ PointerGetDatum(str))))));
sp = VARDATA(str);
dp = dstr;
timestamptz_text(PG_FUNCTION_ARGS)
{
/* Input is a Timestamp, but may as well leave it in Datum form */
- Datum timestamp = PG_GETARG_DATUM(0);
+ Datum timestamp = PG_GETARG_DATUM(0);
text *result;
char *str;
int len;
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type timestamp with time zone: \"%s\"",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(str))))));
+ PointerGetDatum(str))))));
sp = VARDATA(str);
dp = dstr;
int len;
str = DatumGetCString(DirectFunctionCall1(interval_out,
- IntervalPGetDatum(interval)));
+ IntervalPGetDatum(interval)));
len = strlen(str) + VARHDRSZ;
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type interval: \"%s\"",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(str))))));
+ PointerGetDatum(str))))));
sp = VARDATA(str);
dp = dstr;
timestamp_trunc(PG_FUNCTION_ARGS)
{
text *units = PG_GETARG_TEXT_P(0);
- Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
Timestamp result;
int type,
val;
switch (val)
{
case DTK_WEEK:
- {
- int woy;
-
- woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
- /*
- * If it is week 52/53 and the month is January,
- * then the week must belong to the previous year.
- * Also, some December dates belong to the next year.
- */
- if (woy >= 52 && tm->tm_mon == 1)
- --tm->tm_year;
- if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
- ++tm->tm_year;
- isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
- tm->tm_hour = 0;
- tm->tm_min = 0;
- tm->tm_sec = 0;
- fsec = 0;
- break;
- }
+ {
+ int woy;
+
+ woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
+
+ /*
+ * If it is week 52/53 and the month is January, then the
+ * week must belong to the previous year. Also, some
+ * December dates belong to the next year.
+ */
+ if (woy >= 52 && tm->tm_mon == 1)
+ --tm->tm_year;
+ if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
+ ++tm->tm_year;
+ isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
+ tm->tm_hour = 0;
+ tm->tm_min = 0;
+ tm->tm_sec = 0;
+ fsec = 0;
+ break;
+ }
case DTK_MILLENNIUM:
/* see comments in timestamptz_trunc */
if (tm->tm_year > 0)
switch (val)
{
case DTK_WEEK:
- {
- int woy;
-
- woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
- /*
- * If it is week 52/53 and the month is January,
- * then the week must belong to the previous year.
- * Also, some December dates belong to the next year.
- */
- if (woy >= 52 && tm->tm_mon == 1)
- --tm->tm_year;
- if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
- ++tm->tm_year;
- isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
- tm->tm_hour = 0;
- tm->tm_min = 0;
- tm->tm_sec = 0;
- fsec = 0;
- redotz = true;
- break;
- }
+ {
+ int woy;
+
+ woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
+
+ /*
+ * If it is week 52/53 and the month is January, then the
+ * week must belong to the previous year. Also, some
+ * December dates belong to the next year.
+ */
+ if (woy >= 52 && tm->tm_mon == 1)
+ --tm->tm_year;
+ if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
+ ++tm->tm_year;
+ isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
+ tm->tm_hour = 0;
+ tm->tm_min = 0;
+ tm->tm_sec = 0;
+ fsec = 0;
+ redotz = true;
+ break;
+ }
/* one may consider DTK_THOUSAND and DTK_HUNDRED... */
case DTK_MILLENNIUM:
/*
* truncating to the millennium? what is this supposed to
- * mean? let us put the first year of the millennium...
- * i.e. -1000, 1, 1001, 2001...
+ * mean? let us put the first year of the millennium... i.e.
+ * -1000, 1, 1001, 2001...
*/
if (tm->tm_year > 0)
tm->tm_year = ((tm->tm_year + 999) / 1000) * 1000 - 999;
case DTK_DECADE:
/*
- * truncating to the decade? first year of the decade.
- * must not be applied if year was truncated before!
+ * truncating to the decade? first year of the decade. must
+ * not be applied if year was truncated before!
*/
if (val != DTK_MILLENNIUM && val != DTK_CENTURY)
{
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("timestamp with time zone units \"%s\" not "
- "supported", lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not "
+ "supported", lowunits)));
result = 0;
}
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp with time zone units \"%s\" not recognized",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not recognized",
+ lowunits)));
result = 0;
}
{
switch (val)
{
- /* fall through */
+ /* fall through */
case DTK_MILLENNIUM:
/* caution: C division may have negative remainder */
tm->tm_year = (tm->tm_year / 1000) * 1000;
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("interval units \"%s\" not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ PointerGetDatum(units))))));
*result = *interval;
}
if (!*year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot calculate week number without year information")));
+ errmsg("cannot calculate week number without year information")));
/* fourth day of current year */
day4 = date2j(*year, 1, 4);
day0 = j2day(day4 - 1);
/*
- * We need the first week containing a Thursday, otherwise this day
- * falls into the previous year for purposes of counting weeks
+ * We need the first week containing a Thursday, otherwise this day falls
+ * into the previous year for purposes of counting weeks
*/
if (dayn < day4 - day0)
{
result = (dayn - (day4 - day0)) / 7 + 1;
/*
- * Sometimes the last few days in a year will fall into the first week
- * of the next year, so check for this.
+ * Sometimes the last few days in a year will fall into the first week of
+ * the next year, so check for this.
*/
if (result >= 52)
{
day0 = j2day(day4 - 1);
/*
- * We need the first week containing a Thursday, otherwise this day
- * falls into the previous year for purposes of counting weeks
+ * We need the first week containing a Thursday, otherwise this day falls
+ * into the previous year for purposes of counting weeks
*/
if (dayn < day4 - day0)
{
result = (dayn - (day4 - day0)) / 7 + 1;
/*
- * Sometimes the last few days in a year will fall into the first week
- * of the next year, so check for this.
+ * Sometimes the last few days in a year will fall into the first week of
+ * the next year, so check for this.
*/
if (result >= 52)
{
timestamp_part(PG_FUNCTION_ARGS)
{
text *units = PG_GETARG_TEXT_P(0);
- Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
float8 result;
int type,
val;
case DTK_DECADE:
/*
- * what is a decade wrt dates? let us assume that decade
- * 199 is 1990 thru 1999... decade 0 starts on year 1 BC,
- * and -1 is 11 BC thru 2 BC...
+ * what is a decade wrt dates? let us assume that decade 199
+ * is 1990 thru 1999... decade 0 starts on year 1 BC, and -1
+ * is 11 BC thru 2 BC...
*/
if (tm->tm_year >= 0)
result = tm->tm_year / 10;
result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
#ifdef HAVE_INT64_TIMESTAMP
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + (fsec / 1000000.0)) / (double)SECS_PER_DAY;
+ tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY;
#else
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + fsec) / (double)SECS_PER_DAY;
+ tm->tm_sec + fsec) / (double) SECS_PER_DAY;
#endif
break;
TimestampTz timestamptz;
/*
- * convert to timestamptz to produce consistent
- * results
+ * convert to timestamptz to produce consistent results
*/
if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) != 0)
ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
tz = DetermineTimeZoneOffset(tm, global_timezone);
if (tm2timestamp(tm, fsec, &tz, ×tamptz) != 0)
ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
#ifdef HAVE_INT64_TIMESTAMP
result = (timestamptz - SetEpochTimestamp()) / 1000000.0;
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp units \"%s\" not recognized", lowunits)));
+ errmsg("timestamp units \"%s\" not recognized", lowunits)));
result = 0;
}
case DTK_TZ_MINUTE:
result = -tz;
result /= MINS_PER_HOUR;
- FMODULO(result, dummy, (double)MINS_PER_HOUR);
+ FMODULO(result, dummy, (double) MINS_PER_HOUR);
break;
case DTK_TZ_HOUR:
dummy = -tz;
- FMODULO(dummy, result, (double)SECS_PER_HOUR);
+ FMODULO(dummy, result, (double) SECS_PER_HOUR);
break;
case DTK_MICROSEC:
result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
#ifdef HAVE_INT64_TIMESTAMP
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + (fsec / 1000000.0)) / (double)SECS_PER_DAY;
+ tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY;
#else
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + fsec) / (double)SECS_PER_DAY;
+ tm->tm_sec + fsec) / (double) SECS_PER_DAY;
#endif
break;
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("timestamp with time zone units \"%s\" not supported",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not supported",
+ lowunits)));
result = 0;
}
{
case DTK_EPOCH:
#ifdef HAVE_INT64_TIMESTAMP
- result = (timestamp - SetEpochTimestamp()) /1000000.0;
+ result = (timestamp - SetEpochTimestamp()) / 1000000.0;
#else
result = timestamp - SetEpochTimestamp();
#endif
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("timestamp with time zone units \"%s\" not supported",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not supported",
+ lowunits)));
result = 0;
}
}
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp with time zone units \"%s\" not recognized",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not recognized",
+ lowunits)));
result = 0;
}
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("interval units \"%s\" not supported",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
result = interval->time;
#endif
result += (DAYS_PER_YEAR * SECS_PER_DAY) * (interval->month / MONTHS_PER_YEAR);
- result += ((double)DAYS_PER_MONTH * SECS_PER_DAY) * (interval->month % MONTHS_PER_YEAR);
+ result += ((double) DAYS_PER_MONTH * SECS_PER_DAY) * (interval->month % MONTHS_PER_YEAR);
result += interval->day * SECS_PER_DAY;
}
else
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("interval units \"%s\" not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ PointerGetDatum(units))))));
result = 0;
}
}
-/* timestamp_zone()
- * Encode timestamp type with specified time zone.
- * This function is just timestamp2timestamptz() except instead of
+/* timestamp_zone()
+ * Encode timestamp type with specified time zone.
+ * This function is just timestamp2timestamptz() except instead of
* shifting to the global timezone, we shift to the specified timezone.
* This is different from the other AT TIME ZONE cases because instead
* of shifting to a _to_ a new time zone, it sets the time to _be_ the
{
text *zone = PG_GETARG_TEXT_P(0);
Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
- TimestampTz result;
+ TimestampTz result;
int tz;
- pg_tz *tzp;
- char tzname[TZ_STRLEN_MAX + 1];
- int len;
-
+ pg_tz *tzp;
+ char tzname[TZ_STRLEN_MAX + 1];
+ int len;
+
if (TIMESTAMP_NOT_FINITE(timestamp))
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the timezone
- * database (to handle cases like "America/New_York"), and if that
- * fails, we look in the date token table (to handle cases like "EST").
- */
+ * Look up the requested timezone. First we look in the timezone database
+ * (to handle cases like "America/New_York"), and if that fails, we look
+ * in the date token table (to handle cases like "EST").
+ */
len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
memcpy(tzname, VARDATA(zone), len);
tzname[len] = '\0';
{
/* Apply the timezone change */
struct pg_tm tm;
- fsec_t fsec;
+ fsec_t fsec;
if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, tzp) != 0)
ereport(ERROR,
timestamp_izone(PG_FUNCTION_ARGS)
{
Interval *zone = PG_GETARG_INTERVAL_P(0);
- Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
TimestampTz result;
int tz;
if (zone->month != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not specify month",
- DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ errmsg("interval time zone \"%s\" must not specify month",
+ DatumGetCString(DirectFunctionCall1(interval_out,
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = zone->time / USECS_PER_SEC;
Datum
timestamp_timestamptz(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
PG_RETURN_TIMESTAMPTZ(timestamp2timestamptz(timestamp));
}
Timestamp result;
int tz;
pg_tz *tzp;
- char tzname[TZ_STRLEN_MAX + 1];
- int len;
+ char tzname[TZ_STRLEN_MAX + 1];
+ int len;
if (TIMESTAMP_NOT_FINITE(timestamp))
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the timezone
- * database (to handle cases like "America/New_York"), and if that
- * fails, we look in the date token table (to handle cases like "EST").
- */
+ * Look up the requested timezone. First we look in the timezone database
+ * (to handle cases like "America/New_York"), and if that fails, we look
+ * in the date token table (to handle cases like "EST").
+ */
len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
memcpy(tzname, VARDATA(zone), len);
tzname[len] = '\0';
{
/* Apply the timezone change */
struct pg_tm tm;
- fsec_t fsec;
+ fsec_t fsec;
if (timestamp2tm(timestamp, &tz, &tm, &fsec, NULL, tzp) != 0)
ereport(ERROR,
if (zone->month != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not specify month",
- DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ errmsg("interval time zone \"%s\" must not specify month",
+ DatumGetCString(DirectFunctionCall1(interval_out,
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = -(zone->time / USECS_PER_SEC);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.46 2005/09/24 17:53:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.47 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
else
{
/*
- * Otherwise it's binary. This allows things like cast('1001' as
- * bit) to work transparently.
+ * Otherwise it's binary. This allows things like cast('1001' as bit)
+ * to work transparently.
*/
bit_not_hex = true;
sp = input_string;
bitlen = slen * 4;
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod <= 0)
atttypmod = bitlen;
else if (bitlen != atttypmod)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH),
- errmsg("bit string length %d does not match type bit(%d)",
- bitlen, atttypmod)));
+ errmsg("bit string length %d does not match type bit(%d)",
+ bitlen, atttypmod)));
len = VARBITTOTALLEN(atttypmod);
/* set to 0 so that *r is always initialised and string is zero-padded */
}
/*
- * Go back one step if we printed a hex number that was not part of
- * the bitstring anymore
+ * Go back one step if we printed a hex number that was not part of the
+ * bitstring anymore
*/
if (i > len)
r--;
bit_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
errmsg("invalid length in external bit string")));
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod > 0 && bitlen != atttypmod)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH),
- errmsg("bit string length %d does not match type bit(%d)",
- bitlen, atttypmod)));
+ errmsg("bit string length %d does not match type bit(%d)",
+ bitlen, atttypmod)));
len = VARBITTOTALLEN(bitlen);
result = (VarBit *) palloc(len);
if (!isExplicit)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH),
- errmsg("bit string length %d does not match type bit(%d)",
- VARBITLEN(arg), len)));
+ errmsg("bit string length %d does not match type bit(%d)",
+ VARBITLEN(arg), len)));
rlen = VARBITTOTALLEN(len);
/* set to 0 so that string is zero-padded */
Min(VARBITBYTES(result), VARBITBYTES(arg)));
/*
- * Make sure last byte is zero-padded if needed. This is useless but
- * safe if source data was shorter than target length (we assume the
- * last byte of the source data was itself correctly zero-padded).
+ * Make sure last byte is zero-padded if needed. This is useless but safe
+ * if source data was shorter than target length (we assume the last byte
+ * of the source data was itself correctly zero-padded).
*/
ipad = VARBITPAD(result);
if (ipad > 0)
bitlen = slen * 4;
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod <= 0)
atttypmod = bitlen;
varbit_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
errmsg("invalid length in external bit string")));
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod > 0 && bitlen > atttypmod)
ereport(ERROR,
else
{
/*
- * OK, we've got a true substring starting at position s1-1 and
- * ending at position e1-1
+ * OK, we've got a true substring starting at position s1-1 and ending
+ * at position e1-1
*/
rbitlen = e1 - s1;
len = VARBITTOTALLEN(rbitlen);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.112 2005/07/29 12:59:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.113 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
maxlen = len;
else
{
- size_t charlen; /* number of CHARACTERS in the input */
+ size_t charlen; /* number of CHARACTERS in the input */
maxlen = atttypmod - VARHDRSZ;
charlen = pg_mbstrlen_with_len(s, len);
}
/*
- * Now we set maxlen to the necessary byte length, not
- * the number of CHARACTERS!
+ * Now we set maxlen to the necessary byte length, not the number
+ * of CHARACTERS!
*/
maxlen = len = mbmaxlen;
}
else
{
/*
- * Now we set maxlen to the necessary byte length, not
- * the number of CHARACTERS!
+ * Now we set maxlen to the necessary byte length, not the number
+ * of CHARACTERS!
*/
maxlen = len + (maxlen - charlen);
}
bpcharin(PG_FUNCTION_ARGS)
{
char *s = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
bpcharrecv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
char *r;
char *s;
int i;
- int charlen; /* number of characters in the input string
- * + VARHDRSZ */
+ int charlen; /* number of characters in the input string +
+ * VARHDRSZ */
/* No work if typmod is invalid */
if (maxlen < (int32) VARHDRSZ)
for (i = maxmblen - VARHDRSZ; i < len - VARHDRSZ; i++)
if (*(VARDATA(source) + i) != ' ')
ereport(ERROR,
- (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character(%d)",
- maxlen - VARHDRSZ)));
+ (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
+ errmsg("value too long for type character(%d)",
+ maxlen - VARHDRSZ)));
}
len = maxmblen;
/*
- * XXX: at this point, maxlen is the necessary byte
- * length+VARHDRSZ, not the number of CHARACTERS!
+ * XXX: at this point, maxlen is the necessary byte length+VARHDRSZ,
+ * not the number of CHARACTERS!
*/
maxlen = len;
}
else
{
/*
- * XXX: at this point, maxlen is the necessary byte
- * length+VARHDRSZ, not the number of CHARACTERS!
+ * XXX: at this point, maxlen is the necessary byte length+VARHDRSZ,
+ * not the number of CHARACTERS!
*/
maxlen = len + (maxlen - charlen);
}
if (s[j] != ' ')
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character varying(%d)",
- (int) maxlen)));
+ errmsg("value too long for type character varying(%d)",
+ (int) maxlen)));
}
len = mbmaxlen;
varcharin(PG_FUNCTION_ARGS)
{
char *s = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
varcharrecv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
int32 atttypmod = PG_GETARG_INT32(2);
- VarChar *result;
+ VarChar *result;
char *str;
int nbytes;
if (*(VARDATA(source) + i) != ' ')
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character varying(%d)",
- maxlen - VARHDRSZ)));
+ errmsg("value too long for type character varying(%d)",
+ maxlen - VARHDRSZ)));
}
len = maxmblen + VARHDRSZ;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.135 2005/09/24 17:53:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.136 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
else
{
/*
- * We should never get here. The first pass should not allow
- * it.
+ * We should never get here. The first pass should not allow it.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
{
S1 = Max(S, 1);
- if (length_not_specified) /* special case - get length to
- * end of string */
+ if (length_not_specified) /* special case - get length to end of
+ * string */
L1 = -1;
else
{
int E = S + length;
/*
- * A negative value for L is the only way for the end position
- * to be before the start. SQL99 says to throw an error.
+ * A negative value for L is the only way for the end position to
+ * be before the start. SQL99 says to throw an error.
*/
if (E < S)
ereport(ERROR,
(errcode(ERRCODE_SUBSTRING_ERROR),
- errmsg("negative substring length not allowed")));
+ errmsg("negative substring length not allowed")));
/*
- * A zero or negative value for the end position can happen if
- * the start was negative or one. SQL99 says to return a
- * zero-length string.
+ * A zero or negative value for the end position can happen if the
+ * start was negative or one. SQL99 says to return a zero-length
+ * string.
*/
if (E < 1)
return PG_STR_GET_TEXT("");
}
/*
- * If the start position is past the end of the string, SQL99 says
- * to return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will
- * do that for us. Convert to zero-based starting position
+ * If the start position is past the end of the string, SQL99 says to
+ * return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will do
+ * that for us. Convert to zero-based starting position
*/
return DatumGetTextPSlice(str, S1 - 1, L1);
}
{
/*
* When encoding max length is > 1, we can't get LC without
- * detoasting, so we'll grab a conservatively large slice now and
- * go back later to do the right thing
+ * detoasting, so we'll grab a conservatively large slice now and go
+ * back later to do the right thing
*/
int32 slice_start;
int32 slice_size;
text *ret;
/*
- * if S is past the end of the string, the tuple toaster will
- * return a zero-length string to us
+ * if S is past the end of the string, the tuple toaster will return a
+ * zero-length string to us
*/
S1 = Max(S, 1);
/*
- * We need to start at position zero because there is no way to
- * know in advance which byte offset corresponds to the supplied
- * start position.
+ * We need to start at position zero because there is no way to know
+ * in advance which byte offset corresponds to the supplied start
+ * position.
*/
slice_start = 0;
- if (length_not_specified) /* special case - get length to
- * end of string */
+ if (length_not_specified) /* special case - get length to end of
+ * string */
slice_size = L1 = -1;
else
{
int E = S + length;
/*
- * A negative value for L is the only way for the end position
- * to be before the start. SQL99 says to throw an error.
+ * A negative value for L is the only way for the end position to
+ * be before the start. SQL99 says to throw an error.
*/
if (E < S)
ereport(ERROR,
(errcode(ERRCODE_SUBSTRING_ERROR),
- errmsg("negative substring length not allowed")));
+ errmsg("negative substring length not allowed")));
/*
- * A zero or negative value for the end position can happen if
- * the start was negative or one. SQL99 says to return a
- * zero-length string.
+ * A zero or negative value for the end position can happen if the
+ * start was negative or one. SQL99 says to return a zero-length
+ * string.
*/
if (E < 1)
return PG_STR_GET_TEXT("");
L1 = E - S1;
/*
- * Total slice size in bytes can't be any longer than the
- * start position plus substring length times the encoding max
- * length.
+ * Total slice size in bytes can't be any longer than the start
+ * position plus substring length times the encoding max length.
*/
slice_size = (S1 + L1) * eml;
}
slice_strlen = pg_mbstrlen_with_len(VARDATA(slice), VARSIZE(slice) - VARHDRSZ);
/*
- * Check that the start position wasn't > slice_strlen. If so,
- * SQL99 says to return a zero-length string.
+ * Check that the start position wasn't > slice_strlen. If so, SQL99
+ * says to return a zero-length string.
*/
if (S1 > slice_strlen)
return PG_STR_GET_TEXT("");
/*
- * Adjust L1 and E1 now that we know the slice string length.
- * Again remember that S1 is one based, and slice_start is zero
- * based.
+ * Adjust L1 and E1 now that we know the slice string length. Again
+ * remember that S1 is one based, and slice_start is zero based.
*/
if (L1 > -1)
E1 = Min(S1 + L1, slice_start + 1 + slice_strlen);
E1 = slice_start + 1 + slice_strlen;
/*
- * Find the start position in the slice; remember S1 is not zero
- * based
+ * Find the start position in the slice; remember S1 is not zero based
*/
p = VARDATA(slice);
for (i = 0; i < S1 - 1; i++)
int result;
/*
- * Unfortunately, there is no strncoll(), so in the non-C locale case
- * we have to do some memory copying. This turns out to be
- * significantly slower, so we optimize the case where LC_COLLATE is
- * C. We also try to optimize relatively-short strings by avoiding
- * palloc/pfree overhead.
+ * Unfortunately, there is no strncoll(), so in the non-C locale case we
+ * have to do some memory copying. This turns out to be significantly
+ * slower, so we optimize the case where LC_COLLATE is C. We also try to
+ * optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
if (lc_collate_is_c())
{
/* Win32 does not have UTF-8, so we need to map to UTF-16 */
if (GetDatabaseEncoding() == PG_UTF8)
{
- int a1len;
- int a2len;
- int r;
+ int a1len;
+ int a2len;
+ int r;
- if (len1 >= STACKBUFLEN/2)
+ if (len1 >= STACKBUFLEN / 2)
{
a1len = len1 * 2 + 2;
a1p = palloc(a1len);
a1len = STACKBUFLEN;
a1p = a1buf;
}
- if (len2 >= STACKBUFLEN/2)
+ if (len2 >= STACKBUFLEN / 2)
{
a2len = len2 * 2 + 2;
a2p = palloc(a2len);
else
{
r = MultiByteToWideChar(CP_UTF8, 0, arg1, len1,
- (LPWSTR) a1p, a1len/2);
+ (LPWSTR) a1p, a1len / 2);
if (!r)
ereport(ERROR,
(errmsg("could not convert string to UTF16: %lu",
else
{
r = MultiByteToWideChar(CP_UTF8, 0, arg2, len2,
- (LPWSTR) a2p, a2len/2);
+ (LPWSTR) a2p, a2len / 2);
if (!r)
ereport(ERROR,
(errmsg("could not convert string to UTF16: %lu",
errno = 0;
result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p);
- if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */
+ if (result == 2147483647) /* _NLSCMPERROR; missing from mingw
+ * headers */
ereport(ERROR,
(errmsg("could not compare unicode strings: %d",
errno)));
return result;
}
-#endif /* WIN32 */
+#endif /* WIN32 */
if (len1 >= STACKBUFLEN)
a1p = (char *) palloc(len1 + 1);
if (fcinfo->nargs == 2)
{
/*
- * Not passed a length - PG_GETARG_BYTEA_P_SLICE() grabs
- * everything to the end of the string if we pass it a negative
- * value for length.
+ * Not passed a length - PG_GETARG_BYTEA_P_SLICE() grabs everything to
+ * the end of the string if we pass it a negative value for length.
*/
L1 = -1;
}
int E = S + PG_GETARG_INT32(2);
/*
- * A negative value for L is the only way for the end position to
- * be before the start. SQL99 says to throw an error.
+ * A negative value for L is the only way for the end position to be
+ * before the start. SQL99 says to throw an error.
*/
if (E < S)
ereport(ERROR,
/*
* If the start position is past the end of the string, SQL99 says to
- * return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will do
- * that for us. Convert to zero-based starting position
+ * return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will do that
+ * for us. Convert to zero-based starting position
*/
PG_RETURN_BYTEA_P(PG_GETARG_BYTEA_P_SLICE(0, S1 - 1, L1));
}
/* Convert to C string (handles possible detoasting). */
/* Note we rely on being able to modify rawname below. */
rawname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(textval)));
+ PointerGetDatum(textval)));
if (!SplitIdentifierString(rawname, '.', &namelist))
ereport(ERROR,
return false; /* empty unquoted name not allowed */
/*
- * Downcase the identifier, using same code as main lexer
- * does.
+ * Downcase the identifier, using same code as main lexer does.
*
* XXX because we want to overwrite the input in-place, we cannot
- * support a downcasing transformation that increases the
- * string length. This is not a problem given the current
- * implementation of downcase_truncate_identifier, but we'll
- * probably have to do something about this someday.
+ * support a downcasing transformation that increases the string
+ * length. This is not a problem given the current implementation
+ * of downcase_truncate_identifier, but we'll probably have to do
+ * something about this someday.
*/
len = endp - curname;
downname = downcase_truncate_identifier(curname, len, false);
if (pg_database_encoding_max_length() == 1)
{
for (; p < p_end; p++)
- if (*p == '\\') return true;
+ if (*p == '\\')
+ return true;
}
else
{
for (; p < p_end; p += pg_mblen(p))
- if (*p == '\\') return true;
+ if (*p == '\\')
+ return true;
}
return false;
*/
static void
appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
- regmatch_t *pmatch, text *src_text)
+ regmatch_t *pmatch, text *src_text)
{
const char *p = VARDATA(replace_text);
const char *p_end = p + (VARSIZE(replace_text) - VARHDRSZ);
}
/*
- * Copy the text when there is a text in the left of escape char
- * or escape char is not found.
+ * Copy the text when there is a text in the left of escape char or
+ * escape char is not found.
*/
if (ch_cnt)
{
- text *append_text = text_substring(PointerGetDatum(replace_text),
- substr_start, ch_cnt, false);
+ text *append_text = text_substring(PointerGetDatum(replace_text),
+ substr_start, ch_cnt, false);
+
appendStringInfoText(str, append_text);
pfree(append_text);
}
substr_start += ch_cnt + 1;
- if (p >= p_end) /* When escape char is not found. */
+ if (p >= p_end) /* When escape char is not found. */
break;
/* See the next character of escape char. */
if (*p >= '1' && *p <= '9')
{
/* Use the back reference of regexp. */
- int idx = *p - '0';
+ int idx = *p - '0';
+
so = pmatch[idx].rm_so;
eo = pmatch[idx].rm_eo;
p++;
if (so != -1 && eo != -1)
{
/* Copy the text that is back reference of regexp. */
- text *append_text = text_substring(PointerGetDatum(src_text),
- so + 1, (eo - so), false);
+ text *append_text = text_substring(PointerGetDatum(src_text),
+ so + 1, (eo - so), false);
+
appendStringInfoText(str, append_text);
pfree(append_text);
}
text *ret_text;
text *src_text = PG_GETARG_TEXT_P(0);
int src_text_len = VARSIZE(src_text) - VARHDRSZ;
- regex_t *re = (regex_t *)PG_GETARG_POINTER(1);
+ regex_t *re = (regex_t *) PG_GETARG_POINTER(1);
text *replace_text = PG_GETARG_TEXT_P(2);
- bool global = PG_GETARG_BOOL(3);
+ bool global = PG_GETARG_BOOL(3);
StringInfo str = makeStringInfo();
int regexec_result;
regmatch_t pmatch[REGEXP_REPLACE_BACKREF_CNT];
data,
data_len,
search_start,
- NULL, /* no details */
+ NULL, /* no details */
REGEXP_REPLACE_BACKREF_CNT,
pmatch,
0);
if (regexec_result != REG_OKAY && regexec_result != REG_NOMATCH)
{
- char errMsg[100];
+ char errMsg[100];
/* re failed??? */
pg_regerror(regexec_result, re, errMsg, sizeof(errMsg));
ereport(ERROR,
- (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
- errmsg("regular expression failed: %s", errMsg)));
+ (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
+ errmsg("regular expression failed: %s", errMsg)));
}
if (regexec_result == REG_NOMATCH)
break;
- /*
- * Copy the text when there is a text in the left of matched position.
- */
+ /*
+ * Copy the text when there is a text in the left of matched position.
+ */
if (pmatch[0].rm_so - data_pos > 0)
{
- text *left_text = text_substring(PointerGetDatum(src_text),
- data_pos + 1,
- pmatch[0].rm_so - data_pos, false);
+ text *left_text = text_substring(PointerGetDatum(src_text),
+ data_pos + 1,
+ pmatch[0].rm_so - data_pos, false);
+
appendStringInfoText(str, left_text);
pfree(left_text);
}
}
/*
- * Copy the text when there is a text at the right of last matched
- * or regexp is not matched.
+ * Copy the text when there is a text at the right of last matched or
+ * regexp is not matched.
*/
if (data_pos < data_len)
{
- text *right_text = text_substring(PointerGetDatum(src_text),
- data_pos + 1, -1, true);
+ text *right_text = text_substring(PointerGetDatum(src_text),
+ data_pos + 1, -1, true);
+
appendStringInfoText(str, right_text);
pfree(right_text);
}
*/
if (fldsep_len < 1)
PG_RETURN_ARRAYTYPE_P(create_singleton_array(fcinfo, TEXTOID,
- CStringGetDatum(inputstring), 1));
+ CStringGetDatum(inputstring), 1));
/* start with end position holding the initial start position */
end_posn = 0;
if (fldnum == 1)
{
/*
- * first element return one element, 1D, array using the
- * input string
+ * first element return one element, 1D, array using the input
+ * string
*/
PG_RETURN_ARRAYTYPE_P(create_singleton_array(fcinfo, TEXTOID,
- CStringGetDatum(inputstring), 1));
+ CStringGetDatum(inputstring), 1));
}
else
{
/* otherwise create array and exit */
PG_RETURN_ARRAYTYPE_P(makeArrayResult(astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
}
}
else if (start_posn == 0)
/* interior field requested */
result_text = text_substring(PointerGetDatum(inputstring),
start_posn + fldsep_len,
- end_posn - start_posn - fldsep_len,
+ end_posn - start_posn - fldsep_len,
false);
}
/*
* We arrange to look up info about element type, including its output
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
if (my_extra->element_type != element_type)
{
/*
- * Get info about element type, including its output conversion
- * proc
+ * Get info about element type, including its output conversion proc
*/
get_type_io_data(element_type, IOFunc_output,
&my_extra->typlen, &my_extra->typbyval,
{
text *in_text = PG_GETARG_TEXT_P(0);
size_t len;
- char hexsum[MD5_HASH_LEN + 1];
+ char hexsum[MD5_HASH_LEN + 1];
text *result_text;
/* Calculate the length of the buffer using varlena metadata */
if (fcinfo->flinfo->fn_extra == NULL)
{
/* Lookup the datatype of the supplied argument */
- Oid argtypeid = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ Oid argtypeid = get_fn_expr_argtype(fcinfo->flinfo, 0);
typlen = get_typlen(argtypeid);
if (typlen == 0) /* should not happen */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.124 2005/09/24 22:54:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.125 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "utils/syscache.h"
-/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
+ /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
/*
* Constants related to size of the catcache.
case 4:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
- cur_skey[3].sk_argument)) << 9;
+ cur_skey[3].sk_argument)) << 9;
/* FALLTHROUGH */
case 3:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
- cur_skey[2].sk_argument)) << 6;
+ cur_skey[2].sk_argument)) << 6;
/* FALLTHROUGH */
case 2:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
- cur_skey[1].sk_argument)) << 3;
+ cur_skey[1].sk_argument)) << 3;
/* FALLTHROUGH */
case 1:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
- cur_skey[0].sk_argument));
+ cur_skey[0].sk_argument));
break;
default:
elog(FATAL, "wrong number of hash keys: %d", nkeys);
/*
* We don't bother to check whether the cache has finished
- * initialization yet; if not, there will be no entries in it so
- * no problem.
+ * initialization yet; if not, there will be no entries in it so no
+ * problem.
*/
/*
CreateCacheMemoryContext(void)
{
/*
- * Purely for paranoia, check that context doesn't exist; caller
- * probably did so already.
+ * Purely for paranoia, check that context doesn't exist; caller probably
+ * did so already.
*/
if (!CacheMemoryContext)
CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
"CacheMemoryContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
cp->cc_reloid, cp->cc_indexoid, cp->id, \
cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
-
#else
#define InitCatCache_DEBUG2
#endif
int i;
/*
- * first switch to the cache context so our allocations do not vanish
- * at the end of a transaction
+ * first switch to the cache context so our allocations do not vanish at
+ * the end of a transaction
*/
if (!CacheMemoryContext)
CreateCacheMemoryContext();
i+1, cache->cc_nkeys, cache->cc_key[i]); \
} \
} while(0)
-
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
CatalogCacheInitializeCache_DEBUG1;
/*
- * Open the relation without locking --- we only need the tupdesc,
- * which we assume will never change ...
+ * Open the relation without locking --- we only need the tupdesc, which
+ * we assume will never change ...
*/
relation = heap_open(cache->cc_reloid, NoLock);
Assert(RelationIsValid(relation));
/*
- * switch to the cache context so our allocations do not vanish at the
- * end of a transaction
+ * switch to the cache context so our allocations do not vanish at the end
+ * of a transaction
*/
Assert(CacheMemoryContext != NULL);
tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
/*
- * save the relation's name and relisshared flag, too (cc_relname
- * is used only for debugging purposes)
+ * save the relation's name and relisshared flag, too (cc_relname is used
+ * only for debugging purposes)
*/
cache->cc_relname = pstrdup(RelationGetRelationName(relation));
cache->cc_relisshared = RelationGetForm(relation)->relisshared;
cache->cc_isname[i] = (keytype == NAMEOID);
/*
- * Do equality-function lookup (we assume this won't need a
- * catalog lookup for any supported type)
+ * Do equality-function lookup (we assume this won't need a catalog
+ * lookup for any supported type)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
if (cache->id == INDEXRELID)
{
/*
- * Since the OIDs of indexes aren't hardwired, it's painful to
- * figure out which is which. Just force all pg_index searches to
- * be heap scans while building the relcaches.
+ * Since the OIDs of indexes aren't hardwired, it's painful to figure
+ * out which is which. Just force all pg_index searches to be heap
+ * scans while building the relcaches.
*/
if (!criticalRelcachesBuilt)
return false;
cache->id == AMNAME)
{
/*
- * Always do heap scans in pg_am, because it's so small there's
- * not much point in an indexscan anyway. We *must* do this when
- * initially building critical relcache entries, but we might as
- * well just always do it.
+ * Always do heap scans in pg_am, because it's so small there's not
+ * much point in an indexscan anyway. We *must* do this when
+ * initially building critical relcache entries, but we might as well
+ * just always do it.
*/
return false;
}
continue;
/*
- * we found a match in the cache: move it to the front of the
- * global LRU list. We also move it to the front of the list for
- * its hashbucket, in order to speed subsequent searches. (The
- * most frequently accessed elements in any hashbucket will tend
- * to be near the front of the hashbucket's list.)
+ * we found a match in the cache: move it to the front of the global
+ * LRU list. We also move it to the front of the list for its
+ * hashbucket, in order to speed subsequent searches. (The most
+ * frequently accessed elements in any hashbucket will tend to be near
+ * the front of the hashbucket's list.)
*/
DLMoveToFront(&ct->lrulist_elem);
DLMoveToFront(&ct->cache_elem);
/*
- * If it's a positive entry, bump its refcount and return it. If
- * it's negative, we can report failure to the caller.
+ * If it's a positive entry, bump its refcount and return it. If it's
+ * negative, we can report failure to the caller.
*/
if (!ct->negative)
{
}
/*
- * Tuple was not found in cache, so we have to try to retrieve it
- * directly from the relation. If found, we will add it to the cache;
- * if not found, we will add a negative cache entry instead.
+ * Tuple was not found in cache, so we have to try to retrieve it directly
+ * from the relation. If found, we will add it to the cache; if not
+ * found, we will add a negative cache entry instead.
*
- * NOTE: it is possible for recursive cache lookups to occur while
- * reading the relation --- for example, due to shared-cache-inval
- * messages being processed during heap_open(). This is OK. It's
- * even possible for one of those lookups to find and enter the very
- * same tuple we are trying to fetch here. If that happens, we will
- * enter a second copy of the tuple into the cache. The first copy
- * will never be referenced again, and will eventually age out of the
- * cache, so there's no functional problem. This case is rare enough
- * that it's not worth expending extra cycles to detect.
+ * NOTE: it is possible for recursive cache lookups to occur while reading
+ * the relation --- for example, due to shared-cache-inval messages being
+ * processed during heap_open(). This is OK. It's even possible for one
+ * of those lookups to find and enter the very same tuple we are trying to
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
+ * will eventually age out of the cache, so there's no functional problem.
+ * This case is rare enough that it's not worth expending extra cycles to
+ * detect.
*/
relation = heap_open(cache->cc_reloid, AccessShareLock);
/*
* If tuple was not found, we need to build a negative cache entry
- * containing a fake tuple. The fake tuple has the correct key
- * columns, but nulls everywhere else.
+ * containing a fake tuple. The fake tuple has the correct key columns,
+ * but nulls everywhere else.
*
- * In bootstrap mode, we don't build negative entries, because the
- * cache invalidation mechanism isn't alive and can't clear them
- * if the tuple gets created later. (Bootstrap doesn't do UPDATEs,
- * so it doesn't need cache inval for that.)
+ * In bootstrap mode, we don't build negative entries, because the cache
+ * invalidation mechanism isn't alive and can't clear them if the tuple
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * cache inval for that.)
*/
if (ct == NULL)
{
cache->cc_relname, hashIndex);
/*
- * We are not returning the negative entry to the caller, so leave
- * its refcount zero.
+ * We are not returning the negative entry to the caller, so leave its
+ * refcount zero.
*/
return NULL;
Dlelem *elt;
CatCList *cl;
CatCTup *ct;
- List * volatile ctlist;
+ List *volatile ctlist;
ListCell *ctlist_item;
int nmembers;
bool ordered;
/*
* compute a hash value of the given keys for faster search. We don't
- * presently divide the CatCList items into buckets, but this still
- * lets us skip non-matching items quickly most of the time.
+ * presently divide the CatCList items into buckets, but this still lets
+ * us skip non-matching items quickly most of the time.
*/
lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
/*
* We found a matching list: mark it as touched since the last
- * CatalogCacheCleanup() sweep. Also move the list to the front
- * of the cache's list-of-lists, to speed subsequent searches.
- * (We do not move the members to the fronts of their hashbucket
- * lists, however, since there's no point in that unless they are
- * searched for individually.)
+ * CatalogCacheCleanup() sweep. Also move the list to the front of
+ * the cache's list-of-lists, to speed subsequent searches. (We do not
+ * move the members to the fronts of their hashbucket lists, however,
+ * since there's no point in that unless they are searched for
+ * individually.)
*/
cl->touched = true;
DLMoveToFront(&cl->cache_elem);
* relation. For each matching tuple found in the relation, use an
* existing cache entry if possible, else build a new one.
*
- * We have to bump the member refcounts temporarily to ensure they
- * won't get dropped from the cache while loading other members.
- * We use a PG_TRY block to ensure we can undo those refcounts if
- * we get an error before we finish constructing the CatCList.
+ * We have to bump the member refcounts temporarily to ensure they won't get
+ * dropped from the cache while loading other members. We use a PG_TRY
+ * block to ensure we can undo those refcounts if we get an error before
+ * we finish constructing the CatCList.
*/
ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
ct = (CatCTup *) DLE_VAL(elt);
if (ct->dead || ct->negative)
- continue; /* ignore dead and negative entries */
+ continue; /* ignore dead and negative entries */
if (ct->hash_value != hashValue)
- continue; /* quickly skip entry if wrong hash val */
+ continue; /* quickly skip entry if wrong hash val */
if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
- continue; /* not same tuple */
+ continue; /* not same tuple */
/*
* Found a match, but can't use it if it belongs to another
heap_freetuple(ntp);
/*
- * We are now past the last thing that could trigger an elog before
- * we have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * We are now past the last thing that could trigger an elog before we
+ * have finished building the CatCList and remembering it in the
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
MemoryContext oldcxt;
/*
- * Allocate CatCTup header in cache memory, and copy the tuple there
- * too.
+ * Allocate CatCTup header in cache memory, and copy the tuple there too.
*/
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
CacheHdr->ch_ntup++;
/*
- * If we've exceeded the desired size of the caches, try to throw away
- * the least recently used entry(s). NB: be careful not to throw away
- * the newly-built entry...
+ * If we've exceeded the desired size of the caches, try to throw away the
+ * least recently used entry(s). NB: be careful not to throw away the
+ * newly-built entry...
*/
if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
CatalogCacheCleanup(ct);
*prevelt;
/*
- * Each time we have to do this, try to cut the cache size down to
- * about 90% of the maximum.
+ * Each time we have to do this, try to cut the cache size down to about
+ * 90% of the maximum.
*/
tup_target = (CacheHdr->ch_maxtup * 9) / 10;
/*
- * Our strategy for managing CatCLists is that, each time we have to
- * throw away some cache entries, we first move-to-front all the members
- * of CatCLists that have been touched since the last cleanup sweep.
- * Then we do strict LRU elimination by individual tuples, zapping a list
- * if any of its members gets zapped. Before PostgreSQL 8.1, we moved
- * members to front each time their owning list was touched, which was
- * arguably more fair in balancing list members against standalone tuples
- * --- but the overhead for large lists was horrendous. This scheme is
- * more heavily biased towards preserving lists, but that is not
- * necessarily bad either.
+ * Our strategy for managing CatCLists is that, each time we have to throw
+ * away some cache entries, we first move-to-front all the members of
+ * CatCLists that have been touched since the last cleanup sweep. Then we
+ * do strict LRU elimination by individual tuples, zapping a list if any
+ * of its members gets zapped. Before PostgreSQL 8.1, we moved members to
+ * front each time their owning list was touched, which was arguably more
+ * fair in balancing list members against standalone tuples --- but the
+ * overhead for large lists was horrendous. This scheme is more heavily
+ * biased towards preserving lists, but that is not necessarily bad
+ * either.
*/
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
{
Assert(cl->cl_magic == CL_MAGIC);
if (cl->touched && !cl->dead)
{
- int i;
+ int i;
for (i = 0; i < cl->n_members; i++)
DLMoveToFront(&cl->members[i]->lrulist_elem);
if (attindex > 0)
{
/*
- * Here we must be careful in case the caller passed a C
- * string where a NAME is wanted: convert the given argument
- * to a correctly padded NAME. Otherwise the memcpy() done in
+ * Here we must be careful in case the caller passed a C string
+ * where a NAME is wanted: convert the given argument to a
+ * correctly padded NAME. Otherwise the memcpy() done in
* heap_formtuple could fall off the end of memory.
*/
if (cache->cc_isname[i])
void
PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
- void (*function) (int, uint32, ItemPointer, Oid))
+ void (*function) (int, uint32, ItemPointer, Oid))
{
CatCache *ccp;
Oid reloid;
*
* Also, whenever we see an operation on a pg_class or pg_attribute tuple,
* we register a relcache flush operation for the relation described by that
- * tuple. pg_class updates trigger an smgr flush operation as well.
+ * tuple. pg_class updates trigger an smgr flush operation as well.
*
* We keep the relcache and smgr flush requests in lists separate from the
- * catcache tuple flush requests. This allows us to issue all the pending
+ * catcache tuple flush requests. This allows us to issue all the pending
* catcache flushes before we issue relcache flushes, which saves us from
* loading a catcache tuple during relcache load only to flush it again
* right away. Also, we avoid queuing multiple relcache flush requests for
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.72 2005/06/17 22:32:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.73 2005/10/15 02:49:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
struct TransInvalidationInfo *parent;
/* Subtransaction nesting depth */
- int my_level;
+ int my_level;
/* head of current-command event list */
InvalidationListHeader CurrentCmdInvalidMsgs;
static int cache_callback_count = 0;
/* info values for 2PC callback */
-#define TWOPHASE_INFO_MSG 0 /* SharedInvalidationMessage */
-#define TWOPHASE_INFO_FILE_BEFORE 1 /* relcache file inval */
-#define TWOPHASE_INFO_FILE_AFTER 2 /* relcache file inval */
+#define TWOPHASE_INFO_MSG 0 /* SharedInvalidationMessage */
+#define TWOPHASE_INFO_FILE_BEFORE 1 /* relcache file inval */
+#define TWOPHASE_INFO_FILE_AFTER 2 /* relcache file inval */
static void PersistInvalidationMessage(SharedInvalidationMessage *msg);
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
sizeof(InvalidationChunk) +
- (FIRSTCHUNKSIZE - 1) *sizeof(SharedInvalidationMessage));
+ (FIRSTCHUNKSIZE - 1) *sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = FIRSTCHUNKSIZE;
chunk->next = *listHdr;
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
sizeof(InvalidationChunk) +
- (chunksize - 1) *sizeof(SharedInvalidationMessage));
+ (chunksize - 1) *sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = chunksize;
chunk->next = *listHdr;
ProcessMessageList(hdr->rclist,
if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
msg->rc.relId == relId)
- return);
+ return);
/* OK, add the item */
msg.rc.id = SHAREDINVALRELCACHE_ID;
ProcessMessageList(hdr->rclist,
if (msg->sm.id == SHAREDINVALSMGR_ID &&
RelFileNodeEquals(msg->sm.rnode, rnode))
- return);
+ return);
/* OK, add the item */
msg.sm.id = SHAREDINVALSMGR_ID;
else if (msg->id == SHAREDINVALSMGR_ID)
{
/*
- * We could have smgr entries for relations of other databases,
- * so no short-circuit test is possible here.
+ * We could have smgr entries for relations of other databases, so no
+ * short-circuit test is possible here.
*/
smgrclosenode(msg->sm.rnode);
}
return;
/*
- * We only need to worry about invalidation for tuples that are in
- * system relations; user-relation tuples are never in catcaches and
- * can't affect the relcache either.
+ * We only need to worry about invalidation for tuples that are in system
+ * relations; user-relation tuples are never in catcaches and can't affect
+ * the relcache either.
*/
if (!IsSystemRelation(relation))
return;
/*
- * TOAST tuples can likewise be ignored here. Note that TOAST tables
- * are considered system relations so they are not filtered by the
- * above test.
+ * TOAST tuples can likewise be ignored here. Note that TOAST tables are
+ * considered system relations so they are not filtered by the above test.
*/
if (IsToastRelation(relation))
return;
databaseId = MyDatabaseId;
/*
- * We need to send out an smgr inval as well as a relcache inval.
- * This is needed because other backends might possibly possess
- * smgr cache but not relcache entries for the target relation.
+ * We need to send out an smgr inval as well as a relcache inval. This
+ * is needed because other backends might possibly possess smgr cache
+ * but not relcache entries for the target relation.
*
- * Note: during a pg_class row update that assigns a new
- * relfilenode or reltablespace value, we will be called on both
- * the old and new tuples, and thus will broadcast invalidation
- * messages showing both the old and new RelFileNode values. This
- * ensures that other backends will close smgr references to the
- * old file.
+ * Note: during a pg_class row update that assigns a new relfilenode or
+ * reltablespace value, we will be called on both the old and new
+ * tuples, and thus will broadcast invalidation messages showing both
+ * the old and new RelFileNode values. This ensures that other
+ * backends will close smgr references to the old file.
*
* XXX possible future cleanup: it might be better to trigger smgr
* flushes explicitly, rather than indirectly from pg_class updates.
relationId = atttup->attrelid;
/*
- * KLUGE ALERT: we always send the relcache event with
- * MyDatabaseId, even if the rel in question is shared (which we
- * can't easily tell). This essentially means that only backends
- * in this same database will react to the relcache flush request.
- * This is in fact appropriate, since only those backends could
- * see our pg_attribute change anyway. It looks a bit ugly
- * though.
+ * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
+ * even if the rel in question is shared (which we can't easily tell).
+ * This essentially means that only backends in this same database
+ * will react to the relcache flush request. This is in fact
+ * appropriate, since only those backends could see our pg_attribute
+ * change anyway. It looks a bit ugly though.
*/
databaseId = MyDatabaseId;
}
/*
* AtPrepare_Inval
- * Save the inval lists state at 2PC transaction prepare.
+ * Save the inval lists state at 2PC transaction prepare.
*
* In this phase we just generate 2PC records for all the pending invalidation
* work.
Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
/*
- * Relcache init file invalidation requires processing both before
- * and after we send the SI messages.
+ * Relcache init file invalidation requires processing both before and
+ * after we send the SI messages.
*/
if (transInvalInfo->RelcacheInitFileInval)
RegisterTwoPhaseRecord(TWOPHASE_RM_INVAL_ID, TWOPHASE_INFO_FILE_BEFORE,
/*
* PostPrepare_Inval
- * Clean up after successful PREPARE.
+ * Clean up after successful PREPARE.
*
* Here, we want to act as though the transaction aborted, so that we will
* undo any syscache changes it made, thereby bringing us into sync with the
/*
* PersistInvalidationMessage
- * Write an invalidation message to the 2PC state file.
+ * Write an invalidation message to the 2PC state file.
*/
static void
PersistInvalidationMessage(SharedInvalidationMessage *msg)
switch (info)
{
case TWOPHASE_INFO_MSG:
- msg = (SharedInvalidationMessage *) recdata;
+ msg = (SharedInvalidationMessage *) recdata;
Assert(len == sizeof(SharedInvalidationMessage));
SendSharedInvalidMessage(msg);
break;
Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
/*
- * Relcache init file invalidation requires processing both before
- * and after we send the SI messages. However, we need not do
- * anything unless we committed.
+ * Relcache init file invalidation requires processing both before and
+ * after we send the SI messages. However, we need not do anything
+ * unless we committed.
*/
if (transInvalInfo->RelcacheInitFileInval)
RelationCacheInitFileInvalidate(true);
AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
- &transInvalInfo->CurrentCmdInvalidMsgs);
+ &transInvalInfo->CurrentCmdInvalidMsgs);
ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
SendSharedInvalidMessage);
CommandEndInvalidationMessages(void)
{
/*
- * You might think this shouldn't be called outside any transaction,
- * but bootstrap does it, and also ABORT issued when not in a
- * transaction. So just quietly return if no state to work on.
+ * You might think this shouldn't be called outside any transaction, but
+ * bootstrap does it, and also ABORT issued when not in a transaction. So
+ * just quietly return if no state to work on.
*/
if (transInvalInfo == NULL)
return;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.128 2005/10/11 17:27:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.129 2005/10/15 02:49:31 momjian Exp $
*
* NOTES
* Eventually, the index information should go through here, too.
Oid opclass = InvalidOid;
/*
- * Search pg_amop to see if the target operator is registered as the
- * "=" operator of any hash opclass. If the operator is registered in
- * multiple opclasses, assume we can use the associated hash function
- * from any one.
+ * Search pg_amop to see if the target operator is registered as the "="
+ * operator of any hash opclass. If the operator is registered in
+ * multiple opclasses, assume we can use the associated hash function from
+ * any one.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(opno),
Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);
/*
- * Array types get their typelem as parameter; everybody else gets
- * their own type OID as parameter. (This is a change from 8.0,
- * in which only composite types got their own OID as parameter.)
+ * Array types get their typelem as parameter; everybody else gets their
+ * own type OID as parameter. (This is a change from 8.0, in which only
+ * composite types got their own OID as parameter.)
*/
if (OidIsValid(typeStruct->typelem))
return typeStruct->typelem;
/* Convert C string to a value of the given type */
datum = OidFunctionCall3(type->typinput,
CStringGetDatum(strDefaultVal),
- ObjectIdGetDatum(getTypeIOParam(typeTuple)),
+ ObjectIdGetDatum(getTypeIOParam(typeTuple)),
Int32GetDatum(-1));
/* Build a Const node containing the value */
expr = (Node *) makeConst(typid,
{
/*
* For BPCHAR, the max width is also the only width. Otherwise we
- * need to guess about the typical data width given the max. A
- * sliding scale for percentage of max width seems reasonable.
+ * need to guess about the typical data width given the max. A sliding
+ * scale for percentage of max width seems reasonable.
*/
if (typid == BPCHAROID)
return maxwidth;
/*
* Beyond 1000, assume we're looking at something like
- * "varchar(10000)" where the limit isn't actually reached often,
- * and use a fixed estimate.
+ * "varchar(10000)" where the limit isn't actually reached often, and
+ * use a fixed estimate.
*/
return 32 + (1000 - 32) / 2;
}
values, nvalues);
/*
- * If the element type is pass-by-reference, we now have a bunch
- * of Datums that are pointers into the syscache value. Copy them
- * to avoid problems if syscache decides to drop the entry.
+ * If the element type is pass-by-reference, we now have a bunch of
+ * Datums that are pointers into the syscache value. Copy them to
+ * avoid problems if syscache decides to drop the entry.
*/
if (!typeForm->typbyval)
{
statarray = DatumGetArrayTypeP(val);
/*
- * We expect the array to be a 1-D float4 array; verify that. We
- * don't need to use deconstruct_array() since the array data is
- * just going to look like a C array of float4 values.
+ * We expect the array to be a 1-D float4 array; verify that. We don't
+ * need to use deconstruct_array() since the array data is just going
+ * to look like a C array of float4 values.
*/
narrayelem = ARR_DIMS(statarray)[0];
if (ARR_NDIM(statarray) != 1 || narrayelem <= 0 ||
Oid
get_roleid_checked(const char *rolname)
{
- Oid roleid;
+ Oid roleid;
roleid = get_roleid(rolname);
if (!OidIsValid(roleid))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.229 2005/09/16 04:13:18 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.230 2005/10/15 02:49:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void write_relcache_init_file(void);
static void formrdesc(const char *relationName, Oid relationReltype,
- bool hasoids, int natts, FormData_pg_attribute *att);
+ bool hasoids, int natts, FormData_pg_attribute *att);
static HeapTuple ScanPgRelation(Oid targetRelId, bool indexOK);
static Relation AllocateRelationDesc(Relation relation, Form_pg_class relp);
/*
* Open pg_class and fetch a tuple. Force heap scan if we haven't yet
- * built the critical relcache entries (this includes initdb and
- * startup without a pg_internal.init file). The caller can also
- * force a heap scan by setting indexOK == false.
+ * built the critical relcache entries (this includes initdb and startup
+ * without a pg_internal.init file). The caller can also force a heap
+ * scan by setting indexOK == false.
*/
pg_class_desc = heap_open(RelationRelationId, AccessShareLock);
pg_class_scan = systable_beginscan(pg_class_desc, ClassOidIndexId,
/*
* Copy the relation tuple form
*
- * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
- * relacl is NOT stored in the relcache --- there'd be little point in
- * it, since we don't copy the tuple's nullvalues bitmap and hence
- * wouldn't know if the value is valid ... bottom line is that relacl
- * *cannot* be retrieved from the relcache. Get it from the syscache
- * if you need it.
+ * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE. relacl
+ * is NOT stored in the relcache --- there'd be little point in it, since
+ * we don't copy the tuple's nullvalues bitmap and hence wouldn't know if
+ * the value is valid ... bottom line is that relacl *cannot* be retrieved
+ * from the relcache. Get it from the syscache if you need it.
*/
relationForm = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
/*
* Form a scan key that selects only user attributes (attnum > 0).
- * (Eliminating system attribute rows at the index level is lots
- * faster than fetching them.)
+ * (Eliminating system attribute rows at the index level is lots faster
+ * than fetching them.)
*/
ScanKeyInit(&skey[0],
Anum_pg_attribute_attrelid,
Int16GetDatum(0));
/*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't
- * yet built the critical relcache entries (this includes initdb and
- * startup without a pg_internal.init file).
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * built the critical relcache entries (this includes initdb and startup
+ * without a pg_internal.init file).
*/
pg_attribute_desc = heap_open(AttributeRelationId, AccessShareLock);
pg_attribute_scan = systable_beginscan(pg_attribute_desc,
/*
* However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special
- * cases for attnum=1 that used to exist in fastgetattr() and
- * index_getattr().
+ * attribute: it must be zero. This eliminates the need for special cases
+ * for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
relation->rd_att->attrs[0]->attcacheoff = 0;
constr->num_check = relation->rd_rel->relchecks;
constr->check = (ConstrCheck *)
MemoryContextAllocZero(CacheMemoryContext,
- constr->num_check * sizeof(ConstrCheck));
+ constr->num_check * sizeof(ConstrCheck));
CheckConstraintFetch(relation);
}
else
int maxlocks;
/*
- * Make the private context. Parameters are set on the assumption
- * that it'll probably not contain much data.
+ * Make the private context. Parameters are set on the assumption that
+ * it'll probably not contain much data.
*/
rulescxt = AllocSetContextCreate(CacheMemoryContext,
RelationGetRelationName(relation),
relation->rd_rulescxt = rulescxt;
/*
- * allocate an array to hold the rewrite rules (the array is extended
- * if necessary)
+ * allocate an array to hold the rewrite rules (the array is extended if
+ * necessary)
*/
maxlocks = 4;
rules = (RewriteRule **)
/*
* open pg_rewrite and begin a scan
*
- * Note: since we scan the rules using RewriteRelRulenameIndexId,
- * we will be reading the rules in name order, except possibly during
- * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This
- * in turn ensures that rules will be fired in name order.
+ * Note: since we scan the rules using RewriteRelRulenameIndexId, we will be
+ * reading the rules in name order, except possibly during
+ * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This in
+ * turn ensures that rules will be fired in name order.
*/
rewrite_desc = heap_open(RewriteRelationId, AccessShareLock);
rewrite_tupdesc = RelationGetDescr(rewrite_desc);
&isnull);
Assert(!isnull);
rule_evqual_str = DatumGetCString(DirectFunctionCall1(textout,
- rule_evqual));
+ rule_evqual));
oldcxt = MemoryContextSwitchTo(rulescxt);
rule->qual = (Node *) stringToNode(rule_evqual_str);
MemoryContextSwitchTo(oldcxt);
/*
* As of 7.3 we assume the rule ordering is repeatable, because
- * RelationBuildRuleLock should read 'em in a consistent order. So
- * just compare corresponding slots.
+ * RelationBuildRuleLock should read 'em in a consistent order. So just
+ * compare corresponding slots.
*/
if (rlock1 != NULL)
{
relp = (Form_pg_class) GETSTRUCT(pg_class_tuple);
/*
- * allocate storage for the relation descriptor, and copy
- * pg_class_tuple to relation->rd_rel.
+ * allocate storage for the relation descriptor, and copy pg_class_tuple
+ * to relation->rd_rel.
*/
relation = AllocateRelationDesc(oldrelation, relp);
RelationGetRelid(relation) = relid;
/*
- * normal relations are not nailed into the cache; nor can a
- * pre-existing relation be new. It could be temp though. (Actually,
- * it could be new too, but it's okay to forget that fact if forced to
- * flush the entry.)
+ * normal relations are not nailed into the cache; nor can a pre-existing
+ * relation be new. It could be temp though. (Actually, it could be new
+ * too, but it's okay to forget that fact if forced to flush the entry.)
*/
relation->rd_refcnt = 0;
relation->rd_isnailed = false;
/*
* Make a copy of the pg_index entry for the index. Since pg_index
- * contains variable-length and possibly-null fields, we have to do
- * this honestly rather than just treating it as a Form_pg_index
- * struct.
+ * contains variable-length and possibly-null fields, we have to do this
+ * honestly rather than just treating it as a Form_pg_index struct.
*/
tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(RelationGetRelid(relation)),
ReleaseSysCache(tuple);
/*
- * indclass cannot be referenced directly through the C struct, because
- * it is after the variable-width indkey field. Therefore we extract
- * the datum the hard way and provide a direct link in the relcache.
+ * indclass cannot be referenced directly through the C struct, because it
+ * is after the variable-width indkey field. Therefore we extract the
+ * datum the hard way and provide a direct link in the relcache.
*/
indclassDatum = fastgetattr(relation->rd_indextuple,
Anum_pg_index_indclass,
amsupport = aform->amsupport;
/*
- * Make the private context to hold index access info. The reason we
- * need a context, and not just a couple of pallocs, is so that we
- * won't leak any subsidiary info attached to fmgr lookup records.
+ * Make the private context to hold index access info. The reason we need
+ * a context, and not just a couple of pallocs, is so that we won't leak
+ * any subsidiary info attached to fmgr lookup records.
*
* Context parameters are set on the assumption that it'll probably not
* contain much data.
relation->rd_supportinfo = supportinfo;
/*
- * Fill the operator and support procedure OID arrays. (aminfo and
+ * Fill the operator and support procedure OID arrays. (aminfo and
* supportinfo are left as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(relation->rd_indclass,
opcentry->supportProcs = NULL;
/*
- * To avoid infinite recursion during startup, force heap scans if
- * we're looking up info for the opclasses used by the indexes we
- * would like to reference here.
+ * To avoid infinite recursion during startup, force heap scans if we're
+ * looking up info for the opclasses used by the indexes we would like to
+ * reference here.
*/
indexOK = criticalRelcachesBuilt ||
(operatorClassOid != OID_BTREE_OPS_OID &&
operatorClassOid != INT2_BTREE_OPS_OID);
/*
- * Scan pg_amop to obtain operators for the opclass. We only fetch
- * the default ones (those with subtype zero).
+ * Scan pg_amop to obtain operators for the opclass. We only fetch the
+ * default ones (those with subtype zero).
*/
if (numStrats > 0)
{
}
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only
- * fetch the default ones (those with subtype zero).
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * the default ones (those with subtype zero).
*/
if (numSupport > 0)
{
relation->rd_refcnt = 1;
/*
- * all entries built with this routine are nailed-in-cache; none are
- * for new or temp relations.
+ * all entries built with this routine are nailed-in-cache; none are for
+ * new or temp relations.
*/
relation->rd_isnailed = true;
relation->rd_createSubid = InvalidSubTransactionId;
/*
* initialize relation tuple form
*
- * The data we insert here is pretty incomplete/bogus, but it'll serve to
- * get us launched. RelationCacheInitializePhase2() will read the
- * real data from pg_class and replace what we've done here.
+ * The data we insert here is pretty incomplete/bogus, but it'll serve to get
+ * us launched. RelationCacheInitializePhase2() will read the real data
+ * from pg_class and replace what we've done here.
*/
relation->rd_rel = (Form_pg_class) palloc0(CLASS_TUPLE_SIZE);
relation->rd_rel->reltype = relationReltype;
/*
- * It's important to distinguish between shared and non-shared
- * relations, even at bootstrap time, to make sure we know where they
- * are stored. At present, all relations that formrdesc is used for
- * are not shared.
+ * It's important to distinguish between shared and non-shared relations,
+ * even at bootstrap time, to make sure we know where they are stored. At
+ * present, all relations that formrdesc is used for are not shared.
*/
relation->rd_rel->relisshared = false;
* initialize attribute tuple form
*
* Unlike the case with the relation tuple, this data had better be right
- * because it will never be replaced. The input values must be
- * correctly defined by macros in src/include/catalog/ headers.
+ * because it will never be replaced. The input values must be correctly
+ * defined by macros in src/include/catalog/ headers.
*/
relation->rd_att = CreateTemplateTupleDesc(natts, hasoids);
relation->rd_att->tdtypeid = relationReltype;
return rd;
/*
- * no reldesc in the cache, so have RelationBuildDesc() build one and
- * add it.
+ * no reldesc in the cache, so have RelationBuildDesc() build one and add
+ * it.
*/
rd = RelationBuildDesc(relationId, NULL);
if (RelationIsValid(rd))
/* Should be called only for invalidated nailed indexes */
Assert(relation->rd_isnailed && !relation->rd_isvalid &&
relation->rd_rel->relkind == RELKIND_INDEX);
+
/*
* Read the pg_class row
*
- * Don't try to use an indexscan of pg_class_oid_index to reload the
- * info for pg_class_oid_index ...
+ * Don't try to use an indexscan of pg_class_oid_index to reload the info for
+ * pg_class_oid_index ...
*/
indexOK = (RelationGetRelid(relation) != ClassOidIndexId);
pg_class_tuple = ScanPgRelation(RelationGetRelid(relation), indexOK);
/*
* Make sure smgr and lower levels close the relation's files, if they
- * weren't closed already. If the relation is not getting deleted,
- * the next smgr access should reopen the files automatically. This
- * ensures that the low-level file access state is updated after, say,
- * a vacuum truncation.
+ * weren't closed already. If the relation is not getting deleted, the
+ * next smgr access should reopen the files automatically. This ensures
+ * that the low-level file access state is updated after, say, a vacuum
+ * truncation.
*/
RelationCloseSmgr(relation);
/*
- * Never, never ever blow away a nailed-in system relation, because
- * we'd be unable to recover. However, we must reset rd_targblock, in
- * case we got called because of a relation cache flush that was
- * triggered by VACUUM.
+ * Never, never ever blow away a nailed-in system relation, because we'd
+ * be unable to recover. However, we must reset rd_targblock, in case we
+ * got called because of a relation cache flush that was triggered by
+ * VACUUM.
*
- * If it's a nailed index, then we need to re-read the pg_class row to
- * see if its relfilenode changed. We can't necessarily do that here,
- * because we might be in a failed transaction. We assume it's okay
- * to do it if there are open references to the relcache entry (cf
- * notes for AtEOXact_RelationCache). Otherwise just mark the entry
- * as possibly invalid, and it'll be fixed when next opened.
+ * If it's a nailed index, then we need to re-read the pg_class row to see if
+ * its relfilenode changed. We can't necessarily do that here, because we
+ * might be in a failed transaction. We assume it's okay to do it if
+ * there are open references to the relcache entry (cf notes for
+ * AtEOXact_RelationCache). Otherwise just mark the entry as possibly
+ * invalid, and it'll be fixed when next opened.
*/
if (relation->rd_isnailed)
{
* Free all the subsidiary data structures of the relcache entry. We
* cannot free rd_att if we are trying to rebuild the entry, however,
* because pointers to it may be cached in various places. The rule
- * manager might also have pointers into the rewrite rules. So to
- * begin with, we can only get rid of these fields:
+ * manager might also have pointers into the rewrite rules. So to begin
+ * with, we can only get rid of these fields:
*/
FreeTriggerDesc(relation->trigdesc);
if (relation->rd_indextuple)
/*
* If we're really done with the relcache entry, blow it away. But if
- * someone is still using it, reconstruct the whole deal without
- * moving the physical RelationData record (so that the someone's
- * pointer is still valid).
+ * someone is still using it, reconstruct the whole deal without moving
+ * the physical RelationData record (so that the someone's pointer is
+ * still valid).
*/
if (!rebuild)
{
else
{
/*
- * When rebuilding an open relcache entry, must preserve ref count
- * and rd_createSubid state. Also attempt to preserve the
- * tupledesc and rewrite-rule substructures in place.
+ * When rebuilding an open relcache entry, must preserve ref count and
+ * rd_createSubid state. Also attempt to preserve the tupledesc and
+ * rewrite-rule substructures in place.
*
- * Note that this process does not touch CurrentResourceOwner; which
- * is good because whatever ref counts the entry may have do not
+ * Note that this process does not touch CurrentResourceOwner; which is
+ * good because whatever ref counts the entry may have do not
* necessarily belong to that resource owner.
*/
Oid save_relid = RelationGetRelid(relation);
{
/*
* Add this entry to list of stuff to rebuild in second pass.
- * pg_class_oid_index goes on the front of rebuildFirstList,
- * other nailed indexes on the back, and everything else into
+ * pg_class_oid_index goes on the front of rebuildFirstList, other
+ * nailed indexes on the back, and everything else into
* rebuildList (in no particular order).
*/
if (relation->rd_isnailed &&
rebuildList = list_concat(rebuildFirstList, rebuildList);
/*
- * Now zap any remaining smgr cache entries. This must happen before
- * we start to rebuild entries, since that may involve catalog fetches
- * which will re-open catalog files.
+ * Now zap any remaining smgr cache entries. This must happen before we
+ * start to rebuild entries, since that may involve catalog fetches which
+ * will re-open catalog files.
*/
smgrcloseall();
/*
* To speed up transaction exit, we want to avoid scanning the relcache
- * unless there is actually something for this routine to do. Other
- * than the debug-only Assert checks, most transactions don't create
- * any work for us to do here, so we keep a static flag that gets set
- * if there is anything to do. (Currently, this means either a relation
- * is created in the current xact, or an index list is forced.) For
- * simplicity, the flag remains set till end of top-level transaction,
- * even though we could clear it at subtransaction end in some cases.
+ * unless there is actually something for this routine to do. Other than
+ * the debug-only Assert checks, most transactions don't create any work
+ * for us to do here, so we keep a static flag that gets set if there is
+ * anything to do. (Currently, this means either a relation is created in
+ * the current xact, or an index list is forced.) For simplicity, the
+ * flag remains set till end of top-level transaction, even though we
+ * could clear it at subtransaction end in some cases.
*/
if (!need_eoxact_work
#ifdef USE_ASSERT_CHECKING
* The relcache entry's ref count should be back to its normal
* not-in-a-transaction state: 0 unless it's nailed in cache.
*
- * In bootstrap mode, this is NOT true, so don't check it ---
- * the bootstrap code expects relations to stay open across
- * start/commit transaction calls. (That seems bogus, but it's
- * not worth fixing.)
+ * In bootstrap mode, this is NOT true, so don't check it --- the
+ * bootstrap code expects relations to stay open across start/commit
+ * transaction calls. (That seems bogus, but it's not worth fixing.)
*/
#ifdef USE_ASSERT_CHECKING
if (!IsBootstrapProcessingMode())
/*
* Is it a relation created in the current subtransaction?
*
- * During subcommit, mark it as belonging to the parent, instead.
- * During subabort, simply delete the relcache entry.
+ * During subcommit, mark it as belonging to the parent, instead. During
+ * subabort, simply delete the relcache entry.
*/
if (relation->rd_createSubid == mySubid)
{
/*
* create a new tuple descriptor from the one passed in. We do this
- * partly to copy it into the cache context, and partly because the
- * new relation can't have any defaults or constraints yet; they have
- * to be added in later steps, because they require additions to
- * multiple system catalogs. We can copy attnotnull constraints here,
- * however.
+ * partly to copy it into the cache context, and partly because the new
+ * relation can't have any defaults or constraints yet; they have to be
+ * added in later steps, because they require additions to multiple system
+ * catalogs. We can copy attnotnull constraints here, however.
*/
rel->rd_att = CreateTupleDescCopy(tupDesc);
has_not_null = false;
rel->rd_rel->relowner = BOOTSTRAP_SUPERUSERID;
/*
- * Insert relation physical and logical identifiers (OIDs) into the
- * right places. Note that the physical ID (relfilenode) is initially
- * the same as the logical ID (OID).
+ * Insert relation physical and logical identifiers (OIDs) into the right
+ * places. Note that the physical ID (relfilenode) is initially the same
+ * as the logical ID (OID).
*/
rel->rd_rel->relisshared = shared_relation;
/*
* Try to load the relcache cache file. If successful, we're done for
- * now. Otherwise, initialize the cache with pre-made descriptors for
- * the critical "nailed-in" system catalogs.
+ * now. Otherwise, initialize the cache with pre-made descriptors for the
+ * critical "nailed-in" system catalogs.
*/
if (IsBootstrapProcessingMode() ||
!load_relcache_init_file())
return;
/*
- * If we didn't get the critical system indexes loaded into relcache,
- * do so now. These are critical because the catcache depends on them
- * for catcache fetches that are done during relcache load. Thus, we
- * have an infinite-recursion problem. We can break the recursion by
- * doing heapscans instead of indexscans at certain key spots. To
- * avoid hobbling performance, we only want to do that until we have
- * the critical indexes loaded into relcache. Thus, the flag
- * criticalRelcachesBuilt is used to decide whether to do heapscan or
- * indexscan at the key spots, and we set it true after we've loaded
- * the critical indexes.
+ * If we didn't get the critical system indexes loaded into relcache, do
+ * so now. These are critical because the catcache depends on them for
+ * catcache fetches that are done during relcache load. Thus, we have an
+ * infinite-recursion problem. We can break the recursion by doing
+ * heapscans instead of indexscans at certain key spots. To avoid hobbling
+ * performance, we only want to do that until we have the critical indexes
+ * loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
+ * decide whether to do heapscan or indexscan at the key spots, and we set
+ * it true after we've loaded the critical indexes.
*
- * The critical indexes are marked as "nailed in cache", partly to make
- * it easy for load_relcache_init_file to count them, but mainly
- * because we cannot flush and rebuild them once we've set
- * criticalRelcachesBuilt to true. (NOTE: perhaps it would be
- * possible to reload them by temporarily setting
- * criticalRelcachesBuilt to false again. For now, though, we just
- * nail 'em in.)
+ * The critical indexes are marked as "nailed in cache", partly to make it
+ * easy for load_relcache_init_file to count them, but mainly because we
+ * cannot flush and rebuild them once we've set criticalRelcachesBuilt to
+ * true. (NOTE: perhaps it would be possible to reload them by
+ * temporarily setting criticalRelcachesBuilt to false again. For now,
+ * though, we just nail 'em in.)
*/
if (!criticalRelcachesBuilt)
{
}
/*
- * Now, scan all the relcache entries and update anything that might
- * be wrong in the results from formrdesc or the relcache cache file.
- * If we faked up relcache entries using formrdesc, then read the real
- * pg_class rows and replace the fake entries with them. Also, if any
- * of the relcache entries have rules or triggers, load that info the
- * hard way since it isn't recorded in the cache file.
+ * Now, scan all the relcache entries and update anything that might be
+ * wrong in the results from formrdesc or the relcache cache file. If we
+ * faked up relcache entries using formrdesc, then read the real pg_class
+ * rows and replace the fake entries with them. Also, if any of the
+ * relcache entries have rules or triggers, load that info the hard way
+ * since it isn't recorded in the cache file.
*/
hash_seq_init(&status, RelationIdCache);
Form_pg_class relp;
htup = SearchSysCache(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
if (!HeapTupleIsValid(htup))
elog(FATAL, "cache lookup failed for relation %u",
if (needNewCacheFile)
{
/*
- * Force all the catcaches to finish initializing and thereby open
- * the catalogs and indexes they use. This will preload the
- * relcache with entries for all the most important system
- * catalogs and indexes, so that the init file will be most useful
- * for future backends.
+ * Force all the catcaches to finish initializing and thereby open the
+ * catalogs and indexes they use. This will preload the relcache with
+ * entries for all the most important system catalogs and indexes, so
+ * that the init file will be most useful for future backends.
*/
InitCatalogCachePhase2();
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
pgindexdesc = CreateTemplateTupleDesc(Natts_pg_index, false);
- pgindexdesc->tdtypeid = RECORDOID; /* not right, but we don't care */
+ pgindexdesc->tdtypeid = RECORDOID; /* not right, but we don't care */
pgindexdesc->tdtypmod = -1;
for (i = 0; i < Natts_pg_index; i++)
continue;
if (attrdef[i].adbin != NULL)
elog(WARNING, "multiple attrdef records found for attr %s of rel %s",
- NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
+ NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
RelationGetRelationName(relation));
else
found++;
adrel->rd_att, &isnull);
if (isnull)
elog(WARNING, "null adbin for attr %s of rel %s",
- NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
+ NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
RelationGetRelationName(relation));
else
attrdef[i].adbin = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(textout,
- val)));
+ DatumGetCString(DirectFunctionCall1(textout,
+ val)));
break;
}
RelationGetRelationName(relation));
check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
- NameStr(conform->conname));
+ NameStr(conform->conname));
/* Grab and test conbin is actually set */
val = fastgetattr(htup,
RelationGetRelationName(relation));
check[found].ccbin = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(textout,
- val)));
+ DatumGetCString(DirectFunctionCall1(textout,
+ val)));
found++;
}
*
* Since shared cache inval causes the relcache's copy of the list to go away,
* we return a copy of the list palloc'd in the caller's context. The caller
- * may list_free() the returned list after scanning it. This is necessary
+ * may list_free() the returned list after scanning it. This is necessary
* since the caller will typically be doing syscache lookups on the relevant
* indexes, and syscache lookup could cause SI messages to be processed!
*
return list_copy(relation->rd_indexlist);
/*
- * We build the list we intend to return (in the caller's context)
- * while doing the scan. After successfully completing the scan, we
- * copy that list into the relcache entry. This avoids cache-context
- * memory leakage if we get some sort of error partway through.
+ * We build the list we intend to return (in the caller's context) while
+ * doing the scan. After successfully completing the scan, we copy that
+ * list into the relcache entry. This avoids cache-context memory leakage
+ * if we get some sort of error partway through.
*/
result = NIL;
oidIndex = InvalidOid;
List *ilist;
/*
- * If relation doesn't have OIDs at all, caller is probably confused.
- * (We could just silently return InvalidOid, but it seems better to
- * throw an assertion.)
+ * If relation doesn't have OIDs at all, caller is probably confused. (We
+ * could just silently return InvalidOid, but it seems better to throw an
+ * assertion.)
*/
Assert(relation->rd_rel->relhasoids);
return NIL;
/*
- * We build the tree we intend to return in the caller's context.
- * After successfully completing the work, we copy it into the
- * relcache entry. This avoids problems if we get some sort of error
- * partway through.
+ * We build the tree we intend to return in the caller's context. After
+ * successfully completing the work, we copy it into the relcache entry.
+ * This avoids problems if we get some sort of error partway through.
*/
exprsDatum = heap_getattr(relation->rd_indextuple,
Anum_pg_index_indexprs,
return NIL;
/*
- * We build the tree we intend to return in the caller's context.
- * After successfully completing the work, we copy it into the
- * relcache entry. This avoids problems if we get some sort of error
- * partway through.
+ * We build the tree we intend to return in the caller's context. After
+ * successfully completing the work, we copy it into the relcache entry.
+ * This avoids problems if we get some sort of error partway through.
*/
predDatum = heap_getattr(relation->rd_indextuple,
Anum_pg_index_indpred,
* will be comparing it to similarly-processed qual clauses, and may fail
* to detect valid matches without this. This must match the processing
* done to qual clauses in preprocess_expression()! (We can skip the
- * stuff involving subqueries, however, since we don't allow any in
- * index predicates.)
+ * stuff involving subqueries, however, since we don't allow any in index
+ * predicates.)
*/
result = (List *) eval_const_expressions((Node *) result);
}
/*
- * Read the index relcache entries from the file. Note we will not
- * enter any of them into the cache if the read fails partway through;
- * this helps to guard against broken init files.
+ * Read the index relcache entries from the file. Note we will not enter
+ * any of them into the cache if the read fails partway through; this
+ * helps to guard against broken init files.
*/
max_rels = 100;
rels = (Relation *) palloc(max_rels * sizeof(Relation));
/*
* Rules and triggers are not saved (mainly because the internal
- * format is complex and subject to change). They must be rebuilt
- * if needed by RelationCacheInitializePhase2. This is not
- * expected to be a big performance hit since few system catalogs
- * have such. Ditto for index expressions and predicates.
+ * format is complex and subject to change). They must be rebuilt if
+ * needed by RelationCacheInitializePhase2. This is not expected to
+ * be a big performance hit since few system catalogs have such.
+ * Ditto for index expressions and predicates.
*/
rel->rd_rules = NULL;
rel->rd_rulescxt = NULL;
/*
* Recompute lock and physical addressing info. This is needed in
- * case the pg_internal.init file was copied from some other
- * database by CREATE DATABASE.
+ * case the pg_internal.init file was copied from some other database
+ * by CREATE DATABASE.
*/
RelationInitLockInfo(rel);
RelationInitPhysicalAddr(rel);
}
/*
- * We reached the end of the init file without apparent problem. Did
- * we get the right number of nailed items? (This is a useful
- * crosscheck in case the set of critical rels or indexes changes.)
+ * We reached the end of the init file without apparent problem. Did we
+ * get the right number of nailed items? (This is a useful crosscheck in
+ * case the set of critical rels or indexes changes.)
*/
if (nailed_rels != NUM_CRITICAL_RELS ||
nailed_indexes != NUM_CRITICAL_INDEXES)
return true;
/*
- * init file is broken, so do it the hard way. We don't bother trying
- * to free the clutter we just allocated; it's not in the relcache so
- * it won't hurt.
+ * init file is broken, so do it the hard way. We don't bother trying to
+ * free the clutter we just allocated; it's not in the relcache so it
+ * won't hurt.
*/
read_failed:
pfree(rels);
/*
* We must write a temporary file and rename it into place. Otherwise,
- * another backend starting at about the same time might crash trying
- * to read the partially-complete file.
+ * another backend starting at about the same time might crash trying to
+ * read the partially-complete file.
*/
snprintf(tempfilename, sizeof(tempfilename), "%s/%s.%d",
DatabasePath, RELCACHE_INIT_FILENAME, MyProcPid);
(errcode_for_file_access(),
errmsg("could not create relation-cache initialization file \"%s\": %m",
tempfilename),
- errdetail("Continuing anyway, but there's something wrong.")));
+ errdetail("Continuing anyway, but there's something wrong.")));
return;
}
/*
* Now we have to check whether the data we've so painstakingly
- * accumulated is already obsolete due to someone else's
- * just-committed catalog changes. If so, we just delete the temp
- * file and leave it to the next backend to try again. (Our own
- * relcache entries will be updated by SI message processing, but we
- * can't be sure whether what we wrote out was up-to-date.)
+ * accumulated is already obsolete due to someone else's just-committed
+ * catalog changes. If so, we just delete the temp file and leave it to
+ * the next backend to try again. (Our own relcache entries will be
+ * updated by SI message processing, but we can't be sure whether what we
+ * wrote out was up-to-date.)
*
* This mustn't run concurrently with RelationCacheInitFileInvalidate, so
* grab a serialization lock for the duration.
AcceptInvalidationMessages();
/*
- * If we have received any SI relcache invals since backend start,
- * assume we may have written out-of-date data.
+ * If we have received any SI relcache invals since backend start, assume
+ * we may have written out-of-date data.
*/
if (relcacheInvalsReceived == 0L)
{
* OK, rename the temp file to its final name, deleting any
* previously-existing init file.
*
- * Note: a failure here is possible under Cygwin, if some other
- * backend is holding open an unlinked-but-not-yet-gone init file.
- * So treat this as a noncritical failure; just remove the useless
- * temp file on failure.
+ * Note: a failure here is possible under Cygwin, if some other backend
+ * is holding open an unlinked-but-not-yet-gone init file. So treat
+ * this as a noncritical failure; just remove the useless temp file on
+ * failure.
*/
if (rename(tempfilename, finalfilename) < 0)
unlink(tempfilename);
/*
* We need to interlock this against write_relcache_init_file, to
* guard against possibility that someone renames a new-but-
- * already-obsolete init file into place just after we unlink.
- * With the interlock, it's certain that write_relcache_init_file
- * will notice our SI inval message before renaming into place, or
- * else that we will execute second and successfully unlink the
- * file.
+ * already-obsolete init file into place just after we unlink. With
+ * the interlock, it's certain that write_relcache_init_file will
+ * notice our SI inval message before renaming into place, or else
+ * that we will execute second and successfully unlink the file.
*/
LWLockAcquire(RelCacheInitLock, LW_EXCLUSIVE);
unlink(initfilename);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.100 2005/06/28 05:09:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.101 2005/10/15 02:49:32 momjian Exp $
*
* NOTES
* These routines allow the parser/planner/executor to perform
Add your entry to the cacheinfo[] array below. All cache lists are
alphabetical, so add it in the proper place. Specify the relation
- OID, index OID, number of keys, and key attribute numbers. If the
+ OID, index OID, number of keys, and key attribute numbers. If the
relation contains tuples that are associated with a particular relation
(for example, its attributes, rules, triggers, etc) then specify the
attribute number that contains the OID of the associated relation.
};
static const struct cachedesc cacheinfo[] = {
- {AggregateRelationId, /* AGGFNOID */
+ {AggregateRelationId, /* AGGFNOID */
AggregateFnoidIndexId,
0,
1,
0,
0
}},
- {AccessMethodRelationId, /* AMNAME */
+ {AccessMethodRelationId, /* AMNAME */
AmNameIndexId,
0,
1,
0,
0
}},
- {AccessMethodRelationId, /* AMOID */
+ {AccessMethodRelationId, /* AMOID */
AmOidIndexId,
0,
1,
Anum_pg_amproc_amprocnum,
0
}},
- {AttributeRelationId, /* ATTNAME */
+ {AttributeRelationId, /* ATTNAME */
AttributeRelidNameIndexId,
Anum_pg_attribute_attrelid,
2,
0,
0
}},
- {AttributeRelationId, /* ATTNUM */
+ {AttributeRelationId, /* ATTNUM */
AttributeRelidNumIndexId,
Anum_pg_attribute_attrelid,
2,
0,
0
}},
- {AuthMemRelationId, /* AUTHMEMMEMROLE */
+ {AuthMemRelationId, /* AUTHMEMMEMROLE */
AuthMemMemRoleIndexId,
0,
2,
0,
0
}},
- {AuthMemRelationId, /* AUTHMEMROLEMEM */
+ {AuthMemRelationId, /* AUTHMEMROLEMEM */
AuthMemRoleMemIndexId,
0,
2,
0,
0
}},
- {AuthIdRelationId, /* AUTHNAME */
+ {AuthIdRelationId, /* AUTHNAME */
AuthIdRolnameIndexId,
0,
1,
0,
0
}},
- {AuthIdRelationId, /* AUTHOID */
+ {AuthIdRelationId, /* AUTHOID */
AuthIdOidIndexId,
0,
1,
0
}},
{
- CastRelationId, /* CASTSOURCETARGET */
+ CastRelationId, /* CASTSOURCETARGET */
CastSourceTargetIndexId,
0,
2,
0,
0
}},
- {OperatorClassRelationId, /* CLAAMNAMENSP */
+ {OperatorClassRelationId, /* CLAAMNAMENSP */
OpclassAmNameNspIndexId,
0,
3,
Anum_pg_opclass_opcnamespace,
0
}},
- {OperatorClassRelationId, /* CLAOID */
+ {OperatorClassRelationId, /* CLAOID */
OpclassOidIndexId,
0,
1,
0,
0
}},
- {ConversionRelationId, /* CONDEFAULT */
+ {ConversionRelationId, /* CONDEFAULT */
ConversionDefaultIndexId,
0,
4,
Anum_pg_conversion_contoencoding,
ObjectIdAttributeNumber,
}},
- {ConversionRelationId, /* CONNAMENSP */
+ {ConversionRelationId, /* CONNAMENSP */
ConversionNameNspIndexId,
0,
2,
0,
0
}},
- {ConversionRelationId, /* CONOID */
+ {ConversionRelationId, /* CONOID */
ConversionOidIndexId,
0,
1,
0,
0
}},
- {IndexRelationId, /* INDEXRELID */
+ {IndexRelationId, /* INDEXRELID */
IndexRelidIndexId,
Anum_pg_index_indrelid,
1,
0,
0
}},
- {InheritsRelationId, /* INHRELID */
+ {InheritsRelationId, /* INHRELID */
InheritsRelidSeqnoIndexId,
Anum_pg_inherits_inhrelid,
2,
0,
0
}},
- {LanguageRelationId, /* LANGNAME */
+ {LanguageRelationId, /* LANGNAME */
LanguageNameIndexId,
0,
1,
0,
0
}},
- {LanguageRelationId, /* LANGOID */
+ {LanguageRelationId, /* LANGOID */
LanguageOidIndexId,
0,
1,
0,
0
}},
- {NamespaceRelationId, /* NAMESPACENAME */
+ {NamespaceRelationId, /* NAMESPACENAME */
NamespaceNameIndexId,
0,
1,
0,
0
}},
- {NamespaceRelationId, /* NAMESPACEOID */
+ {NamespaceRelationId, /* NAMESPACEOID */
NamespaceOidIndexId,
0,
1,
0,
0
}},
- {OperatorRelationId, /* OPERNAMENSP */
+ {OperatorRelationId, /* OPERNAMENSP */
OperatorNameNspIndexId,
0,
4,
Anum_pg_operator_oprright,
Anum_pg_operator_oprnamespace
}},
- {OperatorRelationId, /* OPEROID */
+ {OperatorRelationId, /* OPEROID */
OperatorOidIndexId,
0,
1,
0,
0
}},
- {ProcedureRelationId, /* PROCNAMEARGSNSP */
+ {ProcedureRelationId, /* PROCNAMEARGSNSP */
ProcedureNameArgsNspIndexId,
0,
3,
Anum_pg_proc_pronamespace,
0
}},
- {ProcedureRelationId, /* PROCOID */
+ {ProcedureRelationId, /* PROCOID */
ProcedureOidIndexId,
0,
1,
0,
0
}},
- {RelationRelationId, /* RELNAMENSP */
+ {RelationRelationId, /* RELNAMENSP */
ClassNameNspIndexId,
ObjectIdAttributeNumber,
2,
0,
0
}},
- {RelationRelationId, /* RELOID */
+ {RelationRelationId, /* RELOID */
ClassOidIndexId,
ObjectIdAttributeNumber,
1,
0,
0
}},
- {RewriteRelationId, /* RULERELNAME */
+ {RewriteRelationId, /* RULERELNAME */
RewriteRelRulenameIndexId,
Anum_pg_rewrite_ev_class,
2,
0,
0
}},
- {StatisticRelationId, /* STATRELATT */
+ {StatisticRelationId, /* STATRELATT */
StatisticRelidAttnumIndexId,
Anum_pg_statistic_starelid,
2,
0,
0
}},
- {TypeRelationId, /* TYPENAMENSP */
+ {TypeRelationId, /* TYPENAMENSP */
TypeNameNspIndexId,
Anum_pg_type_typrelid,
2,
0,
0
}},
- {TypeRelationId, /* TYPEOID */
+ {TypeRelationId, /* TYPEOID */
TypeOidIndexId,
Anum_pg_type_typrelid,
1,
}}
};
-static CatCache *SysCache[lengthof(cacheinfo)];
+static CatCache *SysCache[
+ lengthof(cacheinfo)];
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
bool *isNull)
{
/*
- * We just need to get the TupleDesc out of the cache entry, and then
- * we can apply heap_getattr(). We expect that the cache control data
- * is currently valid --- if the caller recently fetched the tuple,
- * then it should be.
+ * We just need to get the TupleDesc out of the cache entry, and then we
+ * can apply heap_getattr(). We expect that the cache control data is
+ * currently valid --- if the caller recently fetched the tuple, then it
+ * should be.
*/
if (cacheId < 0 || cacheId >= SysCacheSize)
elog(ERROR, "invalid cache id: %d", cacheId);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.14 2005/05/29 04:23:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.15 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (typentry == NULL)
{
/*
- * If we didn't find one, we want to make one. But first look up
- * the pg_type row, just to make sure we don't make a cache entry
- * for an invalid type OID.
+ * If we didn't find one, we want to make one. But first look up the
+ * pg_type row, just to make sure we don't make a cache entry for an
+ * invalid type OID.
*/
HeapTuple tp;
Form_pg_type typtup;
{
/*
* If we find a btree opclass where previously we only found a
- * hash opclass, forget the hash equality operator so we can
- * use the btree operator instead.
+ * hash opclass, forget the hash equality operator so we can use
+ * the btree operator instead.
*/
typentry->eq_opr = InvalidOid;
typentry->eq_opr_finfo.fn_oid = InvalidOid;
if (typentry->btree_opc != InvalidOid)
typentry->gt_opr = get_opclass_member(typentry->btree_opc,
InvalidOid,
- BTGreaterStrategyNumber);
+ BTGreaterStrategyNumber);
}
if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
typentry->cmp_proc == InvalidOid)
/*
* Set up fmgr lookup info as requested
*
- * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
- * which is not quite right (they're really in DynaHashContext) but
- * this will do for our purposes.
+ * Note: we tell fmgr the finfo structures live in CacheMemoryContext, which
+ * is not quite right (they're really in DynaHashContext) but this will do
+ * for our purposes.
*/
if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
typentry->eq_opr_finfo.fn_oid == InvalidOid &&
Assert(rel->rd_rel->reltype == typentry->type_id);
/*
- * Notice that we simply store a link to the relcache's tupdesc.
- * Since we are relying on relcache to detect cache flush events,
- * there's not a lot of point to maintaining an independent copy.
+ * Notice that we simply store a link to the relcache's tupdesc. Since
+ * we are relying on relcache to detect cache flush events, there's
+ * not a lot of point to maintaining an independent copy.
*/
typentry->tupDesc = RelationGetDescr(rel);
* (either exactly or binary-compatibly, but prefer an exact match).
*
* We could find more than one binary-compatible match, in which case we
- * require the user to specify which one he wants. If we find more
- * than one exact match, then someone put bogus entries in pg_opclass.
+ * require the user to specify which one he wants. If we find more than
+ * one exact match, then someone put bogus entries in pg_opclass.
*
- * This is the same logic as GetDefaultOpClass() in indexcmds.c, except
- * that we consider all opclasses, regardless of the current search
- * path.
+ * This is the same logic as GetDefaultOpClass() in indexcmds.c, except that
+ * we consider all opclasses, regardless of the current search path.
*/
rel = heap_open(OperatorClassRelationId, AccessShareLock);
if (nexact != 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple default operator classes for data type %s",
- format_type_be(type_id))));
+ errmsg("there are multiple default operator classes for data type %s",
+ format_type_be(type_id))));
if (ncompatible == 1)
return compatibleOid;
int32 newlen = RecordCacheArrayLen * 2;
RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
- newlen * sizeof(TupleDesc));
+ newlen * sizeof(TupleDesc));
RecordCacheArrayLen = newlen;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/assert.c,v 1.30 2004/12/31 22:01:27 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/assert.c,v 1.31 2005/10/15 02:49:32 momjian Exp $
*
* NOTE
* This should eventually work with elog()
#ifdef SLEEP_ON_ASSERT
/*
- * It would be nice to use pg_usleep() here, but only does 2000 sec or
- * 33 minutes, which seems too short.
+ * It would be nice to use pg_usleep() here, but only does 2000 sec or 33
+ * minutes, which seems too short.
*/
sleep(1000000);
#endif
* scenario of this sort is "out of memory"; and it's also the nastiest
* to handle because we'd likely also run out of memory while trying to
* report this error! Our escape hatch for this case is to reset the
- * ErrorContext to empty before trying to process the inner error. Since
+ * ErrorContext to empty before trying to process the inner error. Since
* ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c),
* we should be able to process an "out of memory" message successfully.
* Since we lose the prior error state due to the reset, we won't be able
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.164 2005/10/14 20:53:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.165 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Now decide whether we need to process this report at all; if it's
- * warning or less and not enabled for logging, just return FALSE
- * without starting up any error logging machinery.
+ * warning or less and not enabled for logging, just return FALSE without
+ * starting up any error logging machinery.
*/
/* Determine whether message is enabled for server log output */
MemoryContextReset(ErrorContext);
/*
- * If we recurse more than once, the problem might be something
- * broken in a context traceback routine. Abandon them too.
+ * If we recurse more than once, the problem might be something broken
+ * in a context traceback routine. Abandon them too.
*/
if (recursion_depth > 2)
error_context_stack = NULL;
CHECK_STACK_DEPTH();
/*
- * Do processing in ErrorContext, which we hope has enough reserved
- * space to report an error.
+ * Do processing in ErrorContext, which we hope has enough reserved space
+ * to report an error.
*/
oldcontext = MemoryContextSwitchTo(ErrorContext);
/*
* Call any context callback functions. Errors occurring in callback
- * functions will be treated as recursive errors --- this ensures we
- * will avoid infinite recursion (see errstart).
+ * functions will be treated as recursive errors --- this ensures we will
+ * avoid infinite recursion (see errstart).
*/
for (econtext = error_context_stack;
econtext != NULL;
/*
* If ERROR (not more nor less) we pass it off to the current handler.
- * Printing it and popping the stack is the responsibility of
- * the handler.
+ * Printing it and popping the stack is the responsibility of the handler.
*/
if (elevel == ERROR)
{
/*
- * We do some minimal cleanup before longjmp'ing so that handlers
- * can execute in a reasonably sane state.
+ * We do some minimal cleanup before longjmp'ing so that handlers can
+ * execute in a reasonably sane state.
*/
/* This is just in case the error came while waiting for input */
ImmediateInterruptOK = false;
/*
- * Reset InterruptHoldoffCount in case we ereport'd from
- * inside an interrupt holdoff section. (We assume here that
- * no handler will itself be inside a holdoff section. If
- * necessary, such a handler could save and restore
- * InterruptHoldoffCount for itself, but this should make life
- * easier for most.)
+ * Reset InterruptHoldoffCount in case we ereport'd from inside an
+ * interrupt holdoff section. (We assume here that no handler will
+ * itself be inside a holdoff section. If necessary, such a handler
+ * could save and restore InterruptHoldoffCount for itself, but this
+ * should make life easier for most.)
*/
InterruptHoldoffCount = 0;
- CritSectionCount = 0; /* should be unnecessary, but... */
+ CritSectionCount = 0; /* should be unnecessary, but... */
/*
- * Note that we leave CurrentMemoryContext set to ErrorContext.
- * The handler should reset it to something else soon.
+ * Note that we leave CurrentMemoryContext set to ErrorContext. The
+ * handler should reset it to something else soon.
*/
recursion_depth--;
/*
* If we are doing FATAL or PANIC, abort any old-style COPY OUT in
* progress, so that we can report the message before dying. (Without
- * this, pq_putmessage will refuse to send the message at all, which
- * is what we want for NOTICE messages, but not for fatal exits.) This
- * hack is necessary because of poor design of old-style copy
- * protocol. Note we must do this even if client is fool enough to
- * have set client_min_messages above FATAL, so don't look at
- * output_to_client.
+ * this, pq_putmessage will refuse to send the message at all, which is
+ * what we want for NOTICE messages, but not for fatal exits.) This hack
+ * is necessary because of poor design of old-style copy protocol. Note
+ * we must do this even if client is fool enough to have set
+ * client_min_messages above FATAL, so don't look at output_to_client.
*/
if (elevel >= FATAL && whereToSendOutput == Remote)
pq_endcopyout(true);
ImmediateInterruptOK = false;
/*
- * If we just reported a startup failure, the client will
- * disconnect on receiving it, so don't send any more to the
- * client.
+ * If we just reported a startup failure, the client will disconnect
+ * on receiving it, so don't send any more to the client.
*/
if (PG_exception_stack == NULL && whereToSendOutput == Remote)
whereToSendOutput = None;
/*
* fflush here is just to improve the odds that we get to see the
- * error message, in case things are so hosed that proc_exit
- * crashes. Any other code you might be tempted to add here
- * should probably be in an on_proc_exit callback instead.
+ * error message, in case things are so hosed that proc_exit crashes.
+ * Any other code you might be tempted to add here should probably be
+ * in an on_proc_exit callback instead.
*/
fflush(stdout);
fflush(stderr);
/*
- * If proc_exit is already running, we exit with nonzero exit code
- * to indicate that something's pretty wrong. We also want to
- * exit with nonzero exit code if not running under the postmaster
- * (for example, if we are being run from the initdb script, we'd
- * better return an error status).
+ * If proc_exit is already running, we exit with nonzero exit code to
+ * indicate that something's pretty wrong. We also want to exit with
+ * nonzero exit code if not running under the postmaster (for example,
+ * if we are being run from the initdb script, we'd better return an
+ * error status).
*/
proc_exit(proc_exit_inprogress || !IsUnderPostmaster);
}
if (elevel >= PANIC)
{
/*
- * Serious crash time. Postmaster will observe nonzero process
- * exit status and kill the other backends too.
+ * Serious crash time. Postmaster will observe nonzero process exit
+ * status and kill the other backends too.
*
* XXX: what if we are *in* the postmaster? abort() won't kill our
* children...
ErrorData *newedata;
/*
- * we don't increment recursion_depth because out-of-memory here does
- * not indicate a problem within the error subsystem.
+ * we don't increment recursion_depth because out-of-memory here does not
+ * indicate a problem within the error subsystem.
*/
CHECK_STACK_DEPTH();
FlushErrorState(void)
{
/*
- * Reset stack to empty. The only case where it would be more than
- * one deep is if we serviced an error that interrupted construction
- * of another message. We assume control escaped out of that message
+ * Reset stack to empty. The only case where it would be more than one
+ * deep is if we serviced an error that interrupted construction of
+ * another message. We assume control escaped out of that message
* construction and won't ever go back.
*/
errordata_stack_depth = -1;
0666)) < 0)
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", OutputFileName)));
+ errmsg("could not open file \"%s\": %m", OutputFileName)));
istty = isatty(fd);
close(fd);
OutputFileName)));
/*
- * If the file is a tty and we're running under the postmaster,
- * try to send stdout there as well (if it isn't a tty then stderr
- * will block out stdout, so we may as well let stdout go wherever
- * it was going before).
+ * If the file is a tty and we're running under the postmaster, try to
+ * send stdout there as well (if it isn't a tty then stderr will block
+ * out stdout, so we may as well let stdout go wherever it was going
+ * before).
*/
if (istty && IsUnderPostmaster)
if (!freopen(OutputFileName, "a", stdout))
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not reopen file \"%s\" as stdout: %m",
- OutputFileName)));
+ errmsg("could not reopen file \"%s\" as stdout: %m",
+ OutputFileName)));
}
}
set_syslog_parameters(const char *ident, int facility)
{
/*
- * guc.c is likely to call us repeatedly with same parameters, so
- * don't thrash the syslog connection unnecessarily. Also, we do not
- * re-open the connection until needed, since this routine will get called
- * whether or not Log_destination actually mentions syslog.
+ * guc.c is likely to call us repeatedly with same parameters, so don't
+ * thrash the syslog connection unnecessarily. Also, we do not re-open
+ * the connection until needed, since this routine will get called whether
+ * or not Log_destination actually mentions syslog.
*
- * Note that we make our own copy of the ident string rather than relying
- * on guc.c's. This may be overly paranoid, but it ensures that we cannot
+ * Note that we make our own copy of the ident string rather than relying on
+ * guc.c's. This may be overly paranoid, but it ensures that we cannot
* accidentally free a string that syslog is still using.
*/
if (syslog_ident == NULL || strcmp(syslog_ident, ident) != 0 ||
seq++;
/*
- * Our problem here is that many syslog implementations don't handle
- * long messages in an acceptable manner. While this function doesn't
- * help that fact, it does work around by splitting up messages into
- * smaller pieces.
+ * Our problem here is that many syslog implementations don't handle long
+ * messages in an acceptable manner. While this function doesn't help that
+ * fact, it does work around by splitting up messages into smaller pieces.
*
- * We divide into multiple syslog() calls if message is too long
- * or if the message contains embedded NewLine(s) '\n'.
+ * We divide into multiple syslog() calls if message is too long or if the
+ * message contains embedded NewLine(s) '\n'.
*/
len = strlen(line);
if (len > PG_SYSLOG_LIMIT || strchr(line, '\n') != NULL)
static void
write_eventlog(int level, const char *line)
{
- int eventlevel = EVENTLOG_ERROR_TYPE;
+ int eventlevel = EVENTLOG_ERROR_TYPE;
static HANDLE evtHandle = INVALID_HANDLE_VALUE;
if (evtHandle == INVALID_HANDLE_VALUE)
int i;
/*
- * This is one of the few places where we'd rather not inherit a
- * static variable's value from the postmaster. But since we will,
- * reset it when MyProcPid changes.
+ * This is one of the few places where we'd rather not inherit a static
+ * variable's value from the postmaster. But since we will, reset it when
+ * MyProcPid changes.
*/
if (log_my_pid != MyProcPid)
{
if (MyProcPort)
{
appendStringInfo(buf, "%lx.%x",
- (long) (MyProcPort->session_start.tv_sec),
- MyProcPid);
+ (long) (MyProcPort->session_start.tv_sec),
+ MyProcPid);
}
break;
case 'p':
case 'm':
{
/*
- * Note: for %m, %t, and %s we deliberately use the
- * C library's strftime/localtime, and not the
- * equivalent functions from src/timezone. This
- * ensures that all backends will report log entries
- * in the same timezone, namely whatever C-library
- * setting they inherit from the postmaster. If we
- * used src/timezone then local settings of the
- * TimeZone GUC variable would confuse the log.
+ * Note: for %m, %t, and %s we deliberately use the C
+ * library's strftime/localtime, and not the equivalent
+ * functions from src/timezone. This ensures that all
+ * backends will report log entries in the same timezone,
+ * namely whatever C-library setting they inherit from the
+ * postmaster. If we used src/timezone then local
+ * settings of the TimeZone GUC variable would confuse the
+ * log.
*/
- time_t stamp_time;
- char strfbuf[128], msbuf[8];
+ time_t stamp_time;
+ char strfbuf[128],
+ msbuf[8];
struct timeval tv;
gettimeofday(&tv, NULL);
- stamp_time = tv.tv_sec;
+ stamp_time = tv.tv_sec;
strftime(strfbuf, sizeof(strfbuf),
/* leave room for milliseconds... */
localtime(&stamp_time));
/* 'paste' milliseconds into place... */
- sprintf(msbuf, ".%03d", (int) (tv.tv_usec/1000));
- strncpy(strfbuf+19, msbuf, 4);
+ sprintf(msbuf, ".%03d", (int) (tv.tv_usec / 1000));
+ strncpy(strfbuf + 19, msbuf, 4);
appendStringInfoString(buf, strfbuf);
}
char *
unpack_sql_state(int sql_state)
{
- static char buf[12];
+ static char buf[12];
int i;
for (i = 0; i < 5; i++)
}
/*
- * If the user wants the query that generated this error logged, do
- * it.
+ * If the user wants the query that generated this error logged, do it.
*/
if (edata->elevel >= log_min_error_statement && debug_query_string != NULL)
{
if ((Log_destination & LOG_DESTINATION_STDERR) || whereToSendOutput == Debug)
{
#ifdef WIN32
+
/*
* In a win32 service environment, there is no usable stderr. Capture
* anything going there and write it to the eventlog instead.
*
- * If stderr redirection is active, it's ok to write to stderr
- * because that's really a pipe to the syslogger process.
+ * If stderr redirection is active, it's ok to write to stderr because
+ * that's really a pipe to the syslogger process.
*/
if ((!Redirect_stderr || am_syslogger) && pgwin32_is_service())
write_eventlog(edata->elevel, buf.data);
pq_endmessage(&msgbuf);
/*
- * This flush is normally not necessary, since postgres.c will flush
- * out waiting data when control returns to the main loop. But it
- * seems best to leave it here, so that the client has some clue what
- * happened if the backend dies before getting back to the main loop
- * ... error/notice messages should not be a performance-critical path
- * anyway, so an extra flush won't hurt much ...
+ * This flush is normally not necessary, since postgres.c will flush out
+ * waiting data when control returns to the main loop. But it seems best
+ * to leave it here, so that the client has some clue what happened if the
+ * backend dies before getting back to the main loop ... error/notice
+ * messages should not be a performance-critical path anyway, so an extra
+ * flush won't hurt much ...
*/
pq_flush();
}
if (*cp == 'm')
{
/*
- * Replace %m by system error string. If there are any
- * %'s in the string, we'd better double them so that
- * vsnprintf won't misinterpret.
+ * Replace %m by system error string. If there are any %'s in
+ * the string, we'd better double them so that vsnprintf won't
+ * misinterpret.
*/
const char *cp2;
str = strerror(errnum);
/*
- * Some strerror()s return an empty string for out-of-range errno.
- * This is ANSI C spec compliant, but not exactly useful.
+ * Some strerror()s return an empty string for out-of-range errno. This is
+ * ANSI C spec compliant, but not exactly useful.
*/
if (str == NULL || *str == '\0')
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.80 2005/05/11 01:26:02 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.81 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
struct df_files *next; /* List link */
dev_t device; /* Device file is on */
-#ifndef WIN32 /* ensures we never again depend on this
- * under win32 */
+#ifndef WIN32 /* ensures we never again depend on this under
+ * win32 */
ino_t inode; /* Inode number of file */
#endif
void *handle; /* a handle for pg_dl* functions */
/*
* We need to do stat() in order to determine whether this is the same
- * file as a previously loaded file; it's also handy so as to give a
- * good error message if bogus file name given.
+ * file as a previously loaded file; it's also handy so as to give a good
+ * error message if bogus file name given.
*/
if (stat(fullname, &stat_buf) == -1)
ereport(ERROR,
errmsg("could not access file \"%s\": %m", fullname)));
/*
- * We have to zap all entries in the list that match on either
- * filename or inode, else load_external_function() won't do anything.
+ * We have to zap all entries in the list that match on either filename or
+ * inode, else load_external_function() won't do anything.
*/
prv = NULL;
for (file_scanner = file_list; file_scanner != NULL; file_scanner = nxt)
strncmp(name, "$libdir", strlen("$libdir")) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("invalid macro name in dynamic library path: %s", name)));
+ errmsg("invalid macro name in dynamic library path: %s", name)));
ret = palloc(strlen(pkglib_path) + strlen(sep_ptr) + 1);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.96 2005/06/28 05:09:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.97 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* some warnings about int->pointer conversions...
*/
#if (defined(__mc68000__) || (defined(__m68k__))) && defined(__ELF__)
-typedef int32 (*func_ptr) ();
+typedef int32 (*func_ptr) ();
#else
-typedef char * (*func_ptr) ();
+typedef char *(*func_ptr) ();
#endif
/*
typedef struct
{
func_ptr func; /* Address of the oldstyle function */
- bool arg_toastable[FUNC_MAX_ARGS]; /* is n'th arg of a
- * toastable datatype? */
+ bool arg_toastable[FUNC_MAX_ARGS]; /* is n'th arg of a toastable
+ * datatype? */
} Oldstyle_fnextra;
/*
int high = fmgr_nbuiltins - 1;
/*
- * Loop invariant: low is the first index that could contain target
- * entry, and high is the last index that could contain it.
+ * Loop invariant: low is the first index that could contain target entry,
+ * and high is the last index that could contain it.
*/
while (low <= high)
{
char *prosrc;
/*
- * fn_oid *must* be filled in last. Some code assumes that if fn_oid
- * is valid, the whole struct is valid. Some FmgrInfo struct's do
- * survive elogs.
+ * fn_oid *must* be filled in last. Some code assumes that if fn_oid is
+ * valid, the whole struct is valid. Some FmgrInfo struct's do survive
+ * elogs.
*/
finfo->fn_oid = InvalidOid;
finfo->fn_extra = NULL;
if ((fbp = fmgr_isbuiltin(functionId)) != NULL)
{
/*
- * Fast path for builtin functions: don't bother consulting
- * pg_proc
+ * Fast path for builtin functions: don't bother consulting pg_proc
*/
finfo->fn_nargs = fbp->nargs;
finfo->fn_strict = fbp->strict;
/*
* For an ordinary builtin function, we should never get here
* because the isbuiltin() search above will have succeeded.
- * However, if the user has done a CREATE FUNCTION to create
- * an alias for a builtin function, we can end up here. In
- * that case we have to look up the function by name. The
- * name of the internal function is stored in prosrc (it
- * doesn't have to be the same as the name of the alias!)
+ * However, if the user has done a CREATE FUNCTION to create an
+ * alias for a builtin function, we can end up here. In that case
+ * we have to look up the function by name. The name of the
+ * internal function is stored in prosrc (it doesn't have to be
+ * the same as the name of the alias!)
*/
prosrcdatum = SysCacheGetAttr(PROCOID, procedureTuple,
Anum_pg_proc_prosrc, &isnull);
void *libraryhandle;
/*
- * Get prosrc and probin strings (link symbol and library
- * filename)
+ * Get prosrc and probin strings (link symbol and library filename)
*/
prosrcattr = SysCacheGetAttr(PROCOID, procedureTuple,
Anum_pg_proc_prosrc, &isnull);
fnextra = (Oldstyle_fnextra *) fcinfo->flinfo->fn_extra;
/*
- * Result is NULL if any argument is NULL, but we still call the
- * function (peculiar, but that's the way it worked before, and after
- * all this is a backwards-compatibility wrapper). Note, however,
- * that we'll never get here with NULL arguments if the function is
- * marked strict.
+ * Result is NULL if any argument is NULL, but we still call the function
+ * (peculiar, but that's the way it worked before, and after all this is a
+ * backwards-compatibility wrapper). Note, however, that we'll never get
+ * here with NULL arguments if the function is marked strict.
*
- * We also need to detoast any TOAST-ed inputs, since it's unlikely that
- * an old-style function knows about TOASTing.
+ * We also need to detoast any TOAST-ed inputs, since it's unlikely that an
+ * old-style function knows about TOASTing.
*/
isnull = false;
for (i = 0; i < n_arguments; i++)
case 1:
/*
- * nullvalue() used to use isNull to check if arg is NULL;
- * perhaps there are other functions still out there that also
- * rely on this undocumented hack?
+ * nullvalue() used to use isNull to check if arg is NULL; perhaps
+ * there are other functions still out there that also rely on
+ * this undocumented hack?
*/
returnValue = (*user_fn) (fcinfo->arg[0], &fcinfo->isnull);
break;
default:
/*
- * Increasing FUNC_MAX_ARGS doesn't automatically add cases to
- * the above code, so mention the actual value in this error
- * not FUNC_MAX_ARGS. You could add cases to the above if you
- * needed to support old-style functions with many arguments,
- * but making 'em be new-style is probably a better idea.
+ * Increasing FUNC_MAX_ARGS doesn't automatically add cases to the
+ * above code, so mention the actual value in this error not
+ * FUNC_MAX_ARGS. You could add cases to the above if you needed
+ * to support old-style functions with many arguments, but making
+ * 'em be new-style is probably a better idea.
*/
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("function %u has too many arguments (%d, maximum is %d)",
- fcinfo->flinfo->fn_oid, n_arguments, 16)));
+ errmsg("function %u has too many arguments (%d, maximum is %d)",
+ fcinfo->flinfo->fn_oid, n_arguments, 16)));
returnValue = NULL; /* keep compiler quiet */
break;
}
struct fmgr_security_definer_cache
{
FmgrInfo flinfo;
- Oid userid;
+ Oid userid;
};
/*
{
Datum result;
FmgrInfo *save_flinfo;
- struct fmgr_security_definer_cache * volatile fcache;
- Oid save_userid;
+ struct fmgr_security_definer_cache *volatile fcache;
+ Oid save_userid;
HeapTuple tuple;
if (!fcinfo->flinfo->fn_extra)
if (n_arguments > FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("function %u has too many arguments (%d, maximum is %d)",
- flinfo.fn_oid, n_arguments, FUNC_MAX_ARGS)));
+ errmsg("function %u has too many arguments (%d, maximum is %d)",
+ flinfo.fn_oid, n_arguments, FUNC_MAX_ARGS)));
va_start(pvar, procedureId);
for (i = 0; i < n_arguments; i++)
fcinfo.arg[i] = (Datum) va_arg(pvar, char *);
#else /* INT64_IS_BUSTED */
/*
- * On a machine with no 64-bit-int C datatype, sizeof(int64) will not
- * be 8, but we want Int64GetDatum to return an 8-byte object anyway,
- * with zeroes in the unused bits. This is needed so that, for
- * example, hash join of int8 will behave properly.
+ * On a machine with no 64-bit-int C datatype, sizeof(int64) will not be
+ * 8, but we want Int64GetDatum to return an 8-byte object anyway, with
+ * zeroes in the unused bits. This is needed so that, for example, hash
+ * join of int8 will behave properly.
*/
int64 *retval = (int64 *) palloc0(Max(sizeof(int64), 8));
Node *expr;
/*
- * can't return anything useful if we have no FmgrInfo or if its
- * fn_expr node has not been initialized
+ * can't return anything useful if we have no FmgrInfo or if its fn_expr
+ * node has not been initialized
*/
if (!flinfo || !flinfo->fn_expr)
return InvalidOid;
get_fn_expr_argtype(FmgrInfo *flinfo, int argnum)
{
/*
- * can't return anything useful if we have no FmgrInfo or if its
- * fn_expr node has not been initialized
+ * can't return anything useful if we have no FmgrInfo or if its fn_expr
+ * node has not been initialized
*/
if (!flinfo || !flinfo->fn_expr)
return InvalidOid;
argtype = exprType((Node *) list_nth(args, argnum));
/*
- * special hack for ScalarArrayOpExpr: what the underlying function
- * will actually get passed is the element type of the array.
+ * special hack for ScalarArrayOpExpr: what the underlying function will
+ * actually get passed is the element type of the array.
*/
if (IsA(expr, ScalarArrayOpExpr) &&
argnum == 1)
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.25 2005/10/06 19:51:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.26 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void shutdown_MultiFuncCall(Datum arg);
static TypeFuncClass internal_get_result_type(Oid funcid,
- Node *call_expr,
- ReturnSetInfo *rsinfo,
- Oid *resultTypeId,
- TupleDesc *resultTupleDesc);
+ Node *call_expr,
+ ReturnSetInfo *rsinfo,
+ Oid *resultTypeId,
+ TupleDesc *resultTupleDesc);
static bool resolve_polymorphic_tupdesc(TupleDesc tupdesc,
- oidvector *declared_args,
- Node *call_expr);
+ oidvector *declared_args,
+ Node *call_expr);
static TypeFuncClass get_type_func_class(Oid typid);
fcinfo->flinfo->fn_extra = retval;
/*
- * Ensure we will get shut down cleanly if the exprcontext is not
- * run to completion.
+ * Ensure we will get shut down cleanly if the exprcontext is not run
+ * to completion.
*/
RegisterExprContextCallback(rsi->econtext,
shutdown_MultiFuncCall,
FuncCallContext *retval = (FuncCallContext *) fcinfo->flinfo->fn_extra;
/*
- * Clear the TupleTableSlot, if present. This is for safety's sake:
- * the Slot will be in a long-lived context (it better be, if the
+ * Clear the TupleTableSlot, if present. This is for safety's sake: the
+ * Slot will be in a long-lived context (it better be, if the
* FuncCallContext is pointing to it), but in most usage patterns the
- * tuples stored in it will be in the function's per-tuple context. So
- * at the beginning of each call, the Slot will hold a dangling
- * pointer to an already-recycled tuple. We clear it out here.
+ * tuples stored in it will be in the function's per-tuple context. So at
+ * the beginning of each call, the Slot will hold a dangling pointer to an
+ * already-recycled tuple. We clear it out here.
*
* Note: use of retval->slot is obsolete as of 8.0, and we expect that it
- * will always be NULL. This is just here for backwards compatibility
- * in case someone creates a slot anyway.
+ * will always be NULL. This is just here for backwards compatibility in
+ * case someone creates a slot anyway.
*/
if (retval->slot != NULL)
ExecClearTuple(retval->slot);
flinfo->fn_extra = NULL;
/*
- * Caller is responsible to free up memory for individual struct
- * elements other than att_in_funcinfo and elements.
+ * Caller is responsible to free up memory for individual struct elements
+ * other than att_in_funcinfo and elements.
*/
if (funcctx->attinmeta != NULL)
pfree(funcctx->attinmeta);
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result. NB: the tupledesc should
* be copied if it is to be accessed over a long period.
*
* One hard case that this handles is resolution of actual rowtypes for
* functions returning RECORD (from either the function's OUT parameter
- * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
+ * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
* only when we couldn't resolve the actual rowtype for lack of information.
*
* The other hard case that this handles is resolution of polymorphism.
else
{
/* handle as a generic expression; no chance to resolve RECORD */
- Oid typid = exprType(expr);
+ Oid typid = exprType(expr);
if (resultTypeId)
*resultTypeId = typid;
/*
* internal_get_result_type -- workhorse code implementing all the above
*
- * funcid must always be supplied. call_expr and rsinfo can be NULL if not
+ * funcid must always be supplied. call_expr and rsinfo can be NULL if not
* available. We will return TYPEFUNC_RECORD, and store NULL into
* *resultTupleDesc, if we cannot deduce the complete result rowtype from
* the available information.
if (tupdesc)
{
/*
- * It has OUT parameters, so it's basically like a regular
- * composite type, except we have to be able to resolve any
- * polymorphic OUT parameters.
+ * It has OUT parameters, so it's basically like a regular composite
+ * type, except we have to be able to resolve any polymorphic OUT
+ * parameters.
*/
if (resultTypeId)
*resultTypeId = rettype;
*/
if (rettype == ANYARRAYOID || rettype == ANYELEMENTOID)
{
- Oid newrettype = exprType(call_expr);
+ Oid newrettype = exprType(call_expr);
if (newrettype == InvalidOid) /* this probably should not happen */
ereport(ERROR,
if (resultTypeId)
*resultTypeId = rettype;
if (resultTupleDesc)
- *resultTupleDesc = NULL; /* default result */
+ *resultTupleDesc = NULL; /* default result */
/* Classify the result type */
result = get_type_func_class(rettype);
/*
* Given the result tuple descriptor for a function with OUT parameters,
* replace any polymorphic columns (ANYELEMENT/ANYARRAY) with correct data
- * types deduced from the input arguments. Returns TRUE if able to deduce
+ * types deduced from the input arguments. Returns TRUE if able to deduce
* all types, FALSE if not.
*/
static bool
return true;
/*
- * Otherwise, extract actual datatype(s) from input arguments. (We assume
+ * Otherwise, extract actual datatype(s) from input arguments. (We assume
* the parser already validated consistency of the arguments.)
*/
if (!call_expr)
switch (tupdesc->attrs[i]->atttypid)
{
case ANYELEMENTOID:
- TupleDescInitEntry(tupdesc, i+1,
+ TupleDescInitEntry(tupdesc, i + 1,
NameStr(tupdesc->attrs[i]->attname),
anyelement_type,
-1,
0);
break;
case ANYARRAYOID:
- TupleDescInitEntry(tupdesc, i+1,
+ TupleDescInitEntry(tupdesc, i + 1,
NameStr(tupdesc->attrs[i]->attname),
anyarray_type,
-1,
/*
* Given the declared argument types and modes for a function,
* replace any polymorphic types (ANYELEMENT/ANYARRAY) with correct data
- * types deduced from the input arguments. Returns TRUE if able to deduce
+ * types deduced from the input arguments. Returns TRUE if able to deduce
* all types, FALSE if not. This is the same logic as
* resolve_polymorphic_tupdesc, but with a different argument representation.
*
inargno = 0;
for (i = 0; i < numargs; i++)
{
- char argmode = argmodes ? argmodes[i] : PROARGMODE_IN;
+ char argmode = argmodes ? argmodes[i] : PROARGMODE_IN;
switch (argtypes[i])
{
case 'p':
if (typid == RECORDOID)
return TYPEFUNC_RECORD;
+
/*
* We treat VOID and CSTRING as legitimate scalar datatypes,
- * mostly for the convenience of the JDBC driver (which wants
- * to be able to do "SELECT * FROM foo()" for all legitimately
+ * mostly for the convenience of the JDBC driver (which wants to
+ * be able to do "SELECT * FROM foo()" for all legitimately
* user-callable functions).
*/
if (typid == VOIDOID || typid == CSTRINGOID)
* since the array data is just going to look like a C array of
* values.
*/
- arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */
numargs = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numargs < 0 ||
ARR_ELEMTYPE(arr) != CHAROID)
elog(ERROR, "proargmodes is not a 1-D char array");
argmodes = (char *) ARR_DATA_PTR(arr);
- arr = DatumGetArrayTypeP(proargnames); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(proargnames); /* ensure not toasted */
if (ARR_NDIM(arr) != 1 ||
ARR_DIMS(arr)[0] != numargs ||
ARR_ELEMTYPE(arr) != TEXTOID)
Anum_pg_proc_proargnames,
&isnull);
if (isnull)
- proargnames = PointerGetDatum(NULL); /* just to be sure */
+ proargnames = PointerGetDatum(NULL); /* just to be sure */
return build_function_result_tupdesc_d(proallargtypes,
proargmodes,
numoutargs = 0;
for (i = 0; i < numargs; i++)
{
- char *pname;
+ char *pname;
if (argmodes[i] == PROARGMODE_IN)
continue;
desc = CreateTemplateTupleDesc(numoutargs, false);
for (i = 0; i < numoutargs; i++)
{
- TupleDescInitEntry(desc, i+1,
+ TupleDescInitEntry(desc, i + 1,
outargnames[i],
outargtypes[i],
-1,
if (list_length(colaliases) != 1)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("number of aliases does not match number of columns")));
+ errmsg("number of aliases does not match number of columns")));
/* OK, get the column alias */
attname = strVal(linitial(colaliases));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.64 2005/08/20 23:26:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.65 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
HASHHDR *hctl;
/*
- * For shared hash tables, we have a local hash header (HTAB struct)
- * that we allocate in TopMemoryContext; all else is in shared memory.
+ * For shared hash tables, we have a local hash header (HTAB struct) that
+ * we allocate in TopMemoryContext; all else is in shared memory.
*
- * For non-shared hash tables, everything including the hash header
- * is in a memory context created specially for the hash table ---
- * this makes hash_destroy very simple. The memory context is made
- * a child of either a context specified by the caller, or
- * TopMemoryContext if nothing is specified.
+ * For non-shared hash tables, everything including the hash header is in a
+ * memory context created specially for the hash table --- this makes
+ * hash_destroy very simple. The memory context is made a child of either
+ * a context specified by the caller, or TopMemoryContext if nothing is
+ * specified.
*/
if (flags & HASH_SHARED_MEM)
{
}
/* Initialize the hash header, plus a copy of the table name */
- hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) + 1);
+ hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) +1);
MemSet(hashp, 0, sizeof(HTAB));
hashp->tabname = (char *) (hashp + 1);
hashp->hash = string_hash; /* default hash function */
/*
- * If you don't specify a match function, it defaults to strncmp() if
- * you used string_hash (either explicitly or by default) and to
- * memcmp() otherwise. (Prior to PostgreSQL 7.4, memcmp() was always
- * used.)
+ * If you don't specify a match function, it defaults to strncmp() if you
+ * used string_hash (either explicitly or by default) and to memcmp()
+ * otherwise. (Prior to PostgreSQL 7.4, memcmp() was always used.)
*/
if (flags & HASH_COMPARE)
hashp->match = info->match;
if (flags & HASH_SHARED_MEM)
{
/*
- * ctl structure is preallocated for shared memory tables. Note
- * that HASH_DIRSIZE and HASH_ALLOC had better be set as well.
+ * ctl structure is preallocated for shared memory tables. Note that
+ * HASH_DIRSIZE and HASH_ALLOC had better be set as well.
*/
hashp->hctl = info->hctl;
hashp->dir = info->dir;
}
/*
- * hash table now allocates space for key and data but you have to say
- * how much space to allocate
+ * hash table now allocates space for key and data but you have to say how
+ * much space to allocate
*/
if (flags & HASH_ELEM)
{
/*
* Divide number of elements by the fill factor to determine a desired
- * number of buckets. Allocate space for the next greater power of
- * two number of buckets
+ * number of buckets. Allocate space for the next greater power of two
+ * number of buckets
*/
lnbuckets = (nelem - 1) / hctl->ffactor + 1;
hctl->high_mask = (nbuckets << 1) - 1;
/*
- * Figure number of directory segments needed, round up to a power of
- * 2
+ * Figure number of directory segments needed, round up to a power of 2
*/
nsegs = (nbuckets - 1) / hctl->ssize + 1;
nsegs = 1 << my_log2(nsegs);
/*
- * Make sure directory is big enough. If pre-allocated directory is
- * too small, choke (caller screwed up).
+ * Make sure directory is big enough. If pre-allocated directory is too
+ * small, choke (caller screwed up).
*/
if (nsegs > hctl->dsize)
{
size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
/* segments */
size = add_size(size, mul_size(nSegments,
- MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
+ MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
/* elements --- allocated in groups of up to HASHELEMENT_ALLOC_MAX */
elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
elementAllocCnt = Min(num_entries, HASHELEMENT_ALLOC_MAX);
* the result is a dangling pointer that shouldn't be dereferenced!)
*
* HASH_ENTER will normally ereport a generic "out of memory" error if
- * it is unable to create a new entry. The HASH_ENTER_NULL operation is
+ * it is unable to create a new entry. The HASH_ENTER_NULL operation is
* the same except it will return NULL if out of memory. Note that
* HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
* since palloc internally ereports on out-of-memory.
/*
* better hope the caller is synchronizing access to this
- * element, because someone else is going to reuse it the
- * next time something is added to the table
+ * element, because someone else is going to reuse it the next
+ * time something is added to the table
*/
return (void *) ELEMENTKEY(currBucket);
}
if (++hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor)
{
/*
- * NOTE: failure to expand table is not a fatal error, it
- * just means we have to run at higher fill factor than we
- * wanted.
+ * NOTE: failure to expand table is not a fatal error, it just
+ * means we have to run at higher fill factor than we wanted.
*/
expand_table(hashp);
}
{
/* Continuing scan of curBucket... */
status->curEntry = curElem->link;
- if (status->curEntry == NULL) /* end of this bucket */
+ if (status->curEntry == NULL) /* end of this bucket */
++status->curBucket;
return (void *) ELEMENTKEY(curElem);
}
max_bucket = hctl->max_bucket;
if (curBucket > max_bucket)
- return NULL; /* search is done */
+ return NULL; /* search is done */
/*
* first find the right segment in the table directory.
if (++curBucket > max_bucket)
{
status->curBucket = curBucket;
- return NULL; /* search is done */
+ return NULL; /* search is done */
}
if (++segment_ndx >= ssize)
{
/*
* *Before* changing masks, find old bucket corresponding to same hash
- * values; values in that bucket may need to be relocated to new
- * bucket. Note that new_bucket is certainly larger than low_mask at
- * this point, so we can skip the first step of the regular hash mask
- * calc.
+ * values; values in that bucket may need to be relocated to new bucket.
+ * Note that new_bucket is certainly larger than low_mask at this point,
+ * so we can skip the first step of the regular hash mask calc.
*/
old_bucket = (new_bucket & hctl->low_mask);
}
/*
- * Relocate records to the new bucket. NOTE: because of the way the
- * hash masking is done in calc_bucket, only one old bucket can need
- * to be split at this point. With a different way of reducing the
- * hash value, that might not be true!
+ * Relocate records to the new bucket. NOTE: because of the way the hash
+ * masking is done in calc_bucket, only one old bucket can need to be
+ * split at this point. With a different way of reducing the hash value,
+ * that might not be true!
*/
old_segnum = old_bucket >> hctl->sshift;
old_segndx = MOD(old_bucket, hctl->ssize);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.24 2005/06/08 23:02:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.25 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bitmap_hash(const void *key, Size keysize)
{
Assert(keysize == sizeof(Bitmapset *));
- return bms_hash_value(*((const Bitmapset * const *) key));
+ return bms_hash_value(*((const Bitmapset *const *) key));
}
/*
bitmap_match(const void *key1, const void *key2, Size keysize)
{
Assert(keysize == sizeof(Bitmapset *));
- return !bms_equal(*((const Bitmapset * const *) key1),
- *((const Bitmapset * const *) key2));
+ return !bms_equal(*((const Bitmapset *const *) key1),
+ *((const Bitmapset *const *) key2));
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/pg_crc.c,v 1.13 2005/06/02 05:55:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/pg_crc.c,v 1.14 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
0x5DEDC41A, 0x1F1D25F1,
0xD80C07CD, 0x9AFCE626
};
-
#else /* int64 works */
const uint64 pg_crc64_table[256] = {
UINT64CONST(0x5DEDC41A34BBEEB2), UINT64CONST(0x1F1D25F19D51D821),
UINT64CONST(0xD80C07CD676F8394), UINT64CONST(0x9AFCE626CE85B507)
};
-
#endif /* INT64_IS_BUSTED */
-#endif /* PROVIDE_64BIT_CRC */
+#endif /* PROVIDE_64BIT_CRC */
* Routines for maintaining "flat file" images of the shared catalogs.
*
* We use flat files so that the postmaster and not-yet-fully-started
- * backends can look at the contents of pg_database, pg_authid, and
- * pg_auth_members for authentication purposes. This module is
- * responsible for keeping the flat-file images as nearly in sync with
+ * backends can look at the contents of pg_database, pg_authid, and
+ * pg_auth_members for authentication purposes. This module is
+ * responsible for keeping the flat-file images as nearly in sync with
* database reality as possible.
*
* The tricky part of the write_xxx_file() routines in this module is that
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.14 2005/08/11 21:11:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.15 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define AUTH_FLAT_FILE "global/pg_auth"
/* Info bits in a flatfiles 2PC record */
-#define FF_BIT_DATABASE 1
+#define FF_BIT_DATABASE 1
#define FF_BIT_AUTH 2
/*
* Create a temporary filename to be renamed later. This prevents the
- * backend from clobbering the flat file while the postmaster
- * might be reading from it.
+ * backend from clobbering the flat file while the postmaster might be
+ * reading from it.
*/
filename = database_getflatfilename();
bufsize = strlen(filename) + 12;
Oid datoid;
Oid dattablespace;
TransactionId datfrozenxid,
- datvacuumxid;
+ datvacuumxid;
datname = NameStr(dbform->datname);
datoid = HeapTupleGetOid(tuple);
/*
* Identify the oldest datfrozenxid, ignoring databases that are not
- * connectable (we assume they are safely frozen). This must match
+ * connectable (we assume they are safely frozen). This must match
* the logic in vac_truncate_clog() in vacuum.c.
*/
if (dbform->datallowconn &&
tempname)));
/*
- * Rename the temp file to its final name, deleting the old flat file.
- * We expect that rename(2) is an atomic action.
+ * Rename the temp file to its final name, deleting the old flat file. We
+ * expect that rename(2) is an atomic action.
*/
if (rename(tempname, filename))
ereport(ERROR,
* and build data structures in-memory before writing the file.
*/
-typedef struct {
+typedef struct
+{
Oid roleid;
bool rolcanlogin;
- char* rolname;
- char* rolpassword;
- char* rolvaliduntil;
- List* member_of;
+ char *rolname;
+ char *rolpassword;
+ char *rolvaliduntil;
+ List *member_of;
} auth_entry;
-typedef struct {
+typedef struct
+{
Oid roleid;
Oid memberid;
} authmem_entry;
static int
oid_compar(const void *a, const void *b)
{
- const auth_entry *a_auth = (const auth_entry*) a;
- const auth_entry *b_auth = (const auth_entry*) b;
+ const auth_entry *a_auth = (const auth_entry *) a;
+ const auth_entry *b_auth = (const auth_entry *) b;
- if (a_auth->roleid < b_auth->roleid) return -1;
- if (a_auth->roleid > b_auth->roleid) return 1;
+ if (a_auth->roleid < b_auth->roleid)
+ return -1;
+ if (a_auth->roleid > b_auth->roleid)
+ return 1;
return 0;
}
static int
name_compar(const void *a, const void *b)
{
- const auth_entry *a_auth = (const auth_entry*) a;
- const auth_entry *b_auth = (const auth_entry*) b;
+ const auth_entry *a_auth = (const auth_entry *) a;
+ const auth_entry *b_auth = (const auth_entry *) b;
- return strcmp(a_auth->rolname,b_auth->rolname);
+ return strcmp(a_auth->rolname, b_auth->rolname);
}
/* qsort comparator for sorting authmem_entry array by memberid */
static int
mem_compar(const void *a, const void *b)
{
- const authmem_entry *a_auth = (const authmem_entry*) a;
- const authmem_entry *b_auth = (const authmem_entry*) b;
+ const authmem_entry *a_auth = (const authmem_entry *) a;
+ const authmem_entry *b_auth = (const authmem_entry *) b;
- if (a_auth->memberid < b_auth->memberid) return -1;
- if (a_auth->memberid > b_auth->memberid) return 1;
+ if (a_auth->memberid < b_auth->memberid)
+ return -1;
+ if (a_auth->memberid > b_auth->memberid)
+ return 1;
return 0;
}
char *filename,
*tempname;
int bufsize;
- BlockNumber totalblocks;
+ BlockNumber totalblocks;
FILE *fp;
mode_t oumask;
HeapScanDesc scan;
int curr_mem = 0;
int total_mem = 0;
int est_rows;
- auth_entry *auth_info;
+ auth_entry *auth_info;
authmem_entry *authmem_info;
/*
* Create a temporary filename to be renamed later. This prevents the
- * backend from clobbering the flat file while the postmaster might
- * be reading from it.
+ * backend from clobbering the flat file while the postmaster might be
+ * reading from it.
*/
filename = auth_getflatfilename();
bufsize = strlen(filename) + 12;
tempname)));
/*
- * Read pg_authid and fill temporary data structures. Note we must
- * read all roles, even those without rolcanlogin.
+ * Read pg_authid and fill temporary data structures. Note we must read
+ * all roles, even those without rolcanlogin.
*/
totalblocks = RelationGetNumberOfBlocks(rel_authid);
totalblocks = totalblocks ? totalblocks : 1;
- est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData)+sizeof(FormData_pg_authid)));
- auth_info = (auth_entry*) palloc(est_rows*sizeof(auth_entry));
+ est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData) + sizeof(FormData_pg_authid)));
+ auth_info = (auth_entry *) palloc(est_rows * sizeof(auth_entry));
scan = heap_beginscan(rel_authid, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Form_pg_authid aform = (Form_pg_authid) GETSTRUCT(tuple);
HeapTupleHeader tup = tuple->t_data;
- char *tp; /* ptr to tuple data */
- long off; /* offset in tuple data */
+ char *tp; /* ptr to tuple data */
+ long off; /* offset in tuple data */
bits8 *bp = tup->t_bits; /* ptr to null bitmask in tuple */
Datum datum;
if (curr_role >= est_rows)
{
est_rows *= 2;
- auth_info = (auth_entry*)
- repalloc(auth_info, est_rows*sizeof(auth_entry));
+ auth_info = (auth_entry *)
+ repalloc(auth_info, est_rows * sizeof(auth_entry));
}
auth_info[curr_role].roleid = HeapTupleGetOid(tuple);
auth_info[curr_role].member_of = NIL;
/*
- * We can't use heap_getattr() here because during startup we will
- * not have any tupdesc for pg_authid. Fortunately it's not too
- * hard to work around this. rolpassword is the first possibly-null
- * field so we can compute its offset directly.
+ * We can't use heap_getattr() here because during startup we will not
+ * have any tupdesc for pg_authid. Fortunately it's not too hard to
+ * work around this. rolpassword is the first possibly-null field so
+ * we can compute its offset directly.
*/
tp = (char *) tup + tup->t_hoff;
off = offsetof(FormData_pg_authid, rolpassword);
datum = PointerGetDatum(tp + off);
/*
- * The password probably shouldn't ever be out-of-line toasted;
- * if it is, ignore it, since we can't handle that in startup mode.
+ * The password probably shouldn't ever be out-of-line toasted; if
+ * it is, ignore it, since we can't handle that in startup mode.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(datum)))
auth_info[curr_role].rolpassword = pstrdup("");
*/
totalblocks = RelationGetNumberOfBlocks(rel_authmem);
totalblocks = totalblocks ? totalblocks : 1;
- est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData)+sizeof(FormData_pg_auth_members)));
- authmem_info = (authmem_entry*) palloc(est_rows*sizeof(authmem_entry));
+ est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData) + sizeof(FormData_pg_auth_members)));
+ authmem_info = (authmem_entry *) palloc(est_rows * sizeof(authmem_entry));
scan = heap_beginscan(rel_authmem, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
if (curr_mem >= est_rows)
{
est_rows *= 2;
- authmem_info = (authmem_entry*)
- repalloc(authmem_info, est_rows*sizeof(authmem_entry));
+ authmem_info = (authmem_entry *)
+ repalloc(authmem_info, est_rows * sizeof(authmem_entry));
}
authmem_info[curr_mem].roleid = memform->roleid;
heap_endscan(scan);
/*
- * Search for memberships. We can skip all this if pg_auth_members
- * is empty.
+ * Search for memberships. We can skip all this if pg_auth_members is
+ * empty.
*/
if (total_mem > 0)
{
*/
qsort(auth_info, total_roles, sizeof(auth_entry), oid_compar);
qsort(authmem_info, total_mem, sizeof(authmem_entry), mem_compar);
+
/*
* For each role, find what it belongs to.
*/
for (curr_role = 0; curr_role < total_roles; curr_role++)
{
- List *roles_list;
- List *roles_names_list = NIL;
- ListCell *mem;
+ List *roles_list;
+ List *roles_names_list = NIL;
+ ListCell *mem;
/* We can skip this for non-login roles */
if (!auth_info[curr_role].rolcanlogin)
continue;
/*
- * This search algorithm is the same as in is_member_of_role;
- * we are just working with a different input data structure.
+ * This search algorithm is the same as in is_member_of_role; we
+ * are just working with a different input data structure.
*/
roles_list = list_make1_oid(auth_info[curr_role].roleid);
{
authmem_entry key;
authmem_entry *found_mem;
- int first_found, last_found, i;
+ int first_found,
+ last_found,
+ i;
key.memberid = lfirst_oid(mem);
found_mem = bsearch(&key, authmem_info, total_mem,
sizeof(authmem_entry), mem_compar);
if (!found_mem)
continue;
+
/*
- * bsearch found a match for us; but if there were
- * multiple matches it could have found any one of them.
- * Locate first and last match.
+ * bsearch found a match for us; but if there were multiple
+ * matches it could have found any one of them. Locate first
+ * and last match.
*/
first_found = last_found = (found_mem - authmem_info);
while (first_found > 0 &&
while (last_found + 1 < total_mem &&
mem_compar(&key, &authmem_info[last_found + 1]) == 0)
last_found++;
+
/*
* Now add all the new roles to roles_list.
*/
for (i = first_found; i <= last_found; i++)
roles_list = list_append_unique_oid(roles_list,
- authmem_info[i].roleid);
+ authmem_info[i].roleid);
}
/*
- * Convert list of role Oids to list of role names.
- * We must do this before re-sorting auth_info.
+ * Convert list of role Oids to list of role names. We must do
+ * this before re-sorting auth_info.
*
- * We skip the first list element (curr_role itself) since there
- * is no point in writing that a role is a member of itself.
+ * We skip the first list element (curr_role itself) since there is
+ * no point in writing that a role is a member of itself.
*/
for_each_cell(mem, lnext(list_head(roles_list)))
{
- auth_entry key_auth;
+ auth_entry key_auth;
auth_entry *found_role;
key_auth.roleid = lfirst_oid(mem);
found_role = bsearch(&key_auth, auth_info, total_roles,
sizeof(auth_entry), oid_compar);
- if (found_role) /* paranoia */
+ if (found_role) /* paranoia */
roles_names_list = lappend(roles_names_list,
found_role->rolname);
}
if (arole->rolcanlogin)
{
- ListCell *mem;
+ ListCell *mem;
fputs_quote(arole->rolname, fp);
fputs(" ", fp);
tempname)));
/*
- * Rename the temp file to its final name, deleting the old flat file.
- * We expect that rename(2) is an atomic action.
+ * Rename the temp file to its final name, deleting the old flat file. We
+ * expect that rename(2) is an atomic action.
*/
if (rename(tempname, filename))
ereport(ERROR,
{
ResourceOwner owner;
RelFileNode rnode;
- Relation rel_db, rel_authid, rel_authmem;
+ Relation rel_db,
+ rel_authid,
+ rel_authmem;
/*
- * We don't have any hope of running a real relcache, but we can use
- * the same fake-relcache facility that WAL replay uses.
+ * We don't have any hope of running a real relcache, but we can use the
+ * same fake-relcache facility that WAL replay uses.
*/
XLogInitRelationCache();
}
/*
- * Advance command counter to be certain we see all effects of the
- * current transaction.
+ * Advance command counter to be certain we see all effects of the current
+ * transaction.
*/
CommandCounterIncrement();
/*
- * We use ExclusiveLock to ensure that only one backend writes the
- * flat file(s) at a time. That's sufficient because it's okay to
- * allow plain reads of the tables in parallel. There is some chance
- * of a deadlock here (if we were triggered by a user update of one
- * of the tables, which likely won't have gotten a strong enough lock),
- * so get the locks we need before writing anything.
+ * We use ExclusiveLock to ensure that only one backend writes the flat
+ * file(s) at a time. That's sufficient because it's okay to allow plain
+ * reads of the tables in parallel. There is some chance of a deadlock
+ * here (if we were triggered by a user update of one of the tables, which
+ * likely won't have gotten a strong enough lock), so get the locks we
+ * need before writing anything.
*
- * For writing the auth file, it's sufficient to ExclusiveLock pg_authid;
- * we take just regular AccessShareLock on pg_auth_members.
+ * For writing the auth file, it's sufficient to ExclusiveLock pg_authid; we
+ * take just regular AccessShareLock on pg_auth_members.
*/
if (database_file_update_subid != InvalidSubTransactionId)
drel = heap_open(DatabaseRelationId, ExclusiveLock);
* or pg_auth_members via general-purpose INSERT/UPDATE/DELETE commands.
*
* It is sufficient for this to be a STATEMENT trigger since we don't
- * care which individual rows changed. It doesn't much matter whether
+ * care which individual rows changed. It doesn't much matter whether
* it's a BEFORE or AFTER trigger.
*/
Datum
void *recdata, uint32 len)
{
/*
- * Set flags to do the needed file updates at the end of my own
- * current transaction. (XXX this has some issues if my own
- * transaction later rolls back, or if there is any significant
- * delay before I commit. OK for now because we disallow
- * COMMIT PREPARED inside a transaction block.)
+ * Set flags to do the needed file updates at the end of my own current
+ * transaction. (XXX this has some issues if my own transaction later
+ * rolls back, or if there is any significant delay before I commit. OK
+ * for now because we disallow COMMIT PREPARED inside a transaction
+ * block.)
*/
if (info & FF_BIT_DATABASE)
database_file_update_needed();
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.149 2005/08/17 22:14:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.150 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "storage/ipc.h"
#include "storage/pg_shmem.h"
#include "storage/proc.h"
-#include "storage/procarray.h"
+#include "storage/procarray.h"
#include "utils/builtins.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
* DEFINER functions, as well as locally in some specialized commands.
* ----------------------------------------------------------------
*/
-static Oid AuthenticatedUserId = InvalidOid;
-static Oid SessionUserId = InvalidOid;
-static Oid OuterUserId = InvalidOid;
-static Oid CurrentUserId = InvalidOid;
+static Oid AuthenticatedUserId = InvalidOid;
+static Oid SessionUserId = InvalidOid;
+static Oid OuterUserId = InvalidOid;
+static Oid CurrentUserId = InvalidOid;
/* We also have to remember the superuser state of some of these levels */
static bool AuthenticatedUserIsSuperuser = false;
/*
* These next checks are not enforced when in standalone mode, so that
- * there is a way to recover from sillinesses like
- * "UPDATE pg_authid SET rolcanlogin = false;".
+ * there is a way to recover from sillinesses like "UPDATE pg_authid SET
+ * rolcanlogin = false;".
*
* We do not enforce them for the autovacuum process either.
*/
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("role \"%s\" is not permitted to log in",
rolename)));
+
/*
* Check connection limit for this role.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
- * exactly seems more trouble than it is worth, however; instead
- * we just document that the connection limit is approximate.
+ * exactly seems more trouble than it is worth, however; instead we
+ * just document that the connection limit is approximate.
*/
if (rform->rolconnlimit >= 0 &&
!AuthenticatedUserIsSuperuser &&
errmsg("too many connections for role \"%s\"",
rolename)));
}
-
+
/* Record username and superuser status as GUC settings too */
SetConfigOption("session_authorization", rolename,
PGC_BACKEND, PGC_S_OVERRIDE);
PGC_INTERNAL, PGC_S_OVERRIDE);
/*
- * Set up user-specific configuration variables. This is a good place
- * to do it so we don't have to read pg_authid twice during session
- * startup.
+ * Set up user-specific configuration variables. This is a good place to
+ * do it so we don't have to read pg_authid twice during session startup.
*/
datum = SysCacheGetAttr(AUTHNAME, roleTup,
Anum_pg_authid_rolconfig, &isnull);
!AuthenticatedUserIsSuperuser)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to set session authorization")));
+ errmsg("permission denied to set session authorization")));
SetSessionUserId(userid, is_superuser);
* Change Role ID while running (SET ROLE)
*
* If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the
- * session user authorization. In this case the is_superuser argument
+ * session user authorization. In this case the is_superuser argument
* is ignored.
*
* When roleid is not InvalidOid, the caller must have checked whether
pid_t my_pid = getpid();
/*
- * We need a loop here because of race conditions. But don't loop
- * forever (for example, a non-writable $PGDATA directory might cause
- * a failure that won't go away). 100 tries seems like plenty.
+ * We need a loop here because of race conditions. But don't loop forever
+ * (for example, a non-writable $PGDATA directory might cause a failure
+ * that won't go away). 100 tries seems like plenty.
*/
for (ntries = 0;; ntries++)
{
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See
- * comments below.
+ * Think not to make the file protection weaker than 0600. See comments
+ * below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
if (fd >= 0)
/*
* Check to see if the other process still exists
*
- * If the PID in the lockfile is our own PID or our parent's PID,
- * then the file must be stale (probably left over from a previous
- * system boot cycle). We need this test because of the likelihood
- * that a reboot will assign exactly the same PID as we had in the
- * previous reboot. Also, if there is just one more process launch
- * in this reboot than in the previous one, the lockfile might mention
- * our parent's PID. We can reject that since we'd never be launched
- * directly by a competing postmaster. We can't detect grandparent
- * processes unfortunately, but if the init script is written carefully
- * then all but the immediate parent shell will be root-owned processes
- * and so the kill test will fail with EPERM.
+ * If the PID in the lockfile is our own PID or our parent's PID, then
+ * the file must be stale (probably left over from a previous system
+ * boot cycle). We need this test because of the likelihood that a
+ * reboot will assign exactly the same PID as we had in the previous
+ * reboot. Also, if there is just one more process launch in this
+ * reboot than in the previous one, the lockfile might mention our
+ * parent's PID. We can reject that since we'd never be launched
+ * directly by a competing postmaster. We can't detect grandparent
+ * processes unfortunately, but if the init script is written
+ * carefully then all but the immediate parent shell will be
+ * root-owned processes and so the kill test will fail with EPERM.
*
* We can treat the EPERM-error case as okay because that error implies
* that the existing process has a different userid than we do, which
* means it cannot be a competing postmaster. A postmaster cannot
* successfully attach to a data directory owned by a userid other
- * than its own. (This is now checked directly in checkDataDir(),
- * but has been true for a long time because of the restriction that
- * the data directory isn't group- or world-accessible.) Also,
- * since we create the lockfiles mode 600, we'd have failed above
- * if the lockfile belonged to another userid --- which means that
- * whatever process kill() is reporting about isn't the one that
- * made the lockfile. (NOTE: this last consideration is the only
- * one that keeps us from blowing away a Unix socket file belonging
- * to an instance of Postgres being run by someone else, at least
- * on machines where /tmp hasn't got a stickybit.)
+ * than its own. (This is now checked directly in checkDataDir(), but
+ * has been true for a long time because of the restriction that the
+ * data directory isn't group- or world-accessible.) Also, since we
+ * create the lockfiles mode 600, we'd have failed above if the
+ * lockfile belonged to another userid --- which means that whatever
+ * process kill() is reporting about isn't the one that made the
+ * lockfile. (NOTE: this last consideration is the only one that
+ * keeps us from blowing away a Unix socket file belonging to an
+ * instance of Postgres being run by someone else, at least on
+ * machines where /tmp hasn't got a stickybit.)
*
- * Windows hasn't got getppid(), but doesn't need it since it's not
- * using real kill() either...
+ * Windows hasn't got getppid(), but doesn't need it since it's not using
+ * real kill() either...
*
- * Normally kill() will fail with ESRCH if the given PID doesn't
- * exist. BeOS returns EINVAL for some silly reason, however.
+ * Normally kill() will fail with ESRCH if the given PID doesn't exist.
+ * BeOS returns EINVAL for some silly reason, however.
*/
if (other_pid != my_pid
#ifndef WIN32
}
/*
- * No, the creating process did not exist. However, it could be
- * that the postmaster crashed (or more likely was kill -9'd by a
- * clueless admin) but has left orphan backends behind. Check for
- * this by looking to see if there is an associated shmem segment
- * that is still in use.
+ * No, the creating process did not exist. However, it could be that
+ * the postmaster crashed (or more likely was kill -9'd by a clueless
+ * admin) but has left orphan backends behind. Check for this by
+ * looking to see if there is an associated shmem segment that is
+ * still in use.
*/
if (isDDLock)
{
if (PGSharedMemoryIsInUse(id1, id2))
ereport(FATAL,
(errcode(ERRCODE_LOCK_FILE_EXISTS),
- errmsg("pre-existing shared memory block "
- "(key %lu, ID %lu) is still in use",
- id1, id2),
- errhint("If you're sure there are no old "
- "server processes still running, remove "
- "the shared memory block with "
- "the command \"ipcclean\", \"ipcrm\", "
- "or just delete the file \"%s\".",
- filename)));
+ errmsg("pre-existing shared memory block "
+ "(key %lu, ID %lu) is still in use",
+ id1, id2),
+ errhint("If you're sure there are no old "
+ "server processes still running, remove "
+ "the shared memory block with "
+ "the command \"ipcclean\", \"ipcrm\", "
+ "or just delete the file \"%s\".",
+ filename)));
}
}
}
/*
- * Looks like nobody's home. Unlink the file and try again to
- * create it. Need a loop because of possible race condition
- * against other would-be creators.
+ * Looks like nobody's home. Unlink the file and try again to create
+ * it. Need a loop because of possible race condition against other
+ * would-be creators.
*/
if (unlink(filename) < 0)
ereport(FATAL,
errmsg("could not remove old lock file \"%s\": %m",
filename),
errhint("The file seems accidentally left over, but "
- "it could not be removed. Please remove the file "
+ "it could not be removed. Please remove the file "
"by hand and try again.")));
}
errno = save_errno ? save_errno : ENOSPC;
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not write lock file \"%s\": %m", filename)));
+ errmsg("could not write lock file \"%s\": %m", filename)));
}
if (close(fd))
{
errno = save_errno;
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not write lock file \"%s\": %m", filename)));
+ errmsg("could not write lock file \"%s\": %m", filename)));
}
/*
if (socketLockFile[0] != '\0')
{
/*
- * utime() is POSIX standard, utimes() is a common alternative; if
- * we have neither, fall back to actually reading the file (which
- * only sets the access time not mod time, but that should be
- * enough in most cases). In all paths, we ignore errors.
+ * utime() is POSIX standard, utimes() is a common alternative; if we
+ * have neither, fall back to actually reading the file (which only
+ * sets the access time not mod time, but that should be enough in
+ * most cases). In all paths, we ignore errors.
*/
#ifdef HAVE_UTIME
utime(socketLockFile, NULL);
else
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", full_path)));
+ errmsg("could not open file \"%s\": %m", full_path)));
}
ret = fscanf(file, "%ld.%ld", &file_major, &file_minor);
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("database files are incompatible with server"),
errdetail("The data directory was initialized by PostgreSQL version %ld.%ld, "
- "which is not compatible with this version %s.",
+ "which is not compatible with this version %s.",
file_major, file_minor, version_string)));
}
list_free(elemlist);
ereport(LOG,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("invalid list syntax for parameter \"preload_libraries\"")));
+ errmsg("invalid list syntax for parameter \"preload_libraries\"")));
return;
}
if (sep)
{
/*
- * a colon separator implies there is an initialization
- * function that we need to run in addition to loading the
- * library
+ * a colon separator implies there is an initialization function
+ * that we need to run in addition to loading the library
*/
size_t filename_len = sep - tok;
size_t funcname_len = strlen(tok) - filename_len - 1;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.157 2005/08/11 21:11:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.158 2005/10/15 02:49:33 momjian Exp $
*
*
*-------------------------------------------------------------------------
*
* Since FindMyDatabase cannot lock pg_database, the information it read
* could be stale; for example we might have attached to a database that's in
- * process of being destroyed by dropdb(). This routine is called after
+ * process of being destroyed by dropdb(). This routine is called after
* we have all the locking and other infrastructure running --- now we can
* check that we are really attached to a valid database.
*
ReverifyMyDatabase(const char *name)
{
Relation pgdbrel;
- SysScanDesc pgdbscan;
+ SysScanDesc pgdbscan;
ScanKeyData key;
HeapTuple tup;
Form_pg_database dbform;
/*
- * Because we grab RowShareLock here, we can be sure that dropdb()
- * is not running in parallel with us (any more).
+ * Because we grab RowShareLock here, we can be sure that dropdb() is not
+ * running in parallel with us (any more).
*/
pgdbrel = heap_open(DatabaseRelationId, RowShareLock);
heap_close(pgdbrel, RowShareLock);
/*
- * The only real problem I could have created is to load dirty
- * buffers for the dead database into shared buffer cache; if I
- * did, some other backend will eventually try to write them and
- * die in mdblindwrt. Flush any such pages to forestall trouble.
+ * The only real problem I could have created is to load dirty buffers
+ * for the dead database into shared buffer cache; if I did, some
+ * other backend will eventually try to write them and die in
+ * mdblindwrt. Flush any such pages to forestall trouble.
*/
DropBuffers(MyDatabaseId);
/* Now I can commit hara-kiri with a clear conscience... */
ereport(FATAL,
(errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("database \"%s\", OID %u, has disappeared from pg_database",
- name, MyDatabaseId)));
+ errmsg("database \"%s\", OID %u, has disappeared from pg_database",
+ name, MyDatabaseId)));
}
dbform = (Form_pg_database) GETSTRUCT(tup);
if (!dbform->datallowconn)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("database \"%s\" is not currently accepting connections",
- name)));
+ errmsg("database \"%s\" is not currently accepting connections",
+ name)));
+
/*
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
- * exactly seems more trouble than it is worth, however; instead
- * we just document that the connection limit is approximate.
+ * exactly seems more trouble than it is worth, however; instead we
+ * just document that the connection limit is approximate.
*/
if (dbform->datconnlimit >= 0 &&
!superuser() &&
}
/*
- * OK, we're golden. Next to-do item is to save the encoding
- * info out of the pg_database tuple.
+ * OK, we're golden. Next to-do item is to save the encoding info out of
+ * the pg_database tuple.
*/
SetDatabaseEncoding(dbform->encoding);
/* Record it as a GUC internal option, too */
if (!IsUnderPostmaster) /* postmaster already did this */
{
/*
- * We're running a postgres bootstrap process or a standalone
- * backend. Create private "shmem" and semaphores.
+ * We're running a postgres bootstrap process or a standalone backend.
+ * Create private "shmem" and semaphores.
*/
CreateSharedMemoryAndSemaphores(true, 0);
}
* The return value indicates whether the userID is a superuser. (That
* can only be tested inside a transaction, so we want to do it during
* the startup transaction rather than doing a separate one in postgres.c.)
- *
+ *
* Note:
* Be very careful with the order of calls in the InitPostgres function.
* --------------------------------
/*
* Set up the global variables holding database id and path.
*
- * We take a shortcut in the bootstrap case, otherwise we have to look up
- * the db name in pg_database.
+ * We take a shortcut in the bootstrap case, otherwise we have to look up the
+ * db name in pg_database.
*/
if (bootstrap)
{
char *fullpath;
/*
- * Formerly we validated DataDir here, but now that's done
- * earlier.
+ * Formerly we validated DataDir here, but now that's done earlier.
*/
/*
- * Find oid and tablespace of the database we're about to open.
- * Since we're not yet up and running we have to use the hackish
+ * Find oid and tablespace of the database we're about to open. Since
+ * we're not yet up and running we have to use the hackish
* FindMyDatabase.
*/
if (!FindMyDatabase(dbname, &MyDatabaseId, &MyDatabaseTableSpace))
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist",
dbname),
- errdetail("The database subdirectory \"%s\" is missing.",
- fullpath)));
+ errdetail("The database subdirectory \"%s\" is missing.",
+ fullpath)));
else
ereport(FATAL,
(errcode_for_file_access(),
*/
/*
- * Set up my per-backend PGPROC struct in shared memory. (We need
- * to know MyDatabaseId before we can do this, since it's entered into
- * the PGPROC struct.)
+ * Set up my per-backend PGPROC struct in shared memory. (We need to
+ * know MyDatabaseId before we can do this, since it's entered into the
+ * PGPROC struct.)
*/
InitProcess();
/*
* Initialize my entry in the shared-invalidation manager's array of
- * per-backend data. (Formerly this came before InitProcess, but now
- * it must happen after, because it uses MyProc.) Once I have done
- * this, I am visible to other backends!
+ * per-backend data. (Formerly this came before InitProcess, but now it
+ * must happen after, because it uses MyProc.) Once I have done this, I
+ * am visible to other backends!
*
* Sets up MyBackendId, a unique backend identifier.
*/
InitBufferPoolBackend();
/*
- * Initialize local process's access to XLOG. In bootstrap case we
- * may skip this since StartupXLOG() was run instead.
+ * Initialize local process's access to XLOG. In bootstrap case we may
+ * skip this since StartupXLOG() was run instead.
*/
if (!bootstrap)
InitXLOGAccess();
/*
- * Initialize the relation descriptor cache. This must create at
- * least the minimum set of "nailed-in" cache entries. No catalog
- * access happens here.
+ * Initialize the relation descriptor cache. This must create at least
+ * the minimum set of "nailed-in" cache entries. No catalog access
+ * happens here.
*/
RelationCacheInitialize();
/*
- * Initialize all the system catalog caches. Note that no catalog
- * access happens here; we only set up the cache structure.
+ * Initialize all the system catalog caches. Note that no catalog access
+ * happens here; we only set up the cache structure.
*/
InitCatalogCache();
EnablePortalManager();
/*
- * Set up process-exit callback to do pre-shutdown cleanup. This
- * has to be after we've initialized all the low-level modules
- * like the buffer manager, because during shutdown this has to
- * run before the low-level modules start to close down. On the
- * other hand, we want it in place before we begin our first
- * transaction --- if we fail during the initialization transaction,
- * as is entirely possible, we need the AbortTransaction call to
- * clean up.
+ * Set up process-exit callback to do pre-shutdown cleanup. This has to
+ * be after we've initialized all the low-level modules like the buffer
+ * manager, because during shutdown this has to run before the low-level
+ * modules start to close down. On the other hand, we want it in place
+ * before we begin our first transaction --- if we fail during the
+ * initialization transaction, as is entirely possible, we need the
+ * AbortTransaction call to clean up.
*/
on_shmem_exit(ShutdownPostgres, 0);
}
/*
- * Unless we are bootstrapping, double-check that InitMyDatabaseInfo()
- * got a correct result. We can't do this until all the
- * database-access infrastructure is up. (Also, it wants to know if
- * the user is a superuser, so the above stuff has to happen first.)
+ * Unless we are bootstrapping, double-check that InitMyDatabaseInfo() got
+ * a correct result. We can't do this until all the database-access
+ * infrastructure is up. (Also, it wants to know if the user is a
+ * superuser, so the above stuff has to happen first.)
*/
if (!bootstrap)
ReverifyMyDatabase(dbname);
/*
* Final phase of relation cache startup: write a new cache file if
- * necessary. This is done after ReverifyMyDatabase to avoid writing
- * a cache file into a dead database.
+ * necessary. This is done after ReverifyMyDatabase to avoid writing a
+ * cache file into a dead database.
*/
RelationCacheInitializePhase3();
AbortOutOfAnyTransaction();
/*
- * User locks are not released by transaction end, so be sure to
- * release them explicitly.
+ * User locks are not released by transaction end, so be sure to release
+ * them explicitly.
*/
#ifdef USER_LOCKS
LockReleaseAll(USER_LOCKMETHOD, true);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.54 2005/09/24 17:53:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.55 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
latin2mic_with_table(
unsigned char *l, /* local charset string (source) */
- unsigned char *p, /* pointer to store mule internal
- * code (destination) */
+ unsigned char *p, /* pointer to store mule internal code
+ * (destination) */
int len, /* length of l */
int lc, /* leading character of p */
unsigned char *tab /* code conversion table */
*/
void
mic2latin_with_table(
- unsigned char *mic, /* mule internal code
- * (source) */
+ unsigned char *mic, /* mule internal code (source) */
unsigned char *p, /* local code (destination) */
int len, /* length of p */
int lc, /* leading character */
{
ereport(WARNING,
(errcode(ERRCODE_UNTRANSLATABLE_CHARACTER),
- errmsg("ignoring unconvertible UTF8 character 0x%04x",
- iutf)));
+ errmsg("ignoring unconvertible UTF8 character 0x%04x",
+ iutf)));
continue;
}
if (p->code & 0xff000000)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c,v 1.12 2005/09/24 17:53:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c,v 1.13 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
while (euc_end >= euc && (c1 = *euc++))
{
- if(c1 < 0x80)
+ if (c1 < 0x80)
{
/* should be ASCII */
*p++ = c1;
}
else
{
- int i, k2;
+ int i,
+ k2;
/* IBM kanji */
for (i = 0;; i++)
}
}
}
- }
+ }
else
- {
+ {
/* JIS X0208 kanji? */
c2 = *euc++;
k = (c1 << 8) | (c2 & 0xff);
while (sjis_end >= sjis && (c1 = *sjis++))
{
- if(c1 < 0x80)
+ if (c1 < 0x80)
{
/* should be ASCII */
*p++ = c1;
}
*p = '\0';
}
-
*
* 1999/1/15 Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.5 2004/08/30 02:54:40 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.6 2005/10/15 02:49:34 momjian Exp $
*/
/* can be used in either frontend or backend */
{
unsigned short code,
peer;
-} codes_t;
+} codes_t;
/* map Big5 Level 1 to CNS 11643-1992 Plane 1 */
static codes_t big5Level1ToCnsPlane1[25] = { /* range */
};
static unsigned short BinarySearchRange
- (codes_t *array, int high, unsigned short code)
+ (codes_t * array, int high, unsigned short code)
{
int low,
mid,
/*
* NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e,
- * 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix
- * is 0x9d. [region_low, region_high]
- * We should remember big5 has two different regions
- * (above). There is a bias for the distance between these
- * regions. 0xa1 - 0x7e + bias = 1 (Distance between 0xa1
- * and 0x7e is 1.) bias = - 0x22.
+ * 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is
+ * 0x9d. [region_low, region_high] We
+ * should remember big5 has two different regions (above).
+ * There is a bias for the distance between these regions.
+ * 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is
+ * 1.) bias = - 0x22.
*/
distance = tmp * 0x9d + high - low +
(high >= 0xa1 ? (low >= 0xa1 ? 0 : -0x22)
: (low >= 0xa1 ? +0x22 : 0));
/*
- * NOTE: we have to convert the distance into a code
- * point. The code point's low_byte is 0x21 plus mod_0x5e.
- * In the first, we extract the mod_0x5e of the starting
- * code point, subtracting 0x21, and add distance to it.
- * Then we calculate again mod_0x5e of them, and restore
- * the final codepoint, adding 0x21.
+ * NOTE: we have to convert the distance into a code point.
+ * The code point's low_byte is 0x21 plus mod_0x5e. In the
+ * first, we extract the mod_0x5e of the starting code point,
+ * subtracting 0x21, and add distance to it. Then we calculate
+ * again mod_0x5e of them, and restore the final codepoint,
+ * adding 0x21.
*/
tmp = (array[mid].peer & 0x00ff) + distance - 0x21;
tmp = (array[mid].peer & 0xff00) + ((tmp / 0x5e) << 8)
tmp = ((code & 0xff00) - (array[mid].code & 0xff00)) >> 8;
/*
- * NOTE: ISO charsets ranges between 0x21-0xfe
- * (94charset). Its radix is 0x5e. But there is no
- * distance bias like big5.
+ * NOTE: ISO charsets ranges between 0x21-0xfe (94charset).
+ * Its radix is 0x5e. But there is no distance bias like big5.
*/
distance = tmp * 0x5e
+ ((int) (code & 0x00ff) - (int) (array[mid].code & 0x00ff));
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.11 2005/09/24 17:53:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapKOI8R,
- sizeof(LUmapKOI8R) / sizeof(pg_local_to_utf), PG_KOI8R, len);
+ sizeof(LUmapKOI8R) / sizeof(pg_local_to_utf), PG_KOI8R, len);
PG_RETURN_VOID();
}
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1251,
- sizeof(LUmapWIN1251) / sizeof(pg_local_to_utf), PG_WIN1251, len);
+ sizeof(LUmapWIN1251) / sizeof(pg_local_to_utf), PG_WIN1251, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c,v 1.11 2005/09/24 17:53:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_CN,
- sizeof(LUmapEUC_CN) / sizeof(pg_local_to_utf), PG_EUC_CN, len);
+ sizeof(LUmapEUC_CN) / sizeof(pg_local_to_utf), PG_EUC_CN, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c,v 1.11 2005/09/24 17:53:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_JP,
- sizeof(LUmapEUC_JP) / sizeof(pg_local_to_utf), PG_EUC_JP, len);
+ sizeof(LUmapEUC_JP) / sizeof(pg_local_to_utf), PG_EUC_JP, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c,v 1.11 2005/09/24 17:53:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_KR,
- sizeof(LUmapEUC_KR) / sizeof(pg_local_to_utf), PG_EUC_KR, len);
+ sizeof(LUmapEUC_KR) / sizeof(pg_local_to_utf), PG_EUC_KR, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c,v 1.11 2005/09/24 17:53:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_TW,
- sizeof(LUmapEUC_TW) / sizeof(pg_local_to_utf), PG_EUC_TW, len);
+ sizeof(LUmapEUC_TW) / sizeof(pg_local_to_utf), PG_EUC_TW, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.11 2005/09/24 17:53:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapGB18030,
- sizeof(LUmapGB18030) / sizeof(pg_local_to_utf), PG_GB18030, len);
+ sizeof(LUmapGB18030) / sizeof(pg_local_to_utf), PG_GB18030, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.14 2005/09/24 17:53:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.15 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
pg_utf_to_local *map2; /* from UTF8 map name */
int size1; /* size of map1 */
int size2; /* size of map2 */
-} pg_conv_map;
+} pg_conv_map;
static pg_conv_map maps[] = {
{PG_SQL_ASCII}, /* SQL/ASCII */
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c,v 1.11 2005/09/24 17:53:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c,v 1.12 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapJOHAB,
- sizeof(LUmapJOHAB) / sizeof(pg_local_to_utf), PG_JOHAB, len);
+ sizeof(LUmapJOHAB) / sizeof(pg_local_to_utf), PG_JOHAB, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c,v 1.12 2005/09/24 17:53:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c,v 1.13 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1250,
- sizeof(LUmapWIN1250) / sizeof(pg_local_to_utf), PG_WIN1250, len);
+ sizeof(LUmapWIN1250) / sizeof(pg_local_to_utf), PG_WIN1250, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c,v 1.4 2005/09/24 17:53:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c,v 1.5 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1252,
- sizeof(LUmapWIN1252) / sizeof(pg_local_to_utf), PG_WIN1252, len);
+ sizeof(LUmapWIN1252) / sizeof(pg_local_to_utf), PG_WIN1252, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c,v 1.12 2005/09/24 17:53:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c,v 1.13 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1256,
- sizeof(LUmapWIN1256) / sizeof(pg_local_to_utf), PG_WIN1256, len);
+ sizeof(LUmapWIN1256) / sizeof(pg_local_to_utf), PG_WIN1256, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c,v 1.2 2005/09/24 17:53:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c,v 1.3 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1258,
- sizeof(LUmapWIN1258) / sizeof(pg_local_to_utf), PG_WIN1258, len);
+ sizeof(LUmapWIN1258) / sizeof(pg_local_to_utf), PG_WIN1258, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c,v 1.12 2005/09/24 17:53:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c,v 1.13 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN874,
- sizeof(LUmapWIN874) / sizeof(pg_local_to_utf), PG_WIN874, len);
+ sizeof(LUmapWIN874) / sizeof(pg_local_to_utf), PG_WIN874, len);
PG_RETURN_VOID();
}
* Encoding names and routines for work with it. All
* in this file is shared bedween FE and BE.
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/encnames.c,v 1.25 2005/03/14 18:31:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/encnames.c,v 1.26 2005/10/15 02:49:33 momjian Exp $
*/
#ifdef FRONTEND
#include "postgres_fe.h"
}, /* Big5; Chinese for Taiwan multibyte set */
{
"euccn", PG_EUC_CN
- }, /* EUC-CN; Extended Unix Code for
- * simplified Chinese */
+ }, /* EUC-CN; Extended Unix Code for simplified
+ * Chinese */
{
"eucjp", PG_EUC_JP
- }, /* EUC-JP; Extended UNIX Code fixed Width
- * for Japanese, standard OSF */
+ }, /* EUC-JP; Extended UNIX Code fixed Width for
+ * Japanese, standard OSF */
{
"euckr", PG_EUC_KR
- }, /* EUC-KR; Extended Unix Code for Korean ,
- * KS X 1001 standard */
+ }, /* EUC-KR; Extended Unix Code for Korean , KS
+ * X 1001 standard */
{
"euctw", PG_EUC_TW
}, /* EUC-TW; Extended Unix Code for
}, /* ISO-8859-9; RFC1345,KXS2 */
{
"johab", PG_JOHAB
- }, /* JOHAB; Extended Unix Code for
- * simplified Chinese */
+ }, /* JOHAB; Extended Unix Code for simplified
+ * Chinese */
{
"koi8", PG_KOI8R
}, /* _dirty_ alias for KOI8-R (backward
}, /* alias for WIN1258 */
{
"win", PG_WIN1251
- }, /* _dirty_ alias for windows-1251
- * (backward compatibility) */
+ }, /* _dirty_ alias for windows-1251 (backward
+ * compatibility) */
{
"win1250", PG_WIN1250
}, /* alias for Windows-1250 */
* (currently mule internal code (mic) is used)
* Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.51 2005/09/24 17:53:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.52 2005/10/15 02:49:33 momjian Exp $
*/
#include "postgres.h"
}
/*
- * If we're not inside a transaction then we can't do catalog lookups,
- * so fail. After backend startup, this could only happen if we are
+ * If we're not inside a transaction then we can't do catalog lookups, so
+ * fail. After backend startup, this could only happen if we are
* re-reading postgresql.conf due to SIGHUP --- so basically this just
* constrains the ability to change client_encoding on the fly from
- * postgresql.conf. Which would probably be a stupid thing to do
- * anyway.
+ * postgresql.conf. Which would probably be a stupid thing to do anyway.
*/
if (!IsTransactionState())
return -1;
return 0;
/*
- * load the fmgr info into TopMemoryContext so that it survives
- * outside transaction.
+ * load the fmgr info into TopMemoryContext so that it survives outside
+ * transaction.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
to_server = palloc(sizeof(FmgrInfo));
if (SetClientEncoding(pending_client_encoding, true) < 0)
{
/*
- * Oops, the requested conversion is not available. We couldn't
- * fail before, but we can now.
+ * Oops, the requested conversion is not available. We couldn't fail
+ * before, but we can now.
*/
ereport(FATAL,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
}
/*
- * XXX we should avoid throwing errors in OidFunctionCall. Otherwise
- * we are going into infinite loop! So we have to make sure that the
+ * XXX we should avoid throwing errors in OidFunctionCall. Otherwise we
+ * are going into infinite loop! So we have to make sure that the
* function exists before calling OidFunctionCall.
*/
if (!SearchSysCacheExists(PROCOID,
Datum string = PG_GETARG_DATUM(0);
Datum dest_encoding_name = PG_GETARG_DATUM(1);
Datum src_encoding_name = DirectFunctionCall1(
- namein, CStringGetDatum(DatabaseEncoding->name));
+ namein, CStringGetDatum(DatabaseEncoding->name));
Datum result;
result = DirectFunctionCall3(
- pg_convert2, string, src_encoding_name, dest_encoding_name);
+ pg_convert2, string, src_encoding_name, dest_encoding_name);
/* free memory allocated by namein */
pfree((void *) src_encoding_name);
/*
* build text data type structure. we cannot use textin() here, since
- * textin assumes that input string encoding is same as database
- * encoding.
+ * textin assumes that input string encoding is same as database encoding.
*/
len = strlen((char *) result) + VARHDRSZ;
retval = palloc(len);
while (limit > 0 && *mbstr)
{
- int l = pg_mblen(mbstr);
+ int l = pg_mblen(mbstr);
limit -= l;
mbstr += l;
/*
* conversion functions between pg_wchar and multibyte streams.
* Tatsuo Ishii
- * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.45 2005/09/24 17:53:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.46 2005/10/15 02:49:33 momjian Exp $
*
* WIN1250 client encoding updated by Pavel Behal
*
len = 1;
else if ((*s & 0xe0) == 0xc0)
len = 2;
- else if ((*s & 0xf0) == 0xe0)
- len = 3;
- else if ((*s & 0xf8) == 0xf0)
- len = 4;
- else if ((*s & 0xfc) == 0xf8)
- len = 5;
- else if ((*s & 0xfe) == 0xfc)
- len = 6;
+ else if ((*s & 0xf0) == 0xe0)
+ len = 3;
+ else if ((*s & 0xf8) == 0xf0)
+ len = 4;
+ else if ((*s & 0xfc) == 0xf8)
+ len = 5;
+ else if ((*s & 0xfe) == 0xfc)
+ len = 6;
return (len);
}
{pg_euckr2wchar_with_len, pg_euckr_mblen, pg_euckr_dsplen, 3}, /* 3; PG_EUC_KR */
{pg_euctw2wchar_with_len, pg_euctw_mblen, pg_euctw_dsplen, 3}, /* 4; PG_EUC_TW */
{pg_johab2wchar_with_len, pg_johab_mblen, pg_johab_dsplen, 3}, /* 5; PG_JOHAB */
- {pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, 4}, /* 6; PG_UTF8 */
- {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, 3}, /* 7; PG_MULE_INTERNAL */
+ {pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, 4}, /* 6; PG_UTF8 */
+ {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, 3}, /* 7; PG_MULE_INTERNAL */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 8; PG_LATIN1 */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 9; PG_LATIN2 */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 10; PG_LATIN3 */
return ((encoding >= 0 &&
encoding < sizeof(pg_wchar_table) / sizeof(pg_wchar_tbl)) ?
- ((*pg_wchar_table[encoding].mblen) ((const unsigned char *) mbstr)) :
- ((*pg_wchar_table[PG_SQL_ASCII].mblen) ((const unsigned char *) mbstr)));
+ ((*pg_wchar_table[encoding].mblen) ((const unsigned char *) mbstr)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].mblen) ((const unsigned char *) mbstr)));
}
/*
return ((encoding >= 0 &&
encoding < sizeof(pg_wchar_table) / sizeof(pg_wchar_tbl)) ?
- ((*pg_wchar_table[encoding].dsplen) ((const unsigned char *) mbstr)) :
- ((*pg_wchar_table[PG_SQL_ASCII].dsplen) ((const unsigned char *) mbstr)));
+ ((*pg_wchar_table[encoding].dsplen) ((const unsigned char *) mbstr)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].dsplen) ((const unsigned char *) mbstr)));
}
/*
#ifndef FRONTEND
-bool pg_utf8_islegal(const unsigned char *source, int length) {
- unsigned char a;
- const unsigned char *srcptr = source+length;
- switch (length) {
- default: return false;
- /* Everything else falls through when "true"... */
- case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
- case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
- case 2: if ((a = (*--srcptr)) > 0xBF) return false;
- switch (*source) {
- /* no fall-through in this inner switch */
- case 0xE0: if (a < 0xA0) return false; break;
- case 0xED: if (a > 0x9F) return false; break;
- case 0xF0: if (a < 0x90) return false; break;
- case 0xF4: if (a > 0x8F) return false; break;
- default: if (a < 0x80) return false;
- }
-
- case 1: if (*source >= 0x80 && *source < 0xC2) return false;
- }
- if (*source > 0xF4) return false;
- return true;
+bool
+pg_utf8_islegal(const unsigned char *source, int length)
+{
+ unsigned char a;
+ const unsigned char *srcptr = source + length;
+
+ switch (length)
+ {
+ default:
+ return false;
+ /* Everything else falls through when "true"... */
+ case 4:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF)
+ return false;
+ case 3:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF)
+ return false;
+ case 2:
+ if ((a = (*--srcptr)) > 0xBF)
+ return false;
+ switch (*source)
+ {
+ /* no fall-through in this inner switch */
+ case 0xE0:
+ if (a < 0xA0)
+ return false;
+ break;
+ case 0xED:
+ if (a > 0x9F)
+ return false;
+ break;
+ case 0xF0:
+ if (a < 0x90)
+ return false;
+ break;
+ case 0xF4:
+ if (a > 0x8F)
+ return false;
+ break;
+ default:
+ if (a < 0x80)
+ return false;
+ }
+
+ case 1:
+ if (*source >= 0x80 && *source < 0xC2)
+ return false;
+ }
+ if (*source > 0xF4)
+ return false;
+ return true;
}
while (len > 0 && *mbstr)
{
l = pg_mblen(mbstr);
-
+
/* special UTF-8 check */
if (encoding == PG_UTF8)
{
- if(!pg_utf8_islegal((const unsigned char *) mbstr, l))
+ if (!pg_utf8_islegal((const unsigned char *) mbstr, l))
{
if (noError)
return false;
errmsg("invalid UNICODE byte sequence detected near byte 0x%02x",
(unsigned char) *mbstr)));
}
- } else {
+ }
+ else
+ {
for (i = 1; i < l; i++)
{
/*
if (i >= len || (mbstr[i] & 0x80) == 0)
{
char buf[8 * 2 + 1];
- char *p = buf;
- int j,
- jlimit;
+ char *p = buf;
+ int j,
+ jlimit;
if (noError)
return false;
jlimit = Min(l, len);
- jlimit = Min(jlimit, 8); /* prevent buffer overrun */
+ jlimit = Min(jlimit, 8); /* prevent buffer overrun */
for (j = 0; j < jlimit; j++)
p += sprintf(p, "%02x", (unsigned char) mbstr[j]);
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
- errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
- GetDatabaseEncodingName(), buf)));
+ errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
+ GetDatabaseEncodingName(), buf)));
}
}
}
* Written by Peter Eisentraut
.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.292 2005/10/14 20:53:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.293 2005/10/15 02:49:36 momjian Exp $
*
*--------------------------------------------------------------------
*/
#define PG_KRB_SRVNAM ""
#endif
-#define CONFIG_FILENAME "postgresql.conf"
+#define CONFIG_FILENAME "postgresql.conf"
#define HBA_FILENAME "pg_hba.conf"
#define IDENT_FILENAME "pg_ident.conf"
extern int CommitDelay;
extern int CommitSiblings;
extern char *default_tablespace;
-extern bool fullPageWrites;
+extern bool fullPageWrites;
+
#ifdef TRACE_SORT
-extern bool trace_sort;
+extern bool trace_sort;
#endif
static const char *assign_log_destination(const char *value,
static int syslog_facility = LOG_LOCAL0;
static const char *assign_syslog_facility(const char *facility,
- bool doit, GucSource source);
+ bool doit, GucSource source);
static const char *assign_syslog_ident(const char *ident,
- bool doit, GucSource source);
+ bool doit, GucSource source);
#endif
static const char *assign_defaultxactisolevel(const char *newval, bool doit,
bool log_parser_stats = false;
bool log_planner_stats = false;
bool log_executor_stats = false;
-bool log_statement_stats = false; /* this is sort of all
- * three above together */
+bool log_statement_stats = false; /* this is sort of all three
+ * above together */
bool log_btree_build_stats = false;
bool SQL_inheritance = true;
char *IdentFileName;
char *external_pid_file;
-int tcp_keepalives_idle;
-int tcp_keepalives_interval;
-int tcp_keepalives_count;
+int tcp_keepalives_idle;
+int tcp_keepalives_interval;
+int tcp_keepalives_count;
/*
* These variables are all dummies that don't do anything, except in some
static int max_index_keys;
static int max_identifier_length;
static int block_size;
-static bool integer_datetimes;
-static bool standard_conforming_strings;
+static bool integer_datetimes;
+static bool standard_conforming_strings;
/* should be static, but commands/variable.c needs to get at these */
char *role_string;
{"fsync", PGC_SIGHUP, WAL_SETTINGS,
gettext_noop("Forces synchronization of updates to disk."),
gettext_noop("The server will use the fsync() system call in several places to make "
- "sure that updates are physically written to disk. This insures "
+ "sure that updates are physically written to disk. This insures "
"that a database cluster will recover to a consistent state after "
"an operating system or hardware crash.")
},
{"zero_damaged_pages", PGC_SUSET, DEVELOPER_OPTIONS,
gettext_noop("Continues processing past damaged page headers."),
gettext_noop("Detection of a damaged page header normally causes PostgreSQL to "
- "report an error, aborting the current transaction. Setting "
+ "report an error, aborting the current transaction. Setting "
"zero_damaged_pages to true causes the system to instead report a "
"warning, zero out the damaged page, and continue processing. This "
"behavior will destroy data, namely all the rows on the damaged page."),
gettext_noop("Writes full pages to WAL when first modified after a checkpoint."),
gettext_noop("A page write in process during an operating system crash might be "
"only partially written to disk. During recovery, the row changes"
- "stored in WAL are not enough to recover. This option writes "
+ "stored in WAL are not enough to recover. This option writes "
"pages when first modified after a checkpoint to WAL so full recovery "
"is possible.")
},
{"silent_mode", PGC_POSTMASTER, LOGGING_WHEN,
gettext_noop("Runs the server silently."),
gettext_noop("If this parameter is set, the server will automatically run in the "
- "background and any controlling terminals are dissociated.")
+ "background and any controlling terminals are dissociated.")
},
&SilentMode,
false, NULL, NULL
{"stats_command_string", PGC_SUSET, STATS_COLLECTOR,
gettext_noop("Collects statistics about executing commands."),
gettext_noop("Enables the collection of statistics on the currently "
- "executing command of each session, along with the time "
+ "executing command of each session, along with the time "
"at which that command began execution.")
},
&pgstat_collect_querystring,
NULL
},
&autovacuum_start_daemon,
- false, NULL, NULL
+ false, NULL, NULL
},
{
gettext_noop("Logs the host name in the connection logs."),
gettext_noop("By default, connection logs only show the IP address "
"of the connecting host. If you want them to show the host name you "
- "can turn this on, but depending on your host name resolution "
- "setup it might impose a non-negligible performance penalty.")
+ "can turn this on, but depending on your host name resolution "
+ "setup it might impose a non-negligible performance penalty.")
},
&log_hostname,
false, NULL, NULL
{"password_encryption", PGC_USERSET, CONN_AUTH_SECURITY,
gettext_noop("Encrypt passwords."),
gettext_noop("When a password is specified in CREATE USER or "
- "ALTER USER without writing either ENCRYPTED or UNENCRYPTED, "
+ "ALTER USER without writing either ENCRYPTED or UNENCRYPTED, "
"this parameter determines whether the password is to be encrypted.")
},
&Password_encryption,
{"transform_null_equals", PGC_USERSET, COMPAT_OPTIONS_CLIENT,
gettext_noop("Treats \"expr=NULL\" as \"expr IS NULL\"."),
gettext_noop("When turned on, expressions of the form expr = NULL "
- "(or NULL = expr) are treated as expr IS NULL, that is, they "
- "return true if expr evaluates to the null value, and false "
- "otherwise. The correct behavior of expr = NULL is to always "
+ "(or NULL = expr) are treated as expr IS NULL, that is, they "
+ "return true if expr evaluates to the null value, and false "
+ "otherwise. The correct behavior of expr = NULL is to always "
"return null (unknown).")
},
&Transform_null_equals,
{"default_statistics_target", PGC_USERSET, QUERY_TUNING_OTHER,
gettext_noop("Sets the default statistics target."),
gettext_noop("This applies to table columns that have not had a "
- "column-specific target set via ALTER TABLE SET STATISTICS.")
+ "column-specific target set via ALTER TABLE SET STATISTICS.")
},
&default_statistics_target,
10, 1, 1000, NULL, NULL
gettext_noop("Sets the FROM-list size beyond which subqueries are not "
"collapsed."),
gettext_noop("The planner will merge subqueries into upper "
- "queries if the resulting FROM list would have no more than "
+ "queries if the resulting FROM list would have no more than "
"this many items.")
},
&from_collapse_limit,
gettext_noop("Sets the FROM-list size beyond which JOIN constructs are not "
"flattened."),
gettext_noop("The planner will flatten explicit inner JOIN "
- "constructs into lists of FROM items whenever a list of no more "
+ "constructs into lists of FROM items whenever a list of no more "
"than this many items would result.")
},
&join_collapse_limit,
* Note: There is some postprocessing done in PostmasterMain() to make
* sure the buffers are at least twice the number of backends, so the
* constraints here are partially unused. Similarly, the superuser
- * reserved number is checked to ensure it is less than the max
- * backends number.
+ * reserved number is checked to ensure it is less than the max backends
+ * number.
*
* MaxBackends is limited to INT_MAX/4 because some places compute
- * 4*MaxBackends without any overflow check. Likewise we have to
- * limit NBuffers to INT_MAX/2.
+ * 4*MaxBackends without any overflow check. Likewise we have to limit
+ * NBuffers to INT_MAX/2.
*/
{
{"max_connections", PGC_POSTMASTER, CONN_AUTH_SETTINGS,
{"work_mem", PGC_USERSET, RESOURCES_MEM,
gettext_noop("Sets the maximum memory to be used for query workspaces."),
gettext_noop("This much memory may be used by each internal "
- "sort operation and hash table before switching to "
+ "sort operation and hash table before switching to "
"temporary disk files.")
},
&work_mem,
{"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
gettext_noop("Sets the maximum number of locks per transaction."),
gettext_noop("The shared lock table is sized on the assumption that "
- "at most max_locks_per_transaction * max_connections distinct "
+ "at most max_locks_per_transaction * max_connections distinct "
"objects will need to be locked at any one time.")
},
&max_locks_per_xact,
gettext_noop("Logs if filling of checkpoint segments happens more "
"frequently than this (in seconds)."),
gettext_noop("Write a message to the server log if checkpoints "
- "caused by the filling of checkpoint segment files happens more "
+ "caused by the filling of checkpoint segment files happens more "
"frequently than this number of seconds. Zero turns off the warning.")
},
&CheckPointWarning,
{"extra_float_digits", PGC_USERSET, CLIENT_CONN_LOCALE,
gettext_noop("Sets the number of digits displayed for floating-point values."),
gettext_noop("This affects real, double precision, and geometric data types. "
- "The parameter value is added to the standard number of digits "
+ "The parameter value is added to the standard number of digits "
"(FLT_DIG or DBL_DIG as appropriate).")
},
&extra_float_digits,
{
{"tcp_keepalives_idle", PGC_USERSET, CLIENT_CONN_OTHER,
- gettext_noop("Seconds between issuing TCP keepalives."),
- gettext_noop("A value of 0 uses the system default."),
- },
+ gettext_noop("Seconds between issuing TCP keepalives."),
+ gettext_noop("A value of 0 uses the system default."),
+ },
&tcp_keepalives_idle,
0, 0, INT_MAX, assign_tcp_keepalives_idle, show_tcp_keepalives_idle
},
{
{"tcp_keepalives_interval", PGC_USERSET, CLIENT_CONN_OTHER,
- gettext_noop("Seconds between TCP keepalive retransmits."),
- gettext_noop("A value of 0 uses the system default."),
- },
+ gettext_noop("Seconds between TCP keepalive retransmits."),
+ gettext_noop("A value of 0 uses the system default."),
+ },
&tcp_keepalives_interval,
0, 0, INT_MAX, assign_tcp_keepalives_interval, show_tcp_keepalives_interval
},
{
{"tcp_keepalives_count", PGC_USERSET, CLIENT_CONN_OTHER,
- gettext_noop("Maximum number of TCP keepalive retransmits."),
- gettext_noop("This controls the number of consecutive keepalive retransmits that can be "
- "lost before a connection is considered dead. A value of 0 uses the "
- "system default."),
- },
+ gettext_noop("Maximum number of TCP keepalive retransmits."),
+ gettext_noop("This controls the number of consecutive keepalive retransmits that can be "
+ "lost before a connection is considered dead. A value of 0 uses the "
+ "system default."),
+ },
&tcp_keepalives_count,
0, 0, INT_MAX, assign_tcp_keepalives_count, show_tcp_keepalives_count
},
gettext_noop("Sets the planner's estimate of the cost of a nonsequentially "
"fetched disk page."),
gettext_noop("This is measured as a multiple of the cost of a "
- "sequential page fetch. A higher value makes it more likely a "
+ "sequential page fetch. A higher value makes it more likely a "
"sequential scan will be used, a lower value makes it more likely an "
"index scan will be used.")
},
{"log_min_messages", PGC_SUSET, LOGGING_WHEN,
gettext_noop("Sets the message levels that are logged."),
gettext_noop("Valid values are DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1, "
- "INFO, NOTICE, WARNING, ERROR, LOG, FATAL, and PANIC. Each level "
+ "INFO, NOTICE, WARNING, ERROR, LOG, FATAL, and PANIC. Each level "
"includes all the levels that follow it.")
},
&log_min_messages_str,
{
{"data_directory", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's data directory."),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's data directory."),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&data_directory,
NULL, NULL, NULL
{
{"config_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's main configuration file."),
- NULL,
- GUC_DISALLOW_IN_FILE | GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's main configuration file."),
+ NULL,
+ GUC_DISALLOW_IN_FILE | GUC_SUPERUSER_ONLY
},
&ConfigFileName,
NULL, NULL, NULL
{
{"hba_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's \"hba\" configuration file"),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's \"hba\" configuration file"),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&HbaFileName,
NULL, NULL, NULL
{
{"ident_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's \"ident\" configuration file"),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's \"ident\" configuration file"),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&IdentFileName,
NULL, NULL, NULL
{
{"external_pid_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Writes the postmaster PID to the specified file."),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Writes the postmaster PID to the specified file."),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&external_pid_file,
NULL, assign_canonical_path, NULL
is_custom_class(const char *name, int dotPos)
{
/*
- * assign_custom_variable_classes() has made sure no empty
- * identifiers or whitespace exists in the variable
+ * assign_custom_variable_classes() has made sure no empty identifiers or
+ * whitespace exists in the variable
*/
bool result = false;
const char *ccs = GetConfigOption("custom_variable_classes");
Assert(name);
/*
- * By equating const char ** with struct config_generic *, we are
- * assuming the name field is first in config_generic.
+ * By equating const char ** with struct config_generic *, we are assuming
+ * the name field is first in config_generic.
*/
res = (struct config_generic **) bsearch((void *) &key,
(void *) guc_variables,
num_guc_variables,
- sizeof(struct config_generic *),
+ sizeof(struct config_generic *),
guc_var_compare);
if (res)
return *res;
/*
- * See if the name is an obsolete name for a variable. We assume that
- * the set of supported old names is short enough that a brute-force
- * search is the best way.
+ * See if the name is an obsolete name for a variable. We assume that the
+ * set of supported old names is short enough that a brute-force search is
+ * the best way.
*/
for (i = 0; map_old_guc_names[i] != NULL; i += 2)
{
}
/*
- * Check if the name is qualified, and if so, check if the qualifier
- * maps to a custom variable class.
+ * Check if the name is qualified, and if so, check if the qualifier maps
+ * to a custom variable class.
*/
dot = strchr(name, GUC_QUALIFIER_SEPARATOR);
if (dot != NULL && is_custom_class(name, dot - name))
guc_name_compare(const char *namea, const char *nameb)
{
/*
- * The temptation to use strcasecmp() here must be resisted, because
- * the array ordering has to remain stable across setlocale() calls.
- * So, build our own with a simple ASCII-only downcasing.
+ * The temptation to use strcasecmp() here must be resisted, because the
+ * array ordering has to remain stable across setlocale() calls. So, build
+ * our own with a simple ASCII-only downcasing.
*/
while (*namea && *nameb)
{
free(str);
/*
- * See notes in set_config_option about
- * casting
+ * See notes in set_config_option about casting
*/
str = (char *) newstr;
conf->reset_val = str;
PGC_POSTMASTER, PGC_S_OVERRIDE);
/*
- * For historical reasons, some GUC parameters can receive defaults
- * from environment variables. Process those settings.
+ * For historical reasons, some GUC parameters can receive defaults from
+ * environment variables. Process those settings.
*/
env = getenv("PGPORT");
/*
* Find the configuration file: if config_file was specified on the
- * command line, use it, else use configdir/postgresql.conf. In any
- * case ensure the result is an absolute path, so that it will be
- * interpreted the same way by future backends.
+ * command line, use it, else use configdir/postgresql.conf. In any case
+ * ensure the result is an absolute path, so that it will be interpreted
+ * the same way by future backends.
*/
if (ConfigFileName)
fname = make_absolute_path(ConfigFileName);
}
/*
- * Set the ConfigFileName GUC variable to its final value, ensuring
- * that it can't be overridden later.
+ * Set the ConfigFileName GUC variable to its final value, ensuring that
+ * it can't be overridden later.
*/
SetConfigOption("config_file", fname, PGC_POSTMASTER, PGC_S_OVERRIDE);
free(fname);
* If the data_directory GUC variable has been set, use that as DataDir;
* otherwise use configdir if set; else punt.
*
- * Note: SetDataDir will copy and absolute-ize its argument,
- * so we don't have to.
+ * Note: SetDataDir will copy and absolute-ize its argument, so we don't have
+ * to.
*/
if (data_directory)
SetDataDir(data_directory);
* Reflect the final DataDir value back into the data_directory GUC var.
* (If you are wondering why we don't just make them a single variable,
* it's because the EXEC_BACKEND case needs DataDir to be transmitted to
- * child backends specially. XXX is that still true? Given that we
- * now chdir to DataDir, EXEC_BACKEND can read the config file without
- * knowing DataDir in advance.)
+ * child backends specially. XXX is that still true? Given that we now
+ * chdir to DataDir, EXEC_BACKEND can read the config file without knowing
+ * DataDir in advance.)
*/
SetConfigOption("data_directory", DataDir, PGC_POSTMASTER, PGC_S_OVERRIDE);
else if (newstr != str)
{
/*
- * See notes in set_config_option about
- * casting
+ * See notes in set_config_option about casting
*/
str = (char *) newstr;
}
/*
* We keep all the stack entries in TopTransactionContext so as to
- * avoid allocation problems when a subtransaction back-fills
- * stack entries for upper transaction levels.
+ * avoid allocation problems when a subtransaction back-fills stack
+ * entries for upper transaction levels.
*/
stack = (GucStack *) MemoryContextAlloc(TopTransactionContext,
sizeof(GucStack));
Assert(stack->nest_level == my_level);
/*
- * We will pop the stack entry. Start by restoring outer xact
- * status (since we may want to modify it below). Be careful to
- * use my_status to reference the inner xact status below this
- * point...
+ * We will pop the stack entry. Start by restoring outer xact status
+ * (since we may want to modify it below). Be careful to use
+ * my_status to reference the inner xact status below this point...
*/
gconf->status = stack->status;
/*
* We have two cases:
*
- * If commit and HAVE_TENTATIVE, set actual value to tentative (this
- * is to override a SET LOCAL if one occurred later than SET). We
- * keep the tentative value and propagate HAVE_TENTATIVE to the
- * parent status, allowing the SET's effect to percolate up. (But
- * if we're exiting the outermost transaction, we'll drop the
- * HAVE_TENTATIVE bit below.)
+ * If commit and HAVE_TENTATIVE, set actual value to tentative (this is
+ * to override a SET LOCAL if one occurred later than SET). We keep
+ * the tentative value and propagate HAVE_TENTATIVE to the parent
+ * status, allowing the SET's effect to percolate up. (But if we're
+ * exiting the outermost transaction, we'll drop the HAVE_TENTATIVE
+ * bit below.)
*
* Otherwise, we have a transaction that aborted or executed only SET
- * LOCAL (or no SET at all). In either case it should have no
- * further effect, so restore both tentative and actual values
- * from the stack entry.
+ * LOCAL (or no SET at all). In either case it should have no further
+ * effect, so restore both tentative and actual values from the stack
+ * entry.
*/
useTentative = isCommit && (my_status & GUC_HAVE_TENTATIVE) != 0;
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
const char *newstr;
newstr = (*conf->assign_hook) (newval, true,
- PGC_S_OVERRIDE);
+ PGC_S_OVERRIDE);
if (newstr == NULL)
elog(LOG, "failed to commit %s",
conf->gen.name);
* If newval should now be freed, it'll be
* taken care of below.
*
- * See notes in set_config_option about
- * casting
+ * See notes in set_config_option about casting
*/
newval = (char *) newstr;
}
pfree(stack);
/*
- * If we're now out of all xact levels, forget TENTATIVE status
- * bit; there's nothing tentative about the value anymore.
+ * If we're now out of all xact levels, forget TENTATIVE status bit;
+ * there's nothing tentative about the value anymore.
*/
if (!isSubXact)
{
}
/*
- * If we're now out of all xact levels, we can clear guc_dirty. (Note:
- * we cannot reset guc_dirty when exiting a subtransaction, because we
- * know that all outer transaction levels will have stacked values to
- * deal with.)
+ * If we're now out of all xact levels, we can clear guc_dirty. (Note: we
+ * cannot reset guc_dirty when exiting a subtransaction, because we know
+ * that all outer transaction levels will have stacked values to deal
+ * with.)
*/
if (!isSubXact)
guc_dirty = false;
int i;
/*
- * Don't do anything unless talking to an interactive frontend of
- * protocol 3.0 or later.
+ * Don't do anything unless talking to an interactive frontend of protocol
+ * 3.0 or later.
*/
if (whereToSendOutput != Remote ||
PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
{
ereport(elevel,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
return false;
}
/*
- * Check if the option can be set at this time. See guc.h for the
- * precise rules. Note that we don't want to throw errors if we're in
- * the SIGHUP context. In that case we just ignore the attempt and
- * return true.
+ * Check if the option can be set at this time. See guc.h for the precise
+ * rules. Note that we don't want to throw errors if we're in the SIGHUP
+ * context. In that case we just ignore the attempt and return true.
*/
switch (record->context)
{
}
/*
- * Hmm, the idea of the SIGHUP context is "ought to be global,
- * but can be changed after postmaster start". But there's
- * nothing that prevents a crafty administrator from sending
- * SIGHUP signals to individual backends only.
+ * Hmm, the idea of the SIGHUP context is "ought to be global, but
+ * can be changed after postmaster start". But there's nothing
+ * that prevents a crafty administrator from sending SIGHUP
+ * signals to individual backends only.
*/
break;
case PGC_BACKEND:
if (context == PGC_SIGHUP)
{
/*
- * If a PGC_BACKEND parameter is changed in the config
- * file, we want to accept the new value in the postmaster
- * (whence it will propagate to subsequently-started
- * backends), but ignore it in existing backends. This is
- * a tad klugy, but necessary because we don't re-read the
- * config file during backend start.
+ * If a PGC_BACKEND parameter is changed in the config file,
+ * we want to accept the new value in the postmaster (whence
+ * it will propagate to subsequently-started backends), but
+ * ignore it in existing backends. This is a tad klugy, but
+ * necessary because we don't re-read the config file during
+ * backend start.
*/
if (IsUnderPostmaster)
return true;
{
ereport(elevel,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to set parameter \"%s\"",
- name)));
+ errmsg("permission denied to set parameter \"%s\"",
+ name)));
return false;
}
break;
/*
* Ignore attempted set if overridden by previously processed setting.
* However, if changeVal is false then plow ahead anyway since we are
- * trying to find out if the value is potentially good, not actually
- * use it. Also keep going if makeDefault is true, since we may want
- * to set the reset/stacked values even if we can't set the variable
- * itself.
+ * trying to find out if the value is potentially good, not actually use
+ * it. Also keep going if makeDefault is true, since we may want to set
+ * the reset/stacked values even if we can't set the variable itself.
*/
if (record->source > source)
{
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a Boolean value",
- name)));
+ errmsg("parameter \"%s\" requires a Boolean value",
+ name)));
return false;
}
}
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %d",
- name, (int) newval)));
+ errmsg("invalid value for parameter \"%s\": %d",
+ name, (int) newval)));
return false;
}
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires an integer value",
- name)));
+ errmsg("parameter \"%s\" requires an integer value",
+ name)));
return false;
}
if (newval < conf->min || newval > conf->max)
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%d is outside the valid range for parameter \"%s\" (%d .. %d)",
- newval, name, conf->min, conf->max)));
+ newval, name, conf->min, conf->max)));
return false;
}
}
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %d",
- name, newval)));
+ errmsg("invalid value for parameter \"%s\": %d",
+ name, newval)));
return false;
}
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a numeric value",
- name)));
+ errmsg("parameter \"%s\" requires a numeric value",
+ name)));
return false;
}
if (newval < conf->min || newval > conf->max)
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%g is outside the valid range for parameter \"%s\" (%g .. %g)",
- newval, name, conf->min, conf->max)));
+ newval, name, conf->min, conf->max)));
return false;
}
}
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %g",
- name, newval)));
+ errmsg("invalid value for parameter \"%s\": %g",
+ name, newval)));
return false;
}
else if (conf->reset_val)
{
/*
- * We could possibly avoid strdup here, but easier to
- * make this case work the same as the normal
- * assignment case.
+ * We could possibly avoid strdup here, but easier to make
+ * this case work the same as the normal assignment case.
*/
newval = guc_strdup(elevel, conf->reset_val);
if (newval == NULL)
free(newval);
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- name, value ? value : "")));
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ name, value ? value : "")));
return false;
}
else if (hookresult != newval)
free(newval);
/*
- * Having to cast away const here is annoying, but
- * the alternative is to declare assign_hooks as
- * returning char*, which would mean they'd have
- * to cast away const, or as both taking and
- * returning char*, which doesn't seem attractive
- * either --- we don't want them to scribble on
- * the passed str.
+ * Having to cast away const here is annoying, but the
+ * alternative is to declare assign_hooks as returning
+ * char*, which would mean they'd have to cast away
+ * const, or as both taking and returning char*, which
+ * doesn't seem attractive either --- we don't want
+ * them to scribble on the passed str.
*/
newval = (char *) hookresult;
}
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
ListCell *l;
/*
- * Fast path if just DEFAULT. We do not check the variable name in
- * this case --- necessary for RESET ALL to work correctly.
+ * Fast path if just DEFAULT. We do not check the variable name in this
+ * case --- necessary for RESET ALL to work correctly.
*/
if (args == NIL)
return NULL;
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
flags = record->flags;
if (arg->typename != NULL)
{
/*
- * Must be a ConstInterval argument for TIME ZONE.
- * Coerce to interval and back to normalize the value
- * and account for any typmod.
+ * Must be a ConstInterval argument for TIME ZONE. Coerce
+ * to interval and back to normalize the value and account
+ * for any typmod.
*/
- Datum interval;
+ Datum interval;
char *intervalout;
interval =
- DirectFunctionCall3(interval_in,
- CStringGetDatum(val),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(arg->typename->typmod));
+ DirectFunctionCall3(interval_in,
+ CStringGetDatum(val),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(arg->typename->typmod));
intervalout =
DatumGetCString(DirectFunctionCall1(interval_out,
else
{
/*
- * Plain string literal or identifier. For quote
- * mode, quote it if it's not a vanilla identifier.
+ * Plain string literal or identifier. For quote mode,
+ * quote it if it's not a vanilla identifier.
*/
if (flags & GUC_LIST_QUOTE)
appendStringInfoString(&buf, quote_identifier(val));
value = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(1)));
/*
- * Get the desired state of is_local. Default to false if provided
- * value is NULL
+ * Get the desired state of is_local. Default to false if provided value
+ * is NULL
*/
if (PG_ARGISNULL(2))
is_local = false;
const char *value;
struct config_string *pHolder;
struct config_generic **res = (struct config_generic **) bsearch(
- (void *) &nameAddr,
- (void *) guc_variables,
- num_guc_variables,
- sizeof(struct config_generic *),
- guc_var_compare);
+ (void *) &nameAddr,
+ (void *) guc_variables,
+ num_guc_variables,
+ sizeof(struct config_generic *),
+ guc_var_compare);
if (res == NULL)
{
value = *pHolder->variable;
/*
- * Assign the string value stored in the placeholder to the real
- * variable.
+ * Assign the string value stored in the placeholder to the real variable.
*
* XXX this is not really good enough --- it should be a nontransactional
* assignment, since we don't want it to roll back if the current xact
TEXTOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "description",
TEXTOID, -1, 0);
-
+
/* prepare for projection of tuples */
tstate = begin_tup_output_tupdesc(dest, tupdesc);
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
default:
{
/*
- * should never get here, but in case we do, set 'em to
- * NULL
+ * should never get here, but in case we do, set 'em to NULL
*/
/* min_val */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/*
- * need a tuple descriptor representing NUM_PG_SETTINGS_ATTS
- * columns of the appropriate types
+ * need a tuple descriptor representing NUM_PG_SETTINGS_ATTS columns
+ * of the appropriate types
*/
tupdesc = CreateTemplateTupleDesc(NUM_PG_SETTINGS_ATTS, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
TEXTOID, -1, 0);
/*
- * Generate attribute metadata needed later to produce tuples from
- * raw C strings
+ * Generate attribute metadata needed later to produce tuples from raw
+ * C strings
*/
attinmeta = TupleDescGetAttInMetadata(tupdesc);
funcctx->attinmeta = attinmeta;
}
/*
- * Put new file in place. This could delay on Win32, but we don't
- * hold any exclusive locks.
+ * Put new file in place. This could delay on Win32, but we don't hold
+ * any exclusive locks.
*/
rename(CONFIG_EXEC_PARAMS_NEW, CONFIG_EXEC_PARAMS);
}
FreeFile(fp);
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
{
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("could not parse setting for parameter \"%s\"", name)));
+ errmsg("could not parse setting for parameter \"%s\"", name)));
free(name);
continue;
}
/*
- * We process all these options at SUSET level. We assume that
- * the right to insert an option into pg_database or pg_authid was
- * checked when it was inserted.
+ * We process all these options at SUSET level. We assume that the
+ * right to insert an option into pg_database or pg_authid was checked
+ * when it was inserted.
*/
SetConfigOption(name, value, PGC_SUSET, source);
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for parameter \"log_destination\"")));
+ errmsg("invalid list syntax for parameter \"log_destination\"")));
return NULL;
}
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized \"log_destination\" key word: \"%s\"",
- tok)));
+ errmsg("unrecognized \"log_destination\" key word: \"%s\"",
+ tok)));
pfree(rawstring);
list_free(elemlist);
return NULL;
static const char *
assign_syslog_facility(const char *facility, bool doit, GucSource source)
{
- int syslog_fac;
+ int syslog_fac;
if (pg_strcasecmp(facility, "LOCAL0") == 0)
syslog_fac = LOG_LOCAL0;
return ident;
}
-
-#endif /* HAVE_SYSLOG */
+#endif /* HAVE_SYSLOG */
static const char *
}
/*
- * Client_min_messages always prints 'info', but we allow it as a
- * value anyway.
+ * Client_min_messages always prints 'info', but we allow it as a value
+ * anyway.
*/
else if (pg_strcasecmp(newval, "info") == 0)
{
show_num_temp_buffers(void)
{
/*
- * We show the GUC var until local buffers have been initialized,
- * and NLocBuffer afterwards.
+ * We show the GUC var until local buffers have been initialized, and
+ * NLocBuffer afterwards.
*/
static char nbuf[32];
if (doit && source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SET AUTOCOMMIT TO OFF is no longer supported")));
+ errmsg("SET AUTOCOMMIT TO OFF is no longer supported")));
return false;
}
return true;
if (hasSpaceAfterToken || !isalnum(c))
{
/*
- * Syntax error due to token following space after token or
- * non alpha numeric character
+ * Syntax error due to token following space after token or non
+ * alpha numeric character
*/
ereport(LOG,
(errcode(ERRCODE_SYNTAX_ERROR),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/pg_rusage.c,v 1.1 2005/10/03 22:52:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/pg_rusage.c,v 1.2 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
snprintf(result, sizeof(result),
"CPU %d.%02ds/%d.%02du sec elapsed %d.%02d sec",
(int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec),
- (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
+ (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
(int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec),
- (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
+ (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
(int) (ru1.tv.tv_sec - ru0->tv.tv_sec),
(int) (ru1.tv.tv_usec - ru0->tv.tv_usec) / 10000);
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.24 2005/05/24 07:16:27 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.25 2005/10/15 02:49:36 momjian Exp $
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
* various details abducted from various places
#define PS_BUFFER_SIZE 256
static char ps_buffer[PS_BUFFER_SIZE];
static const size_t ps_buffer_size = PS_BUFFER_SIZE;
-
#else /* PS_USE_CLOBBER_ARGV */
static char *ps_buffer; /* will point to argv area */
static size_t ps_buffer_size; /* space determined at run time */
static char **save_argv;
#ifdef WIN32
- /*
- * Win32 does not support showing any changed arguments. To make it
- * at all possible to track which backend is doing what, we create
- * a named object that can be viewed with for example Process Explorer
- */
+
+ /*
+ * Win32 does not support showing any changed arguments. To make it at all
+ * possible to track which backend is doing what, we create a named object
+ * that can be viewed with for example Process Explorer
+ */
static HANDLE ident_handle = INVALID_HANDLE_VALUE;
-static void pgwin32_update_ident(char *ident)
+static void
+pgwin32_update_ident(char *ident)
{
- char name[PS_BUFFER_SIZE+32];
+ char name[PS_BUFFER_SIZE + 32];
if (ident_handle != INVALID_HANDLE_VALUE)
CloseHandle(ident_handle);
- sprintf(name,"pgident: %s",ident);
+ sprintf(name, "pgident: %s", ident);
ident_handle = CreateEvent(NULL,
TRUE,
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
-char **
+char **
save_ps_display_args(int argc, char **argv)
{
save_argc = argc;
#if defined(PS_USE_CLOBBER_ARGV)
/*
- * If we're going to overwrite the argv area, count the available
- * space. Also move the environment to make additional room.
+ * If we're going to overwrite the argv area, count the available space.
+ * Also move the environment to make additional room.
*/
{
char *end_of_area = NULL;
* argument parsing purposes.
*
* (NB: do NOT think to remove the copying of argv[], even though
- * postmaster.c finishes looking at argv[] long before we ever
- * consider changing the ps display. On some platforms, getopt()
- * keeps pointers into the argv array, and will get horribly confused
- * when it is re-called to analyze a subprocess' argument string if
- * the argv storage has been clobbered meanwhile. Other platforms
- * have other dependencies on argv[].
+ * postmaster.c finishes looking at argv[] long before we ever consider
+ * changing the ps display. On some platforms, getopt() keeps pointers
+ * into the argv array, and will get horribly confused when it is
+ * re-called to analyze a subprocess' argument string if the argv storage
+ * has been clobbered meanwhile. Other platforms have other dependencies
+ * on argv[].
*/
{
char **new_argv;
argv = new_argv;
}
-#endif /* PS_USE_CHANGE_ARGV or
- * PS_USE_CLOBBER_ARGV */
+#endif /* PS_USE_CHANGE_ARGV or PS_USE_CLOBBER_ARGV */
return argv;
}
#ifdef PS_USE_SETPROCTITLE
/*
- * apparently setproctitle() already adds a `progname:' prefix to the
- * ps line
+ * apparently setproctitle() already adds a `progname:' prefix to the ps
+ * line
*/
snprintf(ps_buffer, ps_buffer_size,
"%s %s %s ",
#ifdef WIN32
pgwin32_update_ident(ps_buffer);
#endif
-
#endif /* not PS_USE_NONE */
}
#ifdef WIN32
pgwin32_update_ident(ps_buffer);
#endif
-
#endif /* not PS_USE_NONE */
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/superuser.c,v 1.33 2005/08/15 02:40:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/superuser.c,v 1.34 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* the status of the last requested roleid. The cache can be flushed
* at need by watching for cache update events on pg_authid.
*/
-static Oid last_roleid = InvalidOid; /* InvalidOid == cache not valid */
-static bool last_roleid_is_super = false;
-static bool roleid_callback_registered = false;
+static Oid last_roleid = InvalidOid; /* InvalidOid == cache not valid */
+static bool last_roleid_is_super = false;
+static bool roleid_callback_registered = false;
static void RoleidCallback(Datum arg, Oid relid);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.63 2005/09/01 18:15:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.64 2005/10/15 02:49:36 momjian Exp $
*
* NOTE:
* This is a new (Feb. 05, 1999) implementation of the allocation set
/* Allocation parameters for this context: */
Size initBlockSize; /* initial block size */
Size maxBlockSize; /* maximum block size */
- AllocBlock keeper; /* if not NULL, keep this block over
- * resets */
+ AllocBlock keeper; /* if not NULL, keep this block over resets */
} AllocSetContext;
typedef AllocSetContext *AllocSet;
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while creating memory context \"%s\".",
- name)));
+ errdetail("Failed while creating memory context \"%s\".",
+ name)));
}
block->aset = context;
block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
AssertArg(AllocSetIsValid(set));
/*
- * If requested size exceeds maximum for chunks, allocate an entire
- * block for this request.
+ * If requested size exceeds maximum for chunks, allocate an entire block
+ * for this request.
*/
if (size > ALLOC_CHUNK_LIMIT)
{
#endif
/*
- * Stick the new block underneath the active allocation block, so
- * that we don't lose the use of the space remaining therein.
+ * Stick the new block underneath the active allocation block, so that
+ * we don't lose the use of the space remaining therein.
*/
if (set->blocks != NULL)
{
/*
* Request is small enough to be treated as a chunk. Look in the
- * corresponding free list to see if there is a free chunk we could
- * reuse.
+ * corresponding free list to see if there is a free chunk we could reuse.
*/
fidx = AllocSetFreeIndex(size);
priorfree = NULL;
}
/*
- * If one is found, remove it from the free list, make it again a
- * member of the alloc set and return its data address.
+ * If one is found, remove it from the free list, make it again a member
+ * of the alloc set and return its data address.
*/
if (chunk != NULL)
{
Assert(chunk_size >= size);
/*
- * If there is enough room in the active allocation block, we will put
- * the chunk into that block. Else must start a new one.
+ * If there is enough room in the active allocation block, we will put the
+ * chunk into that block. Else must start a new one.
*/
if ((block = set->blocks) != NULL)
{
if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
{
/*
- * The existing active (top) block does not have enough room
- * for the requested allocation, but it might still have a
- * useful amount of space in it. Once we push it down in the
- * block list, we'll never try to allocate more space from it.
- * So, before we do that, carve up its free space into chunks
- * that we can put on the set's freelists.
+ * The existing active (top) block does not have enough room for
+ * the requested allocation, but it might still have a useful
+ * amount of space in it. Once we push it down in the block list,
+ * we'll never try to allocate more space from it. So, before we
+ * do that, carve up its free space into chunks that we can put on
+ * the set's freelists.
*
* Because we can only get here when there's less than
- * ALLOC_CHUNK_LIMIT left in the block, this loop cannot
- * iterate more than ALLOCSET_NUM_FREELISTS-1 times.
+ * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
+ * more than ALLOCSET_NUM_FREELISTS-1 times.
*/
while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
{
int a_fidx = AllocSetFreeIndex(availchunk);
/*
- * In most cases, we'll get back the index of the next
- * larger freelist than the one we need to put this chunk
- * on. The exception is when availchunk is exactly a
- * power of 2.
+ * In most cases, we'll get back the index of the next larger
+ * freelist than the one we need to put this chunk on. The
+ * exception is when availchunk is exactly a power of 2.
*/
if (availchunk != (1 << (a_fidx + ALLOC_MINBITS)))
{
else
{
/*
- * Use first power of 2 that is larger than previous block,
- * but not more than the allowed limit. (We don't simply
- * double the prior block size, because in some cases this
- * could be a funny size, eg if very first allocation was for
- * an odd-sized large chunk.)
+ * Use first power of 2 that is larger than previous block, but
+ * not more than the allowed limit. (We don't simply double the
+ * prior block size, because in some cases this could be a funny
+ * size, eg if very first allocation was for an odd-sized large
+ * chunk.)
*/
Size pblksize = set->blocks->endptr - ((char *) set->blocks);
}
/*
- * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need
- * more space... but try to keep it a power of 2.
+ * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
+ * space... but try to keep it a power of 2.
*/
required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
while (blksize < required_size)
block = (AllocBlock) malloc(blksize);
/*
- * We could be asking for pretty big blocks here, so cope if
- * malloc fails. But give up if there's less than a meg or so
- * available...
+ * We could be asking for pretty big blocks here, so cope if malloc
+ * fails. But give up if there's less than a meg or so available...
*/
while (block == NULL && blksize > 1024 * 1024)
{
block->endptr = ((char *) block) + blksize;
/*
- * If this is the first block of the set, make it the "keeper"
- * block. Formerly, a keeper block could only be created during
- * context creation, but allowing it to happen here lets us have
- * fast reset cycling even for contexts created with
- * minContextSize = 0; that way we don't have to force space to be
- * allocated in contexts that might never need any space. Don't
- * mark an oversize block as a keeper, however.
+ * If this is the first block of the set, make it the "keeper" block.
+ * Formerly, a keeper block could only be created during context
+ * creation, but allowing it to happen here lets us have fast reset
+ * cycling even for contexts created with minContextSize = 0; that way
+ * we don't have to force space to be allocated in contexts that might
+ * never need any space. Don't mark an oversize block as a keeper,
+ * however.
*/
if (set->blocks == NULL && blksize == set->initBlockSize)
{
/*
* Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
- * allocated area already is >= the new size. (In particular, we
- * always fall out here if the requested size is a decrease.)
+ * allocated area already is >= the new size. (In particular, we always
+ * fall out here if the requested size is a decrease.)
*/
if (oldsize >= size)
{
if (oldsize > ALLOC_CHUNK_LIMIT)
{
/*
- * The chunk must been allocated as a single-chunk block. Find
- * the containing block and use realloc() to make it bigger with
- * minimum space wastage.
+ * The chunk must have been allocated as a single-chunk block. Find
+ * the containing block and use realloc() to make it bigger with
+ * minimum space wastage.
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
else
{
/*
- * Small-chunk case. If the chunk is the last one in its block,
- * there might be enough free space after it that we can just
- * enlarge the chunk in-place. It's relatively painful to find
- * the containing block in the general case, but we can detect
- * last-ness quite cheaply for the typical case where the chunk is
- * in the active (topmost) allocation block. (At least with the
- * regression tests and code as of 1/2001, realloc'ing the last
- * chunk of a non-topmost block hardly ever happens, so it's not
- * worth scanning the block list to catch that case.)
+ * Small-chunk case. If the chunk is the last one in its block, there
+ * might be enough free space after it that we can just enlarge the
+ * chunk in-place. It's relatively painful to find the containing
+ * block in the general case, but we can detect last-ness quite
+ * cheaply for the typical case where the chunk is in the active
+ * (topmost) allocation block. (At least with the regression tests
+ * and code as of 1/2001, realloc'ing the last chunk of a non-topmost
+ * block hardly ever happens, so it's not worth scanning the block
+ * list to catch that case.)
*
* NOTE: must be careful not to create a chunk of a size that
* AllocSetAlloc would not create, else we'll get confused later.
AllocSet set = (AllocSet) context;
/*
- * For now, we say "empty" only if the context is new or just reset.
- * We could examine the freelists to determine if all space has been
- * freed, but it's not really worth the trouble for present uses of
- * this functionality.
+ * For now, we say "empty" only if the context is new or just reset. We
+ * could examine the freelists to determine if all space has been freed,
+ * but it's not really worth the trouble for present uses of this
+ * functionality.
*/
if (set->isReset)
return true;
}
}
fprintf(stderr,
- "%s: %ld total in %ld blocks; %ld free (%ld chunks); %ld used\n",
+ "%s: %ld total in %ld blocks; %ld free (%ld chunks); %ld used\n",
set->header.name, totalspace, nblocks, freespace, nchunks,
totalspace - freespace);
}
name, chunk, block);
/*
- * If chunk is allocated, check for correct aset pointer. (If
- * it's free, the aset is the freelist pointer, which we can't
- * check as easily...)
+ * If chunk is allocated, check for correct aset pointer. (If it's
+ * free, the aset is the freelist pointer, which we can't check as
+ * easily...)
*/
if (dsize > 0 && chunk->aset != (void *) set)
elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.55 2005/05/14 23:16:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.56 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
AssertState(TopMemoryContext == NULL);
/*
- * Initialize TopMemoryContext as an AllocSetContext with slow growth
- * rate --- we don't really expect much to be allocated in it.
+ * Initialize TopMemoryContext as an AllocSetContext with slow growth rate
+ * --- we don't really expect much to be allocated in it.
*
* (There is special-case code in MemoryContextCreate() for this call.)
*/
8 * 1024);
/*
- * Not having any other place to point CurrentMemoryContext, make it
- * point to TopMemoryContext. Caller should change this soon!
+ * Not having any other place to point CurrentMemoryContext, make it point
+ * to TopMemoryContext. Caller should change this soon!
*/
CurrentMemoryContext = TopMemoryContext;
/*
- * Initialize ErrorContext as an AllocSetContext with slow growth rate
- * --- we don't really expect much to be allocated in it. More to the
- * point, require it to contain at least 8K at all times. This is the
- * only case where retained memory in a context is *essential* --- we
- * want to be sure ErrorContext still has some memory even if we've
- * run out elsewhere!
+ * Initialize ErrorContext as an AllocSetContext with slow growth rate ---
+ * we don't really expect much to be allocated in it. More to the point,
+ * require it to contain at least 8K at all times. This is the only case
+ * where retained memory in a context is *essential* --- we want to be
+ * sure ErrorContext still has some memory even if we've run out
+ * elsewhere!
*/
ErrorContext = AllocSetContextCreate(TopMemoryContext,
"ErrorContext",
MemoryContextDeleteChildren(context);
/*
- * We delink the context from its parent before deleting it, so that
- * if there's an error we won't have deleted/busted contexts still
- * attached to the context tree. Better a leak than a crash.
+ * We delink the context from its parent before deleting it, so that if
+ * there's an error we won't have deleted/busted contexts still attached
+ * to the context tree. Better a leak than a crash.
*/
if (context->parent)
{
AssertArg(MemoryContextIsValid(context));
/*
- * MemoryContextDelete will delink the child from me, so just iterate
- * as long as there is a child.
+ * MemoryContextDelete will delink the child from me, so just iterate as
+ * long as there is a child.
*/
while (context->firstchild != NULL)
MemoryContextDelete(context->firstchild);
((char *) pointer - STANDARDCHUNKHEADERSIZE);
/*
- * If the context link doesn't match then we certainly have a
- * non-member chunk. Also check for a reasonable-looking size as
- * extra guard against being fooled by bogus pointers.
+ * If the context link doesn't match then we certainly have a non-member
+ * chunk. Also check for a reasonable-looking size as extra guard against
+ * being fooled by bogus pointers.
*/
if (header->context == context && AllocSizeIsValid(header->size))
return true;
CurrentMemoryContext = context;
return old;
}
-
#endif /* ! __GNUC__ */
/*
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.81 2005/06/17 22:32:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.82 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(portal->holdStore == NULL);
/*
- * Create the memory context that is used for storage of the tuple
- * set. Note this is NOT a child of the portal's heap memory.
+ * Create the memory context that is used for storage of the tuple set.
+ * Note this is NOT a child of the portal's heap memory.
*/
portal->holdContext =
AllocSetContextCreate(PortalMemory,
elog(ERROR, "cannot drop active portal");
/*
- * Remove portal from hash table. Because we do this first, we will
- * not come back to try to remove the portal again if there's any
- * error in the subsequent steps. Better to leak a little memory than
- * to get into an infinite error-recovery loop.
+ * Remove portal from hash table. Because we do this first, we will not
+ * come back to try to remove the portal again if there's any error in the
+ * subsequent steps. Better to leak a little memory than to get into an
+ * infinite error-recovery loop.
*/
PortalHashTableDelete(portal);
(*portal->cleanup) (portal);
/*
- * Release any resources still attached to the portal. There are
- * several cases being covered here:
+ * Release any resources still attached to the portal. There are several
+ * cases being covered here:
*
- * Top transaction commit (indicated by isTopCommit): normally we should
- * do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we
- * have a FAILED portal (eg, a cursor that got an error), we'd better
- * clean up its resources to avoid resource-leakage warning messages.
+ * Top transaction commit (indicated by isTopCommit): normally we should do
+ * nothing here and let the regular end-of-transaction resource releasing
+ * mechanism handle these resources too. However, if we have a FAILED
+ * portal (eg, a cursor that got an error), we'd better clean up its
+ * resources to avoid resource-leakage warning messages.
*
- * Sub transaction commit: never comes here at all, since we don't kill
- * any portals in AtSubCommit_Portals().
+ * Sub transaction commit: never comes here at all, since we don't kill any
+ * portals in AtSubCommit_Portals().
*
* Main or sub transaction abort: we will do nothing here because
* portal->resowner was already set NULL; the resources were already
* cleaned up in transaction abort.
*
- * Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become
- * the responsibility of the transaction's ResourceOwner (since it is
- * the parent of the portal's owner) and will be released when the
- * transaction eventually ends.
+ * Ordinary portal drop: must release resources. However, if the portal is
+ * not FAILED then we do not release its locks. The locks become the
+ * responsibility of the transaction's ResourceOwner (since it is the
+ * parent of the portal's owner) and will be released when the transaction
+ * eventually ends.
*/
if (portal->resowner &&
(!isTopCommit || portal->status == PORTAL_FAILED))
bool
CommitHoldablePortals(void)
{
- bool result = false;
+ bool result = false;
HASH_SEQ_STATUS status;
PortalHashEnt *hentry;
portal->status == PORTAL_READY)
{
/*
- * We are exiting the transaction that created a holdable
- * cursor. Instead of dropping the portal, prepare it for
- * access by later transactions.
+ * We are exiting the transaction that created a holdable cursor.
+ * Instead of dropping the portal, prepare it for access by later
+ * transactions.
*
- * Note that PersistHoldablePortal() must release all resources
- * used by the portal that are local to the creating
- * transaction.
+ * Note that PersistHoldablePortal() must release all resources used
+ * by the portal that are local to the creating transaction.
*/
PortalCreateHoldStore(portal);
PersistHoldablePortal(portal);
/*
- * Any resources belonging to the portal will be released in
- * the upcoming transaction-wide cleanup; the portal will no
- * longer have its own resources.
+ * Any resources belonging to the portal will be released in the
+ * upcoming transaction-wide cleanup; the portal will no longer
+ * have its own resources.
*/
portal->resowner = NULL;
/*
- * Having successfully exported the holdable cursor, mark it
- * as not belonging to this transaction.
+ * Having successfully exported the holdable cursor, mark it as
+ * not belonging to this transaction.
*/
portal->createSubid = InvalidSubTransactionId;
bool
PrepareHoldablePortals(void)
{
- bool result = false;
+ bool result = false;
HASH_SEQ_STATUS status;
PortalHashEnt *hentry;
portal->status == PORTAL_READY)
{
/*
- * We are exiting the transaction that created a holdable
- * cursor. Can't do PREPARE.
+ * We are exiting the transaction that created a holdable cursor.
+ * Can't do PREPARE.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
Portal portal = hentry->portal;
/*
- * Do not touch active portals --- this can only happen in the
- * case of a multi-transaction utility command, such as VACUUM.
+ * Do not touch active portals --- this can only happen in the case of
+ * a multi-transaction utility command, such as VACUUM.
*
* Note however that any resource owner attached to such a portal is
* still going to go away, so don't leave a dangling pointer.
portal->status = PORTAL_FAILED;
/*
- * Do nothing else to cursors held over from a previous
- * transaction.
+ * Do nothing else to cursors held over from a previous transaction.
*/
if (portal->createSubid == InvalidSubTransactionId)
continue;
/*
* Any resources belonging to the portal will be released in the
- * upcoming transaction-wide cleanup; they will be gone before we
- * run PortalDrop.
+ * upcoming transaction-wide cleanup; they will be gone before we run
+ * PortalDrop.
*/
portal->resowner = NULL;
}
continue;
/*
- * Force any active portals of my own transaction into FAILED
- * state. This is mostly to ensure that a portal running a FETCH
- * will go FAILED if the underlying cursor fails. (Note we do NOT
- * want to do this to upper-level portals, since they may be able
- * to continue.)
+ * Force any active portals of my own transaction into FAILED state.
+ * This is mostly to ensure that a portal running a FETCH will go
+ * FAILED if the underlying cursor fails. (Note we do NOT want to do
+ * this to upper-level portals, since they may be able to continue.)
*
* This is only needed to dodge the sanity check in PortalDrop.
*/
* If the portal is READY then allow it to survive into the parent
* transaction; otherwise shut it down.
*
- * Currently, we can't actually support that because the portal's
- * query might refer to objects created or changed in the failed
- * subtransaction, leading to crashes if execution is resumed.
- * So, even READY portals are deleted. It would be nice to detect
- * whether the query actually depends on any such object, instead.
+ * Currently, we can't actually support that because the portal's query
+ * might refer to objects created or changed in the failed
+ * subtransaction, leading to crashes if execution is resumed. So,
+ * even READY portals are deleted. It would be nice to detect whether
+ * the query actually depends on any such object, instead.
*/
#ifdef NOT_USED
if (portal->status == PORTAL_READY)
}
/*
- * Any resources belonging to the portal will be released in
- * the upcoming transaction-wide cleanup; they will be gone
- * before we run PortalDrop.
+ * Any resources belonging to the portal will be released in the
+ * upcoming transaction-wide cleanup; they will be gone before we
+ * run PortalDrop.
*/
portal->resowner = NULL;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.13 2005/08/08 19:17:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.14 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ResourceOwner owner;
owner = (ResourceOwner) MemoryContextAllocZero(TopMemoryContext,
- sizeof(ResourceOwnerData));
+ sizeof(ResourceOwnerData));
owner->name = name;
if (parent)
ResourceOwnerReleaseInternal(child, phase, isCommit, isTopLevel);
/*
- * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc
- * don't get confused. We needn't PG_TRY here because the outermost
- * level will fix it on error abort.
+ * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc don't
+ * get confused. We needn't PG_TRY here because the outermost level will
+ * fix it on error abort.
*/
save = CurrentResourceOwner;
CurrentResourceOwner = owner;
if (phase == RESOURCE_RELEASE_BEFORE_LOCKS)
{
/*
- * Release buffer pins. Note that ReleaseBuffer will
- * remove the buffer entry from my list, so I just have to
- * iterate till there are none.
+ * Release buffer pins. Note that ReleaseBuffer will remove the
+ * buffer entry from my list, so I just have to iterate till there are
+ * none.
*
- * During a commit, there shouldn't be any remaining pins ---
- * that would indicate failure to clean up the executor correctly ---
- * so issue warnings. In the abort case, just clean up quietly.
+ * During a commit, there shouldn't be any remaining pins --- that would
+ * indicate failure to clean up the executor correctly --- so issue
+ * warnings. In the abort case, just clean up quietly.
*
- * We are careful to do the releasing back-to-front, so as to
- * avoid O(N^2) behavior in ResourceOwnerForgetBuffer().
+ * We are careful to do the releasing back-to-front, so as to avoid
+ * O(N^2) behavior in ResourceOwnerForgetBuffer().
*/
while (owner->nbuffers > 0)
{
}
/*
- * Release relcache references. Note that RelationClose will
- * remove the relref entry from my list, so I just have to
- * iterate till there are none.
+ * Release relcache references. Note that RelationClose will remove
+ * the relref entry from my list, so I just have to iterate till there
+ * are none.
*
- * As with buffer pins, warn if any are left at commit time,
- * and release back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and release
+ * back-to-front for speed.
*/
while (owner->nrelrefs > 0)
{
if (isTopLevel)
{
/*
- * For a top-level xact we are going to release all locks (or
- * at least all non-session locks), so just do a single lmgr
- * call at the top of the recursion.
+ * For a top-level xact we are going to release all locks (or at
+ * least all non-session locks), so just do a single lmgr call at
+ * the top of the recursion.
*/
if (owner == TopTransactionResourceOwner)
ProcReleaseLocks(isCommit);
{
/*
* Release locks retail. Note that if we are committing a
- * subtransaction, we do NOT release its locks yet, but
- * transfer them to the parent.
+ * subtransaction, we do NOT release its locks yet, but transfer
+ * them to the parent.
*/
Assert(owner->parent != NULL);
if (isCommit)
else if (phase == RESOURCE_RELEASE_AFTER_LOCKS)
{
/*
- * Release catcache references. Note that ReleaseCatCache
- * will remove the catref entry from my list, so I just have
- * to iterate till there are none. Ditto for catcache lists.
+ * Release catcache references. Note that ReleaseCatCache will remove
+ * the catref entry from my list, so I just have to iterate till there
+ * are none. Ditto for catcache lists.
*
- * As with buffer pins, warn if any are left at commit time,
- * and release back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and release
+ * back-to-front for speed.
*/
while (owner->ncatrefs > 0)
{
Assert(owner->nrelrefs == 0);
/*
- * Delete children. The recursive call will delink the child from me,
- * so just iterate as long as there is a child.
+ * Delete children. The recursive call will delink the child from me, so
+ * just iterate as long as there is a child.
*/
while (owner->firstchild != NULL)
ResourceOwnerDelete(owner->firstchild);
/*
* We delink the owner from its parent before deleting it, so that if
- * there's an error we won't have deleted/busted owners still attached
- * to the owner tree. Better a leak than a crash.
+ * there's an error we won't have deleted/busted owners still attached to
+ * the owner tree. Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
/*
* Scan back-to-front because it's more likely we are releasing a
- * recently pinned buffer. This isn't always the case of course,
- * but it's the way to bet.
+ * recently pinned buffer. This isn't always the case of course, but
+ * it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.15 2004/12/31 22:02:52 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.16 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct IndirectBlock
{
int nextSlot; /* next pointer slot to write or read */
- struct IndirectBlock *nextup; /* parent indirect level, or NULL
- * if top */
- long ptrs[BLOCKS_PER_INDIR_BLOCK]; /* indexes of contained
- * blocks */
+ struct IndirectBlock *nextup; /* parent indirect level, or NULL if
+ * top */
+ long ptrs[BLOCKS_PER_INDIR_BLOCK]; /* indexes of contained blocks */
} IndirectBlock;
/*
{
IndirectBlock *indirect; /* bottom of my indirect-block hierarchy */
bool writing; /* T while in write phase */
- bool frozen; /* T if blocks should not be freed when
- * read */
+ bool frozen; /* T if blocks should not be freed when read */
bool dirty; /* does buffer need to be written? */
/*
- * The total data volume in the logical tape is numFullBlocks * BLCKSZ
- * + lastBlockBytes. BUT: we do not update lastBlockBytes during
- * writing, only at completion of a write phase.
+ * The total data volume in the logical tape is numFullBlocks * BLCKSZ +
+ * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
+ * only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
int lastBlockBytes; /* valid bytes in last (incomplete) block */
/*
* Buffer for current data block. Note we don't bother to store the
- * actual file block number of the data block (during the write phase
- * it hasn't been assigned yet, and during read we don't care
- * anymore). But we do need the relative block number so we can detect
- * end-of-tape while reading.
+ * actual file block number of the data block (during the write phase it
+ * hasn't been assigned yet, and during read we don't care anymore). But
+ * we do need the relative block number so we can detect end-of-tape while
+ * reading.
*/
long curBlockNumber; /* this block's logical blk# within tape */
int pos; /* next read/write position in buffer */
long nFileBlocks; /* # of blocks used in underlying file */
/*
- * We store the numbers of recycled-and-available blocks in
- * freeBlocks[]. When there are no such blocks, we extend the
- * underlying file. Note that the block numbers in freeBlocks are
- * always in *decreasing* order, so that removing the last entry gives
- * us the lowest free block.
+ * We store the numbers of recycled-and-available blocks in freeBlocks[].
+ * When there are no such blocks, we extend the underlying file. Note
+ * that the block numbers in freeBlocks are always in *decreasing* order,
+ * so that removing the last entry gives us the lowest free block.
*/
long *freeBlocks; /* resizable array */
int nFreeBlocks; /* # of currently free blocks */
- int freeBlocksLen; /* current allocated length of
- * freeBlocks[] */
+ int freeBlocksLen; /* current allocated length of freeBlocks[] */
/*
- * tapes[] is declared size 1 since C wants a fixed size, but actually
- * it is of length nTapes.
+ * tapes[] is declared size 1 since C wants a fixed size, but actually it
+ * is of length nTapes.
*/
int nTapes; /* # of logical tapes in set */
LogicalTape *tapes[1]; /* must be last in struct! */
ltsGetFreeBlock(LogicalTapeSet *lts)
{
/*
- * If there are multiple free blocks, we select the one appearing last
- * in freeBlocks[]. If there are none, assign the next block at the
- * end of the file.
+ * If there are multiple free blocks, we select the one appearing last in
+ * freeBlocks[]. If there are none, assign the next block at the end of
+ * the file.
*/
if (lts->nFreeBlocks > 0)
return lts->freeBlocks[--lts->nFreeBlocks];
{
lts->freeBlocksLen *= 2;
lts->freeBlocks = (long *) repalloc(lts->freeBlocks,
- lts->freeBlocksLen * sizeof(long));
+ lts->freeBlocksLen * sizeof(long));
}
/*
* Insert blocknum into array, preserving decreasing order (so that
- * ltsGetFreeBlock returns the lowest available block number). This
- * could get fairly slow if there were many free blocks, but we don't
- * expect there to be very many at one time.
+ * ltsGetFreeBlock returns the lowest available block number). This could
+ * get fairly slow if there were many free blocks, but we don't expect
+ * there to be very many at one time.
*/
ndx = lts->nFreeBlocks++;
ptr = lts->freeBlocks + ndx;
if (indirect->nextSlot >= BLOCKS_PER_INDIR_BLOCK)
{
/*
- * This indirect block is full, so dump it out and recursively
- * save its address in the next indirection level. Create a new
+ * This indirect block is full, so dump it out and recursively save
+ * its address in the next indirection level. Create a new
* indirection level if there wasn't one before.
*/
long indirblock = ltsGetFreeBlock(lts);
indirect->ptrs[indirect->nextSlot] = -1L;
/*
- * If block is not topmost, write it out, and recurse to obtain
- * address of first block in this hierarchy level. Read that one in.
+ * If block is not topmost, write it out, and recurse to obtain address of
+ * first block in this hierarchy level. Read that one in.
*/
if (indirect->nextup != NULL)
{
IndirectBlock *indirect)
{
/*
- * If block is not topmost, recurse to obtain address of first block
- * in this hierarchy level. Read that one in.
+ * If block is not topmost, recurse to obtain address of first block in
+ * this hierarchy level. Read that one in.
*/
if (indirect->nextup != NULL)
{
ltsReadBlock(lts, indirblock, (void *) indirect->ptrs);
/*
- * The previous block would only have been written out if full, so
- * we need not search it for a -1 sentinel.
+ * The previous block would only have been written out if full, so we
+ * need not search it for a -1 sentinel.
*/
indirect->nextSlot = BLOCKS_PER_INDIR_BLOCK + 1;
}
int i;
/*
- * Create top-level struct. First LogicalTape pointer is already
- * counted in sizeof(LogicalTapeSet).
+ * Create top-level struct. First LogicalTape pointer is already counted
+ * in sizeof(LogicalTapeSet).
*/
Assert(ntapes > 0);
lts = (LogicalTapeSet *) palloc(sizeof(LogicalTapeSet) +
if (lt->writing)
{
/*
- * Completion of a write phase. Flush last partial data
- * block, flush any partial indirect blocks, rewind for normal
+ * Completion of a write phase. Flush last partial data block,
+ * flush any partial indirect blocks, rewind for normal
* (destructive) read.
*/
if (lt->dirty)
else
{
/*
- * This is only OK if tape is frozen; we rewind for (another)
- * read pass.
+ * This is only OK if tape is frozen; we rewind for (another) read
+ * pass.
*/
Assert(lt->frozen);
datablocknum = ltsRewindFrozenIndirectBlock(lts, lt->indirect);
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
- * could add more code to free any unread blocks, but in current
- * usage of this module it'd be useless code.
+ * could add more code to free any unread blocks, but in current usage
+ * of this module it'd be useless code.
*/
IndirectBlock *ib,
*nextib;
Assert(lt->writing);
/*
- * Completion of a write phase. Flush last partial data block, flush
- * any partial indirect blocks, rewind for nondestructive read.
+ * Completion of a write phase. Flush last partial data block, flush any
+ * partial indirect blocks, rewind for nondestructive read.
*/
if (lt->dirty)
ltsDumpBuffer(lts, lt);
return false; /* a seek too far... */
/*
- * OK, we need to back up nblocks blocks. This implementation would
- * be pretty inefficient for long seeks, but we really aren't
- * expecting that (a seek over one tuple is typical).
+ * OK, we need to back up nblocks blocks. This implementation would be
+ * pretty inefficient for long seeks, but we really aren't expecting that
+ * (a seek over one tuple is typical).
*/
while (nblocks-- > 0)
{
return false;
/*
- * OK, advance or back up to the target block. This implementation
- * would be pretty inefficient for long seeks, but we really aren't
- * expecting that (a seek over one tuple is typical).
+ * OK, advance or back up to the target block. This implementation would
+ * be pretty inefficient for long seeks, but we really aren't expecting
+ * that (a seek over one tuple is typical).
*/
while (lt->curBlockNumber > blocknum)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.51 2005/10/03 22:55:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.52 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* GUC variable */
#ifdef TRACE_SORT
-bool trace_sort = false;
+bool trace_sort = false;
#endif
*/
typedef enum
{
- TSS_INITIAL, /* Loading tuples; still within memory
- * limit */
+ TSS_INITIAL, /* Loading tuples; still within memory limit */
TSS_BUILDRUNS, /* Loading tuples; writing to tape */
TSS_SORTEDINMEM, /* Sort completed entirely in memory */
TSS_SORTEDONTAPE, /* Sort completed, final run is on tape */
TupSortStatus status; /* enumerated value as shown above */
bool randomAccess; /* did caller request random access? */
long availMem; /* remaining memory available, in bytes */
- LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp
- * file */
+ LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
/*
- * These function pointers decouple the routines that must know what
- * kind of tuple we are sorting from the routines that don't need to
- * know it. They are set up by the tuplesort_begin_xxx routines.
+ * These function pointers decouple the routines that must know what kind
+ * of tuple we are sorting from the routines that don't need to know it.
+ * They are set up by the tuplesort_begin_xxx routines.
*
* Function to compare two tuples; result is per qsort() convention, ie:
*
int (*comparetup) (Tuplesortstate *state, const void *a, const void *b);
/*
- * Function to copy a supplied input tuple into palloc'd space. (NB:
- * we assume that a single pfree() is enough to release the tuple
- * later, so the representation must be "flat" in one palloc chunk.)
- * state->availMem must be decreased by the amount of space used.
+ * Function to copy a supplied input tuple into palloc'd space. (NB: we
+ * assume that a single pfree() is enough to release the tuple later, so
+ * the representation must be "flat" in one palloc chunk.) state->availMem
+ * must be decreased by the amount of space used.
*/
void *(*copytup) (Tuplesortstate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of
- * the tuple on tape need not be the same as it is in memory;
- * requirements on the tape representation are given below. After
- * writing the tuple, pfree() it, and increase state->availMem by the
- * amount of memory space thereby released.
+ * Function to write a stored tuple onto tape. The representation of the
+ * tuple on tape need not be the same as it is in memory; requirements on
+ * the tape representation are given below. After writing the tuple,
+ * pfree() it, and increase state->availMem by the amount of memory space
+ * thereby released.
*/
void (*writetup) (Tuplesortstate *state, int tapenum, void *tup);
/*
- * Function to read a stored tuple from tape back into memory. 'len'
- * is the already-read length of the stored tuple. Create and return
- * a palloc'd copy, and decrease state->availMem by the amount of
- * memory space consumed.
+ * Function to read a stored tuple from tape back into memory. 'len' is
+ * the already-read length of the stored tuple. Create and return a
+ * palloc'd copy, and decrease state->availMem by the amount of memory
+ * space consumed.
*/
void *(*readtup) (Tuplesortstate *state, int tapenum, unsigned int len);
/*
- * This array holds pointers to tuples in sort memory. If we are in
- * state INITIAL, the tuples are in no particular order; if we are in
- * state SORTEDINMEM, the tuples are in final sorted order; in states
- * BUILDRUNS and FINALMERGE, the tuples are organized in "heap" order
- * per Algorithm H. (Note that memtupcount only counts the tuples
- * that are part of the heap --- during merge passes, memtuples[]
- * entries beyond TAPERANGE are never in the heap and are used to hold
- * pre-read tuples.) In state SORTEDONTAPE, the array is not used.
+ * This array holds pointers to tuples in sort memory. If we are in state
+ * INITIAL, the tuples are in no particular order; if we are in state
+ * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
+ * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
+ * H. (Note that memtupcount only counts the tuples that are part of the
+ * heap --- during merge passes, memtuples[] entries beyond TAPERANGE are
+ * never in the heap and are used to hold pre-read tuples.) In state
+ * SORTEDONTAPE, the array is not used.
*/
void **memtuples; /* array of pointers to palloc'd tuples */
int memtupcount; /* number of tuples currently present */
int memtupsize; /* allocated length of memtuples array */
/*
- * While building initial runs, this array holds the run number for
- * each tuple in memtuples[]. During merge passes, we re-use it to
- * hold the input tape number that each tuple in the heap was read
- * from, or to hold the index of the next tuple pre-read from the same
- * tape in the case of pre-read entries. This array is never
- * allocated unless we need to use tapes. Whenever it is allocated,
- * it has the same length as memtuples[].
+ * While building initial runs, this array holds the run number for each
+ * tuple in memtuples[]. During merge passes, we re-use it to hold the
+ * input tape number that each tuple in the heap was read from, or to hold
+ * the index of the next tuple pre-read from the same tape in the case of
+ * pre-read entries. This array is never allocated unless we need to use
+ * tapes. Whenever it is allocated, it has the same length as
+ * memtuples[].
*/
- int *memtupindex; /* index value associated with
- * memtuples[i] */
+ int *memtupindex; /* index value associated with memtuples[i] */
/*
* While building initial runs, this is the current output run number
- * (starting at 0). Afterwards, it is the number of initial runs we
- * made.
+ * (starting at 0). Afterwards, it is the number of initial runs we made.
*/
int currentRun;
/*
- * These variables are only used during merge passes. mergeactive[i]
- * is true if we are reading an input run from (actual) tape number i
- * and have not yet exhausted that run. mergenext[i] is the memtuples
- * index of the next pre-read tuple (next to be loaded into the heap)
- * for tape i, or 0 if we are out of pre-read tuples. mergelast[i]
- * similarly points to the last pre-read tuple from each tape.
- * mergeavailmem[i] is the amount of unused space allocated for tape
- * i. mergefreelist and mergefirstfree keep track of unused locations
- * in the memtuples[] array. memtupindex[] links together pre-read
- * tuples for each tape as well as recycled locations in
- * mergefreelist. It is OK to use 0 as a null link in these lists,
- * because memtuples[0] is part of the merge heap and is never a
- * pre-read tuple.
+ * These variables are only used during merge passes. mergeactive[i] is
+ * true if we are reading an input run from (actual) tape number i and
+ * have not yet exhausted that run. mergenext[i] is the memtuples index
+ * of the next pre-read tuple (next to be loaded into the heap) for tape
+ * i, or 0 if we are out of pre-read tuples. mergelast[i] similarly
+ * points to the last pre-read tuple from each tape. mergeavailmem[i] is
+ * the amount of unused space allocated for tape i. mergefreelist and
+ * mergefirstfree keep track of unused locations in the memtuples[] array.
+ * memtupindex[] links together pre-read tuples for each tape as well as
+ * recycled locations in mergefreelist. It is OK to use 0 as a null link
+ * in these lists, because memtuples[0] is part of the merge heap and is
+ * never a pre-read tuple.
*/
bool mergeactive[MAXTAPES]; /* Active input run source? */
- int mergenext[MAXTAPES]; /* first preread tuple for each
- * source */
- int mergelast[MAXTAPES]; /* last preread tuple for each
- * source */
+ int mergenext[MAXTAPES]; /* first preread tuple for each source */
+ int mergelast[MAXTAPES]; /* last preread tuple for each source */
long mergeavailmem[MAXTAPES]; /* availMem for prereading
* tapes */
long spacePerTape; /* actual per-tape target usage */
*/
int Level; /* Knuth's l */
int destTape; /* current output tape (Knuth's j, less 1) */
- int tp_fib[MAXTAPES]; /* Target Fibonacci run counts
- * (A[]) */
+ int tp_fib[MAXTAPES]; /* Target Fibonacci run counts (A[]) */
int tp_runs[MAXTAPES]; /* # of real runs on each tape */
- int tp_dummy[MAXTAPES]; /* # of dummy runs for each tape
- * (D[]) */
+ int tp_dummy[MAXTAPES]; /* # of dummy runs for each tape (D[]) */
int tp_tapenum[MAXTAPES]; /* Actual tape numbers (TAPE[]) */
/*
- * These variables are used after completion of sorting to keep track
- * of the next tuple to return. (In the tape case, the tape's current
- * read position is also critical state.)
+ * These variables are used after completion of sorting to keep track of
+ * the next tuple to return. (In the tape case, the tape's current read
+ * position is also critical state.)
*/
int result_tape; /* actual tape number of finished output */
int current; /* array index (only used if SORTEDINMEM) */
/* markpos_xxx holds marked position for mark and restore */
long markpos_block; /* tape block# (only used if SORTEDONTAPE) */
- int markpos_offset; /* saved "current", or offset in tape
- * block */
+ int markpos_offset; /* saved "current", or offset in tape block */
bool markpos_eof; /* saved "eof_reached" */
/*
SortFunctionKind *sortFnKinds;
/*
- * These variables are specific to the IndexTuple case; they are set
- * by tuplesort_begin_index and used only by the IndexTuple routines.
+ * These variables are specific to the IndexTuple case; they are set by
+ * tuplesort_begin_index and used only by the IndexTuple routines.
*/
Relation indexRel;
ScanKey indexScanKey;
/* Algorithm D variables will be initialized by inittapes, if needed */
- state->result_tape = -1; /* flag that result tape has not been
- * formed */
+ state->result_tape = -1; /* flag that result tape has not been formed */
return state;
}
&state->sortFnKinds[i]);
/*
- * We needn't fill in sk_strategy or sk_subtype since these
- * scankeys will never be passed to an index.
+ * We needn't fill in sk_strategy or sk_subtype since these scankeys
+ * will never be passed to an index.
*/
ScanKeyInit(&state->scanKeys[i],
attNums[i],
pfree(state->memtupindex);
/*
- * this stuff might better belong in a variant-specific shutdown
- * routine
+ * this stuff might better belong in a variant-specific shutdown routine
*/
if (state->scanKeys)
pfree(state->scanKeys);
/*
* Insert the copied tuple into the heap, with run number
- * currentRun if it can go into the current run, else run
- * number currentRun+1. The tuple can go into the current run
- * if it is >= the first not-yet-output tuple. (Actually, it
- * could go into the current run if it is >= the most recently
- * output tuple ... but that would require keeping around the
- * tuple we last output, and it's simplest to let writetup
- * free each tuple as soon as it's written.)
+ * currentRun if it can go into the current run, else run number
+ * currentRun+1. The tuple can go into the current run if it is
+ * >= the first not-yet-output tuple. (Actually, it could go into
+ * the current run if it is >= the most recently output tuple ...
+ * but that would require keeping around the tuple we last output,
+ * and it's simplest to let writetup free each tuple as soon as
+ * it's written.)
*
- * Note there will always be at least one tuple in the heap at
- * this point; see dumptuples.
+ * Note there will always be at least one tuple in the heap at this
+ * point; see dumptuples.
*/
Assert(state->memtupcount > 0);
if (COMPARETUP(state, tuple, state->memtuples[0]) >= 0)
tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);
/*
- * If we are over the memory limit, dump tuples till we're
- * under.
+ * If we are over the memory limit, dump tuples till we're under.
*/
dumptuples(state, false);
break;
case TSS_INITIAL:
/*
- * We were able to accumulate all the tuples within the
- * allowed amount of memory. Just qsort 'em and we're done.
+ * We were able to accumulate all the tuples within the allowed
+ * amount of memory. Just qsort 'em and we're done.
*/
if (state->memtupcount > 1)
{
case TSS_BUILDRUNS:
/*
- * Finish tape-based sort. First, flush all tuples remaining
- * in memory out to tape; then merge until we have a single
- * remaining run (or, if !randomAccess, one run per tape).
- * Note that mergeruns sets the correct state->status.
+ * Finish tape-based sort. First, flush all tuples remaining in
+ * memory out to tape; then merge until we have a single remaining
+ * run (or, if !randomAccess, one run per tape). Note that
+ * mergeruns sets the correct state->status.
*/
dumptuples(state, true);
mergeruns(state);
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple,
- * else - tuple before last returned.
+ * if all tuples are fetched already then we return last tuple, else
+ * - tuple before last returned.
*/
if (state->eof_reached)
{
/*
- * Seek position is pointing just past the zero tuplen at
- * the end of file; back up to fetch last tuple's ending
- * length word. If seek fails we must have a completely
- * empty file.
+ * Seek position is pointing just past the zero tuplen at the
+ * end of file; back up to fetch last tuple's ending length
+ * word. If seek fails we must have a completely empty file.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
else
{
/*
- * Back up and fetch previously-returned tuple's ending
- * length word. If seek fails, assume we are at start of
- * file.
+ * Back up and fetch previously-returned tuple's ending length
+ * word. If seek fails, assume we are at start of file.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
- tuplen + 2 * sizeof(unsigned int)))
+ tuplen + 2 * sizeof(unsigned int)))
{
/*
- * If that fails, presumably the prev tuple is the
- * first in the file. Back up so that it becomes next
- * to read in forward direction (not obviously right,
- * but that is what in-memory case does).
+ * If that fails, presumably the prev tuple is the first
+ * in the file. Back up so that it becomes next to read
+ * in forward direction (not obviously right, but that is
+ * what in-memory case does).
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
- tuplen + sizeof(unsigned int)))
+ tuplen + sizeof(unsigned int)))
elog(ERROR, "bogus tuple length in backward scan");
return NULL;
}
tuplen = getlen(state, state->result_tape, false);
/*
- * Now we have the length of the prior tuple, back up and read
- * it. Note: READTUP expects we are positioned after the
- * initial length word of the tuple, so back up to that point.
+ * Now we have the length of the prior tuple, back up and read it.
+ * Note: READTUP expects we are positioned after the initial
+ * length word of the tuple, so back up to that point.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
if ((tupIndex = state->mergenext[srcTape]) == 0)
{
/*
- * out of preloaded data on this tape, try to read
- * more
+ * out of preloaded data on this tape, try to read more
*/
mergepreread(state);
/*
- * if still no data, we've reached end of run on this
- * tape
+ * if still no data, we've reached end of run on this tape
*/
if ((tupIndex = state->mergenext[srcTape]) == 0)
return tup;
USEMEM(state, GetMemoryChunkSpace(state->memtupindex));
/*
- * Convert the unsorted contents of memtuples[] into a heap. Each
- * tuple is marked as belonging to run number zero.
+ * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
+ * marked as belonging to run number zero.
*
* NOTE: we pass false for checkIndex since there's no point in comparing
- * indexes in this step, even though we do intend the indexes to be
- * part of the sort key...
+ * indexes in this step, even though we do intend the indexes to be part
+ * of the sort key...
*/
ntuples = state->memtupcount;
state->memtupcount = 0; /* make the heap empty */
/*
* If we produced only one initial run (quite likely if the total data
- * volume is between 1X and 2X workMem), we can just use that tape as
- * the finished output, rather than doing a useless merge.
+ * volume is between 1X and 2X workMem), we can just use that tape as the
+ * finished output, rather than doing a useless merge.
*/
if (state->currentRun == 1)
{
}
/*
- * If we don't have to produce a materialized sorted tape,
- * quit as soon as we're down to one real/dummy run per tape.
+ * If we don't have to produce a materialized sorted tape, quit as
+ * soon as we're down to one real/dummy run per tape.
*/
if (!state->randomAccess && allOneRun)
{
state->tp_runs[TAPERANGE - 1] = 0;
/*
- * reassign tape units per step D6; note we no longer care about
- * A[]
+ * reassign tape units per step D6; note we no longer care about A[]
*/
svTape = state->tp_tapenum[TAPERANGE];
svDummy = state->tp_dummy[TAPERANGE];
}
/*
- * Done. Knuth says that the result is on TAPE[1], but since we
- * exited the loop without performing the last iteration of step D6,
- * we have not rearranged the tape unit assignment, and therefore the
- * result is on TAPE[T]. We need to do it this way so that we can
- * freeze the final output tape while rewinding it. The last
- * iteration of step D6 would be a waste of cycles anyway...
+ * Done. Knuth says that the result is on TAPE[1], but since we exited
+ * the loop without performing the last iteration of step D6, we have not
+ * rearranged the tape unit assignment, and therefore the result is on
+ * TAPE[T]. We need to do it this way so that we can freeze the final
+ * output tape while rewinding it. The last iteration of step D6 would be
+ * a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[TAPERANGE];
LogicalTapeFreeze(state->tapeset, state->result_tape);
spaceFreed;
/*
- * Start the merge by loading one tuple from each active source tape
- * into the heap. We can also decrease the input run/dummy run
- * counts.
+ * Start the merge by loading one tuple from each active source tape into
+ * the heap. We can also decrease the input run/dummy run counts.
*/
beginmerge(state);
/*
- * Execute merge by repeatedly extracting lowest tuple in heap,
- * writing it out, and replacing it with next tuple from same tape (if
- * there is another one).
+ * Execute merge by repeatedly extracting lowest tuple in heap, writing it
+ * out, and replacing it with next tuple from same tape (if there is
+ * another one).
*/
while (state->memtupcount > 0)
{
}
/*
- * When the heap empties, we're done. Write an end-of-run marker on
- * the output tape, and increment its count of real runs.
+ * When the heap empties, we're done. Write an end-of-run marker on the
+ * output tape, and increment its count of real runs.
*/
markrunend(state, destTape);
state->tp_runs[TAPERANGE]++;
memset(state->mergelast, 0, sizeof(state->mergelast));
memset(state->mergeavailmem, 0, sizeof(state->mergeavailmem));
state->mergefreelist = 0; /* nothing in the freelist */
- state->mergefirstfree = MAXTAPES; /* first slot available for
- * preread */
+ state->mergefirstfree = MAXTAPES; /* first slot available for preread */
/* Adjust run counts and mark the active tapes */
activeTapes = 0;
}
/*
- * Initialize space allocation to let each active input tape have an
- * equal share of preread space.
+ * Initialize space allocation to let each active input tape have an equal
+ * share of preread space.
*/
Assert(activeTapes > 0);
state->spacePerTape = state->availMem / activeTapes;
}
/*
- * Preread as many tuples as possible (and at least one) from each
- * active tape
+ * Preread as many tuples as possible (and at least one) from each active
+ * tape
*/
mergepreread(state);
continue;
/*
- * Read tuples from this tape until it has used up its free
- * memory, but ensure that we have at least one.
+ * Read tuples from this tape until it has used up its free memory,
+ * but ensure that we have at least one.
*/
priorAvail = state->availMem;
state->availMem = state->mergeavailmem[srcTape];
(LACKMEM(state) && state->memtupcount > 1))
{
/*
- * Dump the heap's frontmost entry, and sift up to remove it from
- * the heap.
+ * Dump the heap's frontmost entry, and sift up to remove it from the
+ * heap.
*/
Assert(state->memtupcount > 0);
WRITETUP(state, state->tp_tapenum[state->destTape],
memtupindex = state->memtupindex;
/*
- * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth
- * is using 1-based array indexes, not 0-based.
+ * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
+ * using 1-based array indexes, not 0-based.
*/
j = state->memtupcount++;
while (j > 0)
Oid opclass = InvalidOid;
/*
- * Search pg_amop to see if the target operator is registered as the
- * "<" or ">" operator of any btree opclass. It's possible that it
- * might be registered both ways (eg, if someone were to build a
- * "reverse sort" opclass for some reason); prefer the "<" case if so.
- * If the operator is registered the same way in multiple opclasses,
- * assume we can use the associated comparator function from any one.
+ * Search pg_amop to see if the target operator is registered as the "<"
+ * or ">" operator of any btree opclass. It's possible that it might be
+ * registered both ways (eg, if someone were to build a "reverse sort"
+ * opclass for some reason); prefer the "<" case if so. If the operator is
+ * registered the same way in multiple opclasses, assume we can use the
+ * associated comparator function from any one.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(sortOperator),
}
/*
- * Can't find a comparator, so use the operator as-is. Decide whether
- * it is forward or reverse sort by looking at its name (grotty, but
- * this only matters for deciding which end NULLs should get sorted
- * to). XXX possibly better idea: see whether its selectivity
- * function is scalargtcmp?
+ * Can't find a comparator, so use the operator as-is. Decide whether it
+ * is forward or reverse sort by looking at its name (grotty, but this
+ * only matters for deciding which end NULLs should get sorted to). XXX
+ * possibly better idea: see whether its selectivity function is
+ * scalargtcmp?
*/
tuple = SearchSysCache(OPEROID,
ObjectIdGetDatum(sortOperator),
* If btree has asked us to enforce uniqueness, complain if two equal
* tuples are detected (unless there was at least one NULL field).
*
- * It is sufficient to make the test here, because if two tuples are
- * equal they *must* get compared at some stage of the sort ---
- * otherwise the sort algorithm wouldn't have checked whether one must
- * appear before the other.
+ * It is sufficient to make the test here, because if two tuples are equal
+ * they *must* get compared at some stage of the sort --- otherwise the
+ * sort algorithm wouldn't have checked whether one must appear before the
+ * other.
*
- * Some rather brain-dead implementations of qsort will sometimes call
- * the comparison routine to compare a value to itself. (At this
- * writing only QNX 4 is known to do such silly things.) Don't raise
- * a bogus error in that case.
+ * Some rather brain-dead implementations of qsort will sometimes call the
+ * comparison routine to compare a value to itself. (At this writing only
+ * QNX 4 is known to do such silly things.) Don't raise a bogus error in
+ * that case.
*/
if (state->enforceUnique && !equal_hasnull && tuple1 != tuple2)
ereport(ERROR,
errdetail("Table contains duplicated values.")));
/*
- * If key values are equal, we sort on ItemPointer. This does not
- * affect validity of the finished index, but it offers cheap
- * insurance against performance problems with bad qsort
- * implementations that have trouble with large numbers of equal keys.
+ * If key values are equal, we sort on ItemPointer. This does not affect
+ * validity of the finished index, but it offers cheap insurance against
+ * performance problems with bad qsort implementations that have trouble
+ * with large numbers of equal keys.
*/
{
BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.22 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.23 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
BufFile *myfile; /* underlying file, or NULL if none */
/*
- * These function pointers decouple the routines that must know what
- * kind of tuple we are handling from the routines that don't need to
- * know it. They are set up by the tuplestore_begin_xxx routines.
+ * These function pointers decouple the routines that must know what kind
+ * of tuple we are handling from the routines that don't need to know it.
+ * They are set up by the tuplestore_begin_xxx routines.
*
- * (Although tuplestore.c currently only supports heap tuples, I've
- * copied this part of tuplesort.c so that extension to other kinds of
- * objects will be easy if it's ever needed.)
+ * (Although tuplestore.c currently only supports heap tuples, I've copied
+ * this part of tuplesort.c so that extension to other kinds of objects
+ * will be easy if it's ever needed.)
*
* Function to copy a supplied input tuple into palloc'd space. (NB: we
- * assume that a single pfree() is enough to release the tuple later,
- * so the representation must be "flat" in one palloc chunk.)
- * state->availMem must be decreased by the amount of space used.
+ * assume that a single pfree() is enough to release the tuple later, so
+ * the representation must be "flat" in one palloc chunk.) state->availMem
+ * must be decreased by the amount of space used.
*/
void *(*copytup) (Tuplestorestate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of
- * the tuple on tape need not be the same as it is in memory;
- * requirements on the tape representation are given below. After
- * writing the tuple, pfree() it, and increase state->availMem by the
- * amount of memory space thereby released.
+ * Function to write a stored tuple onto tape. The representation of the
+ * tuple on tape need not be the same as it is in memory; requirements on
+ * the tape representation are given below. After writing the tuple,
+ * pfree() it, and increase state->availMem by the amount of memory space
+ * thereby released.
*/
void (*writetup) (Tuplestorestate *state, void *tup);
/*
- * Function to read a stored tuple from tape back into memory. 'len'
- * is the already-read length of the stored tuple. Create and return
- * a palloc'd copy, and decrease state->availMem by the amount of
- * memory space consumed.
+ * Function to read a stored tuple from tape back into memory. 'len' is
+ * the already-read length of the stored tuple. Create and return a
+ * palloc'd copy, and decrease state->availMem by the amount of memory
+ * space consumed.
*/
void *(*readtup) (Tuplestorestate *state, unsigned int len);
/*
- * This array holds pointers to tuples in memory if we are in state
- * INMEM. In states WRITEFILE and READFILE it's not used.
+ * This array holds pointers to tuples in memory if we are in state INMEM.
+ * In states WRITEFILE and READFILE it's not used.
*/
void **memtuples; /* array of pointers to palloc'd tuples */
int memtupcount; /* number of tuples currently present */
/*
* These variables are used to keep track of the current position.
*
- * In state WRITEFILE, the current file seek position is the write point,
- * and the read position is remembered in readpos_xxx; in state
- * READFILE, the current file seek position is the read point, and the
- * write position is remembered in writepos_xxx. (The write position
- * is the same as EOF, but since BufFileSeek doesn't currently
- * implement SEEK_END, we have to remember it explicitly.)
+ * In state WRITEFILE, the current file seek position is the write point, and
+ * the read position is remembered in readpos_xxx; in state READFILE, the
+ * current file seek position is the read point, and the write position is
+ * remembered in writepos_xxx. (The write position is the same as EOF,
+ * but since BufFileSeek doesn't currently implement SEEK_END, we have to
+ * remember it explicitly.)
*
- * Special case: if we are in WRITEFILE state and eof_reached is true,
- * then the read position is implicitly equal to the write position
- * (and hence to the file seek position); this way we need not update
- * the readpos_xxx variables on each write.
+ * Special case: if we are in WRITEFILE state and eof_reached is true, then
+ * the read position is implicitly equal to the write position (and hence
+ * to the file seek position); this way we need not update the readpos_xxx
+ * variables on each write.
*/
bool eof_reached; /* read reached EOF (always valid) */
int current; /* next array index (valid if INMEM) */
&state->writepos_file, &state->writepos_offset);
if (!state->eof_reached)
if (BufFileSeek(state->myfile,
- state->readpos_file, state->readpos_offset,
+ state->readpos_file, state->readpos_offset,
SEEK_SET) != 0)
elog(ERROR, "seek failed");
state->status = TSS_READFILE;
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple,
- * else - tuple before last returned.
+ * if all tuples are fetched already then we return last tuple, else
+ * - tuple before last returned.
*
- * Back up to fetch previously-returned tuple's ending length
- * word. If seek fails, assume we are at start of file.
+ * Back up to fetch previously-returned tuple's ending length word.
+ * If seek fails, assume we are at start of file.
*/
if (BufFileSeek(state->myfile, 0, -(long) sizeof(unsigned int),
SEEK_CUR) != 0)
* Back up to get ending length word of tuple before it.
*/
if (BufFileSeek(state->myfile, 0,
- -(long) (tuplen + 2 * sizeof(unsigned int)),
+ -(long) (tuplen + 2 * sizeof(unsigned int)),
SEEK_CUR) != 0)
{
/*
- * If that fails, presumably the prev tuple is the
- * first in the file. Back up so that it becomes next
- * to read in forward direction (not obviously right,
- * but that is what in-memory case does).
+ * If that fails, presumably the prev tuple is the first
+ * in the file. Back up so that it becomes next to read
+ * in forward direction (not obviously right, but that is
+ * what in-memory case does).
*/
if (BufFileSeek(state->myfile, 0,
- -(long) (tuplen + sizeof(unsigned int)),
+ -(long) (tuplen + sizeof(unsigned int)),
SEEK_CUR) != 0)
elog(ERROR, "bogus tuple length in backward scan");
return NULL;
}
/*
- * Now we have the length of the prior tuple, back up and read
- * it. Note: READTUP expects we are positioned after the
- * initial length word of the tuple, so back up to that point.
+ * Now we have the length of the prior tuple, back up and read it.
+ * Note: READTUP expects we are positioned after the initial
+ * length word of the tuple, so back up to that point.
*/
if (BufFileSeek(state->myfile, 0,
-(long) tuplen,
* TransactionIdDidCommit will both return true. If we check only
* TransactionIdDidCommit, we could consider a tuple committed when a
* later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency. The
* upshot is that we gotta check TransactionIdIsInProgress first in all
* code paths, except for a few cases where we are looking at
* subtransactions of our own main transaction and so there can't be any
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.90 2005/08/20 00:39:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.91 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
else if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple)))
{
if (HeapTupleHeaderGetCmin(tuple) >= curcid)
- return HeapTupleInvisible; /* inserted after scan
- * started */
+ return HeapTupleInvisible; /* inserted after scan started */
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return HeapTupleMayBeUpdated;
Assert(TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(tuple)));
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan
- * started */
+ return HeapTupleSelfUpdated; /* updated after scan started */
else
- return HeapTupleInvisible; /* updated before scan
- * started */
+ return HeapTupleInvisible; /* updated before scan started */
}
else if (TransactionIdIsInProgress(HeapTupleHeaderGetXmin(tuple)))
return HeapTupleInvisible;
if (tuple->t_infomask & HEAP_IS_LOCKED)
return HeapTupleMayBeUpdated;
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan
- * started */
+ return HeapTupleSelfUpdated; /* updated after scan started */
else
return HeapTupleInvisible; /* updated before scan started */
}
* By here, the inserting transaction has committed - have to check
* when...
*
- * Note that the provided snapshot contains only top-level XIDs, so we
- * have to convert a subxact XID to its parent for comparison.
- * However, we can make first-pass range checks with the given XID,
- * because a subxact with XID < xmin has surely also got a parent with
- * XID < xmin, while one with XID >= xmax must belong to a parent that
- * was not yet committed at the time of this snapshot.
+ * Note that the provided snapshot contains only top-level XIDs, so we have
+ * to convert a subxact XID to its parent for comparison. However, we can
+ * make first-pass range checks with the given XID, because a subxact with
+ * XID < xmin has surely also got a parent with XID < xmin, while one with
+ * XID >= xmax must belong to a parent that was not yet committed at the
+ * time of this snapshot.
*/
if (TransactionIdFollowsOrEquals(HeapTupleHeaderGetXmin(tuple),
snapshot->xmin))
/*
* Has inserting transaction committed?
*
- * If the inserting transaction aborted, then the tuple was never visible
- * to any other transaction, so we can delete it immediately.
+ * If the inserting transaction aborted, then the tuple was never visible to
+ * any other transaction, so we can delete it immediately.
*/
if (!(tuple->t_infomask & HEAP_XMIN_COMMITTED))
{
else
{
/*
- * Not in Progress, Not Committed, so either Aborted or
- * crashed
+ * Not in Progress, Not Committed, so either Aborted or crashed
*/
tuple->t_infomask |= HEAP_XMIN_INVALID;
SetBufferCommitInfoNeedsSave(buffer);
}
/*
- * Okay, the inserter committed, so it was good at some point. Now
- * what about the deleting transaction?
+ * Okay, the inserter committed, so it was good at some point. Now what
+ * about the deleting transaction?
*/
if (tuple->t_infomask & HEAP_XMAX_INVALID)
return HEAPTUPLE_LIVE;
if (tuple->t_infomask & HEAP_IS_LOCKED)
{
/*
- * "Deleting" xact really only locked it, so the tuple
- * is live in any case. However, we must make sure that either
- * XMAX_COMMITTED or XMAX_INVALID gets set once the xact is gone;
- * otherwise it is unsafe to recycle CLOG status after vacuuming.
+ * "Deleting" xact really only locked it, so the tuple is live in any
+ * case. However, we must make sure that either XMAX_COMMITTED or
+ * XMAX_INVALID gets set once the xact is gone; otherwise it is unsafe
+ * to recycle CLOG status after vacuuming.
*/
if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED))
{
}
/*
- * We don't really care whether xmax did commit, abort or
- * crash. We know that xmax did lock the tuple, but
- * it did not and will never actually update it.
+ * We don't really care whether xmax did commit, abort or crash.
+ * We know that xmax did lock the tuple, but it did not and will
+ * never actually update it.
*/
tuple->t_infomask |= HEAP_XMAX_INVALID;
SetBufferCommitInfoNeedsSave(buffer);
else
{
/*
- * Not in Progress, Not Committed, so either Aborted or
- * crashed
+ * Not in Progress, Not Committed, so either Aborted or crashed
*/
tuple->t_infomask |= HEAP_XMAX_INVALID;
SetBufferCommitInfoNeedsSave(buffer);
HeapTupleHeaderGetXmax(tuple)))
{
/*
- * Inserter also deleted it, so it was never visible to anyone
- * else. However, we can only remove it early if it's not an
- * updated tuple; else its parent tuple is linking to it via t_ctid,
- * and this tuple mustn't go away before the parent does.
+ * Inserter also deleted it, so it was never visible to anyone else.
+ * However, we can only remove it early if it's not an updated tuple;
+ * else its parent tuple is linking to it via t_ctid, and this tuple
+ * mustn't go away before the parent does.
*/
if (!(tuple->t_infomask & HEAP_UPDATED))
return HEAPTUPLE_DEAD;
* to produce a new database.
*
* For largely-historical reasons, the template1 database is the one built
- * by the basic bootstrap process. After it is complete, template0 and
+ * by the basic bootstrap process. After it is complete, template0 and
* the default database, postgres, are made just by copying template1.
*
* To create template1, we run the postgres (backend) program in bootstrap
* Portions Copyright (c) 1994, Regents of the University of California
* Portions taken from FreeBSD.
*
- * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.98 2005/08/28 22:21:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.99 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* these values are passed in by makefile defines
*/
-static char *share_path = NULL;
+static char *share_path = NULL;
/* values to be obtained from arguments */
-static char *pg_data = "";
-static char *encoding = "";
-static char *locale = "";
-static char *lc_collate = "";
-static char *lc_ctype = "";
-static char *lc_monetary = "";
-static char *lc_numeric = "";
-static char *lc_time = "";
-static char *lc_messages = "";
-static char *username = "";
-static bool pwprompt = false;
-static char *pwfilename = NULL;
-static char *authmethod = "";
-static bool debug = false;
-static bool noclean = false;
-static bool show_setting = false;
+static char *pg_data = "";
+static char *encoding = "";
+static char *locale = "";
+static char *lc_collate = "";
+static char *lc_ctype = "";
+static char *lc_monetary = "";
+static char *lc_numeric = "";
+static char *lc_time = "";
+static char *lc_messages = "";
+static char *username = "";
+static bool pwprompt = false;
+static char *pwfilename = NULL;
+static char *authmethod = "";
+static bool debug = false;
+static bool noclean = false;
+static bool show_setting = false;
/* internal vars */
static const char *progname;
-static char *encodingid = "0";
-static char *bki_file;
-static char *desc_file;
-static char *hba_file;
-static char *ident_file;
-static char *conf_file;
-static char *conversion_file;
-static char *info_schema_file;
-static char *features_file;
-static char *system_views_file;
-static char *effective_user;
-static bool made_new_pgdata = false;
-static bool found_existing_pgdata = false;
-static char infoversion[100];
-static bool caught_signal = false;
-static bool output_failed = false;
-static int output_errno = 0;
+static char *encodingid = "0";
+static char *bki_file;
+static char *desc_file;
+static char *hba_file;
+static char *ident_file;
+static char *conf_file;
+static char *conversion_file;
+static char *info_schema_file;
+static char *features_file;
+static char *system_views_file;
+static char *effective_user;
+static bool made_new_pgdata = false;
+static bool found_existing_pgdata = false;
+static char infoversion[100];
+static bool caught_signal = false;
+static bool output_failed = false;
+static int output_errno = 0;
/* defaults */
-static int n_connections = 10;
-static int n_buffers = 50;
+static int n_connections = 10;
+static int n_buffers = 50;
/*
* Warning messages for authentication methods
"# any local user to connect as any PostgreSQL user, including the database\n" \
"# superuser. If you do not trust all your local users, use another\n" \
"# authentication method.\n"
-static char *authwarning = NULL;
+static char *authwarning = NULL;
/*
* Centralized knowledge of switches to pass to backend
/* path to 'initdb' binary directory */
-static char bin_path[MAXPGPATH];
-static char backend_exec[MAXPGPATH];
+static char bin_path[MAXPGPATH];
+static char backend_exec[MAXPGPATH];
static void *pg_malloc(size_t size);
static char *xstrdup(const char *s);
static char **replace_token(char **lines,
- const char *token, const char *replacement);
+ const char *token, const char *replacement);
+
#ifndef HAVE_UNIX_SOCKETS
static char **filter_lines_with_token(char **lines, const char *token);
#endif
filter_lines_with_token(char **lines, const char *token)
{
int numlines = 1;
- int i, src, dst;
+ int i,
+ src,
+ dst;
char **result;
for (i = 0; lines[i]; i++)
static FILE *
popen_check(const char *command, const char *mode)
{
- FILE *cmdfd;
+ FILE *cmdfd;
fflush(stdout);
fflush(stderr);
{
/*
* POSIX 1003.2: For each dir operand that does not name an
- * existing directory, effects equivalent to those caused by
- * the following command shall occcur:
+ * existing directory, effects equivalent to those caused by the
+ * following command shall occur:
*
- * mkdir -p -m $(umask -S),u+wx $(dirname dir) && mkdir [-m mode]
- * dir
+ * mkdir -p -m $(umask -S),u+wx $(dirname dir) && mkdir [-m mode] dir
*
- * We change the user's umask and then restore it, instead of
- * doing chmod's.
+ * We change the user's umask and then restore it, instead of doing
+ * chmod's.
*/
oumask = umask(0);
numask = oumask & ~(S_IWUSR | S_IXUSR);
{
if (made_new_pgdata || found_existing_pgdata)
fprintf(stderr,
- _("%s: data directory \"%s\" not removed at user's request\n"),
+ _("%s: data directory \"%s\" not removed at user's request\n"),
progname, pg_data);
}
exit(1);
}
#endif
-
#else /* the windows code */
struct passwd_win32
for (i = 0; encoding_match_list[i].system_enc_name; i++)
{
if (pg_enc == encoding_match_list[i].pg_enc_code
- && pg_strcasecmp(sys, encoding_match_list[i].system_enc_name) == 0)
+ && pg_strcasecmp(sys, encoding_match_list[i].system_enc_name) == 0)
{
free(sys);
return;
fprintf(stderr,
_("%s: warning: encoding mismatch\n"), progname);
fprintf(stderr,
- _("The encoding you selected (%s) and the encoding that the selected\n"
- "locale uses (%s) are not known to match. This may lead to\n"
- "misbehavior in various character string processing functions. To fix\n"
- "this situation, rerun %s and either do not specify an encoding\n"
- "explicitly, or choose a matching combination.\n"),
+ _("The encoding you selected (%s) and the encoding that the selected\n"
+ "locale uses (%s) are not known to match. This may lead to\n"
+ "misbehavior in various character string processing functions. To fix\n"
+ "this situation, rerun %s and either do not specify an encoding\n"
+ "explicitly, or choose a matching combination.\n"),
pg_encoding_to_char(pg_enc), sys, progname);
free(sys);
}
#ifdef WIN32
+
/*
- * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- * not in released version
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but not in
+ * released version
*/
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
char *path;
path = pg_malloc(strlen(pg_data) + 2 +
- (subdir == NULL ? 0 : strlen(subdir)));
+ (subdir == NULL ? 0 : strlen(subdir)));
if (subdir != NULL)
sprintf(path, "%s/%s", pg_data, subdir);
{
fprintf(stderr,
_("%s: file \"%s\" does not exist\n"
- "This means you have a corrupted installation or identified\n"
+ "This means you have a corrupted installation or identified\n"
"the wrong directory with the invocation option -L.\n"),
progname, path);
exit(1);
conflines = readfile(hba_file);
#ifndef HAVE_UNIX_SOCKETS
- conflines = filter_lines_with_token(conflines,"@remove-line-for-nolocal@");
+ conflines = filter_lines_with_token(conflines, "@remove-line-for-nolocal@");
#else
- conflines = replace_token(conflines,"@remove-line-for-nolocal@","");
+ conflines = replace_token(conflines, "@remove-line-for-nolocal@", "");
#endif
#ifdef HAVE_IPV6
- /*
+
+ /*
* Probe to see if there is really any platform support for IPv6, and
* comment out the relevant pg_hba line if not. This avoids runtime
* warnings if getaddrinfo doesn't actually cope with IPv6. Particularly
- * useful on Windows, where executables built on a machine with IPv6
- * may have to run on a machine without.
+ * useful on Windows, where executables built on a machine with IPv6 may
+ * have to run on a machine without.
*/
{
struct addrinfo *gai_result;
struct addrinfo hints;
- int err = 0;
+ int err = 0;
+
#ifdef WIN32
/* need to call WSAStartup before calling getaddrinfo */
- WSADATA wsaData;
+ WSADATA wsaData;
- err = WSAStartup(MAKEWORD(2,2), &wsaData);
+ err = WSAStartup(MAKEWORD(2, 2), &wsaData);
#endif
/* for best results, this code should match parse_hba() */
"host all all ::1",
"#host all all ::1");
}
-#else /* !HAVE_IPV6 */
+#else /* !HAVE_IPV6 */
/* If we didn't compile IPV6 support at all, always comment it out */
conflines = replace_token(conflines,
"host all all ::1",
"#host all all ::1");
-#endif /* HAVE_IPV6 */
+#endif /* HAVE_IPV6 */
/* Replace default authentication methods */
conflines = replace_token(conflines,
conflines = replace_token(conflines,
"@authcomment@",
- strcmp(authmethod, "trust") ? "" : AUTHTRUST_WARNING);
+ strcmp(authmethod, "trust") ? "" : AUTHTRUST_WARNING);
snprintf(path, sizeof(path), "%s/pg_hba.conf", pg_data);
if (strcmp(headerline, *bki_lines) != 0)
{
fprintf(stderr,
- _("%s: input file \"%s\" does not belong to PostgreSQL %s\n"
- "Check your installation or specify the correct path "
- "using the option -L.\n"),
+ _("%s: input file \"%s\" does not belong to PostgreSQL %s\n"
+ "Check your installation or specify the correct path "
+ "using the option -L.\n"),
progname, bki_file, PG_VERSION);
exit_nicely();
}
/*
* Pass correct LC_xxx environment to bootstrap.
*
- * The shell script arranged to restore the LC settings afterwards, but
- * there doesn't seem to be any compelling reason to do that.
+ * The shell script arranged to restore the LC settings afterwards, but there
+ * doesn't seem to be any compelling reason to do that.
*/
snprintf(cmd, sizeof(cmd), "LC_COLLATE=%s", lc_collate);
putenv(xstrdup(cmd));
char **line;
static char *pg_authid_setup[] = {
/*
- * Create triggers to ensure manual updates to shared catalogs
- * will be reflected into their "flat file" copies.
+ * Create triggers to ensure manual updates to shared catalogs will be
+ * reflected into their "flat file" copies.
*/
"CREATE TRIGGER pg_sync_pg_database "
" AFTER INSERT OR UPDATE OR DELETE ON pg_database "
" FOR EACH STATEMENT EXECUTE PROCEDURE flatfile_update_trigger();\n",
/*
- * The authid table shouldn't be readable except through views,
- * to ensure passwords are not publicly visible.
+ * The authid table shouldn't be readable except through views, to
+ * ensure passwords are not publicly visible.
*/
"REVOKE ALL on pg_authid FROM public;\n",
NULL
* Read password from file
*
* Ideally this should insist that the file not be world-readable.
- * However, this option is mainly intended for use on Windows
- * where file permissions may not exist at all, so we'll skip the
- * paranoia for now.
+ * However, this option is mainly intended for use on Windows where
+ * file permissions may not exist at all, so we'll skip the paranoia
+ * for now.
*/
FILE *pwf = fopen(pwfilename, "r");
char pwdbuf[MAXPGPATH];
char **line;
static char *pg_depend_setup[] = {
/*
- * Make PIN entries in pg_depend for all objects made so far in
- * the tables that the dependency code handles. This is overkill
- * (the system doesn't really depend on having every last weird
- * datatype, for instance) but generating only the minimum
- * required set of dependencies seems hard.
+ * Make PIN entries in pg_depend for all objects made so far in the
+ * tables that the dependency code handles. This is overkill (the
+ * system doesn't really depend on having every last weird datatype,
+ * for instance) but generating only the minimum required set of
+ * dependencies seems hard.
*
- * Note that we deliberately do not pin the system views, which
- * haven't been created yet.
+ * Note that we deliberately do not pin the system views, which haven't
+ * been created yet.
*
* First delete any already-made entries; PINs override all else, and
* must be the only entries for their objects.
/*
* Set up privileges
*
- * We mark most system catalogs as world-readable. We don't currently have
+ * We mark most system catalogs as world-readable. We don't currently have
* to touch functions, languages, or databases, because their default
* permissions are OK.
*
lines = readfile(info_schema_file);
/*
- * We use -N here to avoid backslashing stuff in
- * information_schema.sql
+ * We use -N here to avoid backslashing stuff in information_schema.sql
*/
snprintf(cmd, sizeof(cmd),
"\"%s\" %s -N template1 >%s",
/*
* Explicitly revoke public create-schema and create-temp-table
- * privileges in template1 and template0; else the latter would be
- * on by default
+ * privileges in template1 and template0; else the latter would be on
+ * by default
*/
"REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n",
"REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n",
escape_quotes(const char *src)
{
int len = strlen(src),
- i, j;
- char *result = pg_malloc(len * 2 + 1);
-
+ i,
+ j;
+ char *result = pg_malloc(len * 2 + 1);
+
for (i = 0, j = 0; i < len; i++)
{
if (SQL_STR_DOUBLE(src[i]))
}
/*
- * override absent/invalid config settings from initdb's locale
- * settings
+ * override absent/invalid config settings from initdb's locale settings
*/
if (strlen(lc_ctype) == 0 || !chklocale(lc_ctype))
char *pgdenv; /* PGDATA value gotten from and sent to
* environment */
char bin_dir[MAXPGPATH];
- char *pg_data_native;
+ char *pg_data_native;
static const char *subdirs[] = {
"global",
"pg_xlog",
if (strcmp(authmethod, "md5") &&
strcmp(authmethod, "ident") &&
- strncmp(authmethod, "ident ", 6) && /* ident with space =
- * param */
+ strncmp(authmethod, "ident ", 6) && /* ident with space = param */
strcmp(authmethod, "trust") &&
#ifdef USE_PAM
strcmp(authmethod, "pam") &&
canonicalize_path(pg_data);
/*
- * we have to set PGDATA for postgres rather than pass it on the
- * command line to avoid dumb quoting problems on Windows, and we
- * would especially need quotes otherwise on Windows because paths
- * there are most likely to have embedded spaces.
+ * we have to set PGDATA for postgres rather than pass it on the command
+ * line to avoid dumb quoting problems on Windows, and we would especially
+ * need quotes otherwise on Windows because paths there are most likely to
+ * have embedded spaces.
*/
pgdenv = pg_malloc(8 + strlen(pg_data));
sprintf(pgdenv, "PGDATA=%s", pg_data);
if ((ret = find_other_exec(argv[0], "postgres", PG_VERSIONSTR,
backend_exec)) < 0)
{
- char full_path[MAXPGPATH];
+ char full_path[MAXPGPATH];
if (find_my_exec(argv[0], full_path) < 0)
StrNCpy(full_path, progname, MAXPGPATH);
umask(077);
/*
- * now we are starting to do real work, trap signals so we can clean
- * up
+ * now we are starting to do real work, trap signals so we can clean up
*/
/* some of these are not valid on Windows */
/*
* Determine platform-specific config settings
*
- * Use reasonable values if kernel will let us, else scale back. Probe
- * for max_connections first since it is subject to more constraints
- * than shared_buffers.
+ * Use reasonable values if kernel will let us, else scale back. Probe for
+ * max_connections first since it is subject to more constraints than
+ * shared_buffers.
*/
set_null_conf();
bootstrap_template1(short_version);
/*
- * Make the per-database PG_VERSION for template1 only after init'ing
- * it
+ * Make the per-database PG_VERSION for template1 only after init'ing it
*/
set_short_version(short_version, "base/1");
make_template0();
make_postgres();
-
+
if (authwarning != NULL)
fprintf(stderr, "%s", authwarning);
/* Get directory specification used to start this executable */
strcpy(bin_dir, argv[0]);
get_parent_directory(bin_dir);
-
+
printf(_("\nSuccess. You can now start the database server using:\n\n"
" %s%s%spostmaster%s -D %s%s%s\n"
"or\n"
" %s%s%spg_ctl%s -D %s%s%s -l logfile start\n\n"),
- QUOTE_PATH, bin_dir, (strlen(bin_dir) > 0) ? DIR_SEP : "", QUOTE_PATH,
- QUOTE_PATH, pg_data_native, QUOTE_PATH,
- QUOTE_PATH, bin_dir, (strlen(bin_dir) > 0) ? DIR_SEP : "", QUOTE_PATH,
- QUOTE_PATH, pg_data_native, QUOTE_PATH);
+ QUOTE_PATH, bin_dir, (strlen(bin_dir) > 0) ? DIR_SEP : "", QUOTE_PATH,
+ QUOTE_PATH, pg_data_native, QUOTE_PATH,
+ QUOTE_PATH, bin_dir, (strlen(bin_dir) > 0) ? DIR_SEP : "", QUOTE_PATH,
+ QUOTE_PATH, pg_data_native, QUOTE_PATH);
return 0;
}
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/pg_config/pg_config.c,v 1.16 2005/10/13 17:58:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_config/pg_config.c,v 1.17 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "port.h"
static const char *progname;
-static char mypath[MAXPGPATH];
+static char mypath[MAXPGPATH];
/*
* C:/Progra~1/
*
* This can fail in 2 ways - if the path doesn't exist, or short names are
- * disabled. In the first case, don't return any path. In the second case,
+ * disabled. In the first case, don't return any path. In the second case,
* we leave the path in the long form. In this case, it does still seem to
* fix elements containing spaces which is all we actually need.
*/
if (GetShortPathName(path, path, MAXPGPATH - 1) == 0)
{
- /* Ignore ERROR_INVALID_PARAMETER as it almost certainly
- * means that short names are disabled
+ /*
+ * Ignore ERROR_INVALID_PARAMETER as it almost certainly means that
+ * short names are disabled
*/
if (GetLastError() != ERROR_INVALID_PARAMETER)
{
static void
show_bindir(bool all)
{
- char path[MAXPGPATH];
- char *lastsep;
+ char path[MAXPGPATH];
+ char *lastsep;
if (all)
printf("BINDIR = ");
static void
show_docdir(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("DOCDIR = ");
static void
show_includedir(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("INCLUDEDIR = ");
static void
show_pkgincludedir(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("PKGINCLUDEDIR = ");
static void
show_includedir_server(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("INCLUDEDIR-SERVER = ");
static void
show_libdir(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("LIBDIR = ");
static void
show_pkglibdir(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("PKGLIBDIR = ");
static void
show_localedir(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("LOCALEDIR = ");
static void
show_mandir(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("MANDIR = ");
static void
show_sharedir(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("SHAREDIR = ");
static void
show_sysconfdir(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("SYSCONFDIR = ");
static void
show_pgxs(bool all)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
if (all)
printf("PGXS = ");
} InfoItem;
static const InfoItem info_items[] = {
- { "--bindir", show_bindir },
- { "--docdir", show_docdir },
- { "--includedir", show_includedir },
- { "--pkgincludedir", show_pkgincludedir },
- { "--includedir-server", show_includedir_server },
- { "--libdir", show_libdir },
- { "--pkglibdir", show_pkglibdir },
- { "--localedir", show_localedir },
- { "--mandir", show_mandir },
- { "--sharedir", show_sharedir },
- { "--sysconfdir", show_sysconfdir },
- { "--pgxs", show_pgxs },
- { "--configure", show_configure },
- { "--cc", show_cc },
- { "--cppflags", show_cppflags },
- { "--cflags", show_cflags },
- { "--cflags_sl", show_cflags_sl },
- { "--ldflags", show_ldflags },
- { "--ldflags_sl", show_ldflags_sl },
- { "--libs", show_libs },
- { "--version", show_version },
- { NULL, NULL }
+ {"--bindir", show_bindir},
+ {"--docdir", show_docdir},
+ {"--includedir", show_includedir},
+ {"--pkgincludedir", show_pkgincludedir},
+ {"--includedir-server", show_includedir_server},
+ {"--libdir", show_libdir},
+ {"--pkglibdir", show_pkglibdir},
+ {"--localedir", show_localedir},
+ {"--mandir", show_mandir},
+ {"--sharedir", show_sharedir},
+ {"--sysconfdir", show_sysconfdir},
+ {"--pgxs", show_pgxs},
+ {"--configure", show_configure},
+ {"--cc", show_cc},
+ {"--cppflags", show_cppflags},
+ {"--cflags", show_cflags},
+ {"--cflags_sl", show_cflags_sl},
+ {"--ldflags", show_ldflags},
+ {"--ldflags_sl", show_ldflags_sl},
+ {"--libs", show_libs},
+ {"--version", show_version},
+ {NULL, NULL}
};
* copyright (c) Oliver Elphick
, 2001;
* licence: BSD
*
- * $PostgreSQL: pgsql/src/bin/pg_controldata/pg_controldata.c,v 1.26 2005/10/03 00:28:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_controldata/pg_controldata.c,v 1.27 2005/10/15 02:49:37 momjian Exp $
*/
#include "postgres.h"
if (!EQ_CRC32(crc, ControlFile.crc))
printf(_("WARNING: Calculated CRC checksum does not match value stored in file.\n"
"Either the file is corrupt, or it has a different layout than this program\n"
- "is expecting. The results below are untrustworthy.\n\n"));
+ "is expecting. The results below are untrustworthy.\n\n"));
/*
- * Use variable for format to suppress overly-anal-retentive gcc
- * warning about %c
+ * Use variable for format to suppress overly-anal-retentive gcc warning
+ * about %c
*/
strftime(pgctime_str, sizeof(pgctime_str), strftime_fmt,
localtime(&(ControlFile.time)));
localtime(&(ControlFile.checkPointCopy.time)));
/*
- * Format system_identifier separately to keep platform-dependent
- * format code out of the translatable message string.
+ * Format system_identifier separately to keep platform-dependent format
+ * code out of the translatable message string.
*/
snprintf(sysident_str, sizeof(sysident_str), UINT64_FORMAT,
ControlFile.system_identifier);
printf(_("Latest checkpoint location: %X/%X\n"),
ControlFile.checkPoint.xlogid, ControlFile.checkPoint.xrecoff);
printf(_("Prior checkpoint location: %X/%X\n"),
- ControlFile.prevCheckPoint.xlogid, ControlFile.prevCheckPoint.xrecoff);
+ ControlFile.prevCheckPoint.xlogid, ControlFile.prevCheckPoint.xrecoff);
printf(_("Latest checkpoint's REDO location: %X/%X\n"),
ControlFile.checkPointCopy.redo.xlogid, ControlFile.checkPointCopy.redo.xrecoff);
printf(_("Latest checkpoint's UNDO location: %X/%X\n"),
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.60 2005/07/25 04:52:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.61 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
start_postmaster(void)
{
/*
- * Since there might be quotes to handle here, it is easier simply to
- * pass everything to a shell to process them.
+ * Since there might be quotes to handle here, it is easier simply to pass
+ * everything to a shell to process them.
*/
char cmd[MAXPGPATH];
/*
* Win32 needs START /B rather than "&".
*
- * Win32 has a problem with START and quoted executable names. You must
- * add a "" as the title at the beginning so you can quote the
- * executable name:
+ * Win32 has a problem with START and quoted executable names. You must add a
+ * "" as the title at the beginning so you can quote the executable name:
* http://www.winnetmag.com/Article/ArticleID/14589/14589.html
* http://dev.remotenetworktechnology.com/cmd/cmdfaq.htm
*/
if (log_file != NULL)
-#ifndef WIN32 /* Cygwin doesn't have START */
+#ifndef WIN32 /* Cygwin doesn't have START */
snprintf(cmd, MAXPGPATH, "%s\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1 &%s",
SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
DEVNULL, log_file, SYSTEMQUOTE);
DEVNULL, log_file, SYSTEMQUOTE);
#endif
else
-#ifndef WIN32 /* Cygwin doesn't have START */
+#ifndef WIN32 /* Cygwin doesn't have START */
snprintf(cmd, MAXPGPATH, "%s\"%s\" %s%s < \"%s\" 2>&1 &%s",
SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
DEVNULL, SYSTEMQUOTE);
if ((ret = find_other_exec(argv0, "postmaster", PM_VERSIONSTR,
postmaster_path)) < 0)
{
- char full_path[MAXPGPATH];
-
+ char full_path[MAXPGPATH];
+
if (find_my_exec(argv0, full_path) < 0)
StrNCpy(full_path, progname, MAXPGPATH);
-
+
if (ret == -1)
write_stderr(_("The program \"postmaster\" is needed by %s "
"but was not found in the\n"
print_msg(_("waiting for postmaster to shut down..."));
- /* always wait for restart */
+ /* always wait for restart */
for (cnt = 0; cnt < wait_seconds; cnt++)
{
if ((pid = get_pgpid()) != 0)
{
print_msg(".");
- pg_usleep(1000000); /* 1 sec */
+ pg_usleep(1000000); /* 1 sec */
}
else
break;
}
- if (pid != 0) /* pid file still exists */
+ if (pid != 0) /* pid file still exists */
{
print_msg(_(" failed\n"));
/*
* Test to see if the process is still there. Note that we do not
* consider an EPERM failure to mean that the process is still there;
- * EPERM must mean that the given PID belongs to some other userid,
- * and considering the permissions on $PGDATA, that means it's not
- * the postmaster we are after.
+ * EPERM must mean that the given PID belongs to some other userid, and
+ * considering the permissions on $PGDATA, that means it's not the
+ * postmaster we are after.
*
* Don't believe that our own PID or parent shell's PID is the postmaster,
- * either. (Windows hasn't got getppid(), though.)
+ * either. (Windows hasn't got getppid(), though.)
*/
if (pid == getpid())
return false;
return;
}
}
- else /* postmaster */
+ else
+ /* postmaster */
{
if (postmaster_is_alive((pid_t) pid))
{
{
static char cmdLine[MAXPGPATH];
int ret;
+
#ifdef __CYGWIN__
char buf[MAXPGPATH];
#endif
}
if ((hService = CreateService(hSCM, register_servicename, register_servicename,
- SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS,
- SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
+ SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS,
+ SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
pgwin32_CommandLine(true),
- NULL, NULL, "RPCSS\0", register_username, register_password)) == NULL)
+ NULL, NULL, "RPCSS\0", register_username, register_password)) == NULL)
{
CloseServiceHandle(hSCM);
write_stderr(_("%s: could not register service \"%s\": error code %d\n"), progname, register_servicename, (int) GetLastError());
case SERVICE_CONTROL_SHUTDOWN:
/*
- * We only need a short wait hint here as it just needs to
- * wait for the next checkpoint. They occur every 5 seconds
- * during shutdown
+ * We only need a short wait hint here as it just needs to wait
+ * for the next checkpoint. They occur every 5 seconds during
+ * shutdown
*/
status.dwWaitHint = 10000;
pgwin32_SetServiceStatus(SERVICE_STOP_PENDING);
set_pglocale_pgservice(argv[0], "pg_ctl");
/*
- * save argv[0] so do_start() can look for the postmaster if
- * necessary. we don't look for postmaster here because in many cases
- * we won't need it.
+ * save argv[0] so do_start() can look for the postmaster if necessary. we
+ * don't look for postmaster here because in many cases we won't need it.
*/
argv0 = argv[0];
/*
* 'Action' can be before or after args so loop over both. Some
- * getopt_long() implementations will reorder argv[] to place all
- * flags first (GNU?), but we don't rely on it. Our /port version
- * doesn't do that.
+ * getopt_long() implementations will reorder argv[] to place all flags
+ * first (GNU?), but we don't rely on it. Our /port version doesn't do
+ * that.
*/
optind = 1;
putenv(env_var);
/*
- * We could pass PGDATA just in an environment
- * variable but we do -D too for clearer
- * postmaster 'ps' display
+ * We could pass PGDATA just in an environment
+ * variable but we do -D too for clearer postmaster
+ * 'ps' display
*/
pgdata_opt = pg_malloc(strlen(pgdata_D) + 7);
snprintf(pgdata_opt, strlen(pgdata_D) + 7,
if (strchr(optarg, '\\'))
register_username = xstrdup(optarg);
else
- /* Prepend .\ for local accounts */
+ /* Prepend .\ for local accounts */
{
register_username = malloc(strlen(optarg) + 3);
if (!register_username)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/common.c,v 1.86 2005/06/27 02:17:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/common.c,v 1.87 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
foundAttr = true;
foundNotNull |= parent->notnull[inhAttrInd];
- if (attrDef != NULL) /* If we have a default,
- * check parent */
+ if (attrDef != NULL) /* If we have a default, check
+ * parent */
{
AttrDefInfo *inhDef;
{
defaultsFound = true;
defaultsMatch &= (strcmp(attrDef->adef_expr,
- inhDef->adef_expr) == 0);
+ inhDef->adef_expr) == 0);
}
}
}
}
/*
- * Based on the scan of the parents, decide if we can rely on
- * the inherited attr
+ * Based on the scan of the parents, decide if we can rely on the
+ * inherited attr
*/
if (foundAttr) /* Attr was inherited */
{
}
/*
- * Clear it if NOT NULL and none of the parents were NOT
- * NULL
+ * Clear it if NOT NULL and none of the parents were NOT NULL
*/
if (tbinfo->notnull[j] && !foundNotNull)
{
}
/*
- * Check for inherited CHECK constraints. We assume a constraint
- * is inherited if its name matches the name of any constraint in
- * the parent. Originally this code tried to compare the expression
+ * Check for inherited CHECK constraints. We assume a constraint is
+ * inherited if its name matches the name of any constraint in the
+ * parent. Originally this code tried to compare the expression
* texts, but that can fail if the parent and child tables are in
* different schemas, because reverse-listing of function calls may
* produce different text (schema-qualified or not) depending on
int cmpval;
/*
- * Compare OID first since it's usually unique, whereas there will
- * only be a few distinct values of tableoid.
+ * Compare OID first since it's usually unique, whereas there will only be
+ * a few distinct values of tableoid.
*/
cmpval = oidcmp(obj1->catId.oid, obj2->catId.oid);
if (cmpval == 0)
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.19 2005/07/02 17:01:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.20 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
id_return = createPQExpBuffer();
/*
- * These checks need to match the identifier production in scan.l.
- * Don't use islower() etc.
+ * These checks need to match the identifier production in scan.l. Don't
+ * use islower() etc.
*/
if (ScanKeywordLookup(rawid))
void
appendStringLiteral(PQExpBuffer buf, const char *str, bool escapeAll)
{
- char ch;
+ char ch;
const char *p;
for (p = str; *p; p++)
{
ch = *p;
if (ch == '\\' ||
- ((unsigned char)ch < (unsigned char)' ' &&
+ ((unsigned char) ch < (unsigned char) ' ' &&
(escapeAll ||
(ch != '\t' && ch != '\n' && ch != '\v' &&
ch != '\f' && ch != '\r'))))
break;
}
}
-
+
appendPQExpBufferChar(buf, '\'');
for (p = str; *p; p++)
{
appendPQExpBufferChar(buf, ch);
appendPQExpBufferChar(buf, ch);
}
- else if ((unsigned char)ch < (unsigned char)' ' &&
+ else if ((unsigned char) ch < (unsigned char) ' ' &&
(escapeAll ||
(ch != '\t' && ch != '\n' && ch != '\v' &&
ch != '\f' && ch != '\r')))
{
/*
- * generate octal escape for control chars other than
- * whitespace
+ * generate octal escape for control chars other than whitespace
*/
appendPQExpBufferChar(buf, '\\');
appendPQExpBufferChar(buf, ((ch >> 6) & 3) + '0');
appendPQExpBufferStr(delimBuf, dqprefix);
/*
- * Make sure we choose a delimiter which (without the trailing $) is
- * not present in the string being quoted. We don't check with the
- * trailing $ because a string ending in $foo must not be quoted with
- * $foo$.
+ * Make sure we choose a delimiter which (without the trailing $) is not
+ * present in the string being quoted. We don't check with the trailing $
+ * because a string ending in $foo must not be quoted with $foo$.
*/
while (strstr(str, delimBuf->data) != NULL)
{
/*
* We expect input in the form of "{item,item,item}" where any item is
- * either raw data, or surrounded by double quotes (in which case
- * embedded characters including backslashes and quotes are
- * backslashed).
+ * either raw data, or surrounded by double quotes (in which case embedded
+ * characters including backslashes and quotes are backslashed).
*
- * We build the result as an array of pointers followed by the actual
- * string data, all in one malloc block for convenience of
- * deallocation. The worst-case storage need is not more than one
- * pointer and one character for each input character (consider
- * "{,,,,,,,,,,}").
+ * We build the result as an array of pointers followed by the actual string
+ * data, all in one malloc block for convenience of deallocation. The
+ * worst-case storage need is not more than one pointer and one character
+ * for each input character (consider "{,,,,,,,,,,}").
*/
*itemarray = NULL;
*nitems = 0;
privswgo = createPQExpBuffer();
/*
- * At the end, these two will be pasted together to form the result.
- * But the owner privileges need to go before the other ones to keep
- * the dependencies valid. In recent versions this is normally the
- * case, but in old versions they come after the PUBLIC privileges and
- * that results in problems if we need to run REVOKE on the owner
- * privileges.
+ * At the end, these two will be pasted together to form the result. But
+ * the owner privileges need to go before the other ones to keep the
+ * dependencies valid. In recent versions this is normally the case, but
+ * in old versions they come after the PUBLIC privileges and that results
+ * in problems if we need to run REVOKE on the owner privileges.
*/
firstsql = createPQExpBuffer();
secondsql = createPQExpBuffer();
else if (strncmp(grantee->data, "group ",
strlen("group ")) == 0)
appendPQExpBuffer(secondsql, "GROUP %s;\n",
- fmtId(grantee->data + strlen("group ")));
+ fmtId(grantee->data + strlen("group ")));
else
appendPQExpBuffer(secondsql, "%s;\n", fmtId(grantee->data));
}
else if (strncmp(grantee->data, "group ",
strlen("group ")) == 0)
appendPQExpBuffer(secondsql, "GROUP %s",
- fmtId(grantee->data + strlen("group ")));
+ fmtId(grantee->data + strlen("group ")));
else
appendPQExpBuffer(secondsql, "%s", fmtId(grantee->data));
appendPQExpBuffer(secondsql, " WITH GRANT OPTION;\n");
}
/*
- * If we didn't find any owner privs, the owner must have revoked 'em
- * all
+ * If we didn't find any owner privs, the owner must have revoked 'em all
*/
if (!found_owner_privs && owner)
{
while (*input && *input != '=')
{
/*
- * If user name isn't quoted, then just add it to the output
- * buffer
+ * If user name isn't quoted, then just add it to the output buffer
*/
if (*input != '"')
appendPQExpBufferChar(output, *input++);
return input; /* really a syntax error... */
/*
- * Quoting convention is to escape " as "". Keep this
- * code in sync with putid() in backend's acl.c.
+ * Quoting convention is to escape " as "". Keep this code in
+ * sync with putid() in backend's acl.c.
*/
if (*input == '"' && *(input + 1) == '"')
input++;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.36 2005/06/21 20:45:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.37 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct _Archive
{
int verbose;
- char *remoteVersionStr; /* server's version string */
- int remoteVersion; /* same in numeric form */
+ char *remoteVersionStr; /* server's version string */
+ int remoteVersion; /* same in numeric form */
- int minRemoteVersion; /* allowable range */
+ int minRemoteVersion; /* allowable range */
int maxRemoteVersion;
/* error handling */
typedef struct _restoreOptions
{
int create; /* Issue commands to create the database */
- int noOwner; /* Don't try to match original object
- * owner */
- int disable_triggers; /* disable triggers during
- * data-only restore */
+ int noOwner; /* Don't try to match original object owner */
+ int disable_triggers; /* disable triggers during data-only
+ * restore */
int use_setsessauth;/* Use SET SESSION AUTHORIZATION commands
* instead of OWNER TO */
char *superuser; /* Username to use as superuser */
bool limitToList;
int compression;
- int suppressDumpWarnings; /* Suppress output of WARNING
- * entries to stderr */
+ int suppressDumpWarnings; /* Suppress output of WARNING entries
+ * to stderr */
} RestoreOptions;
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.116 2005/09/28 13:11:26 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.117 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
const int compression, ArchiveMode mode);
static void _getObjectDescription(PQExpBuffer buf, TocEntry *te,
- ArchiveHandle *AH);
+ ArchiveHandle *AH);
static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass);
* Check for nonsensical option combinations.
*
* NB: create+dropSchema is useless because if you're creating the DB,
- * there's no need to drop individual items in it. Moreover, if we
- * tried to do that then we'd issue the drops in the database
- * initially connected to, not the one we will create, which is very
- * bad...
+ * there's no need to drop individual items in it. Moreover, if we tried
+ * to do that then we'd issue the drops in the database initially
+ * connected to, not the one we will create, which is very bad...
*/
if (ropt->create && ropt->dropSchema)
die_horribly(AH, modulename, "-C and -c are incompatible options\n");
ropt->requirePassword, ropt->ignoreVersion);
/*
- * If we're talking to the DB directly, don't send comments since
- * they obscure SQL when displaying errors
+ * If we're talking to the DB directly, don't send comments since they
+ * obscure SQL when displaying errors
*/
AH->noTocComments = 1;
}
/*
- * Work out if we have an implied data-only restore. This can happen
- * if the dump was data only or if the user has used a toc list to
- * exclude all of the schema data. All we do is look for schema
- * entries - if none are found then we set the dataOnly flag.
+ * Work out if we have an implied data-only restore. This can happen if
+ * the dump was data only or if the user has used a toc list to exclude
+ * all of the schema data. All we do is look for schema entries - if none
+ * are found then we set the dataOnly flag.
*
* We could scan for wanted TABLE entries, but that is not the same as
* dataOnly. At this stage, it seems unnecessary (6-Mar-2001).
*/
if (!ropt->dataOnly)
{
- int impliedDataOnly = 1;
+ int impliedDataOnly = 1;
for (te = AH->toc->next; te != AH->toc; te = te->next)
{
{
AH->currentTE = te;
- reqs = _tocEntryRequired(te, ropt, false /* needn't drop ACLs */);
+ reqs = _tocEntryRequired(te, ropt, false /* needn't drop ACLs */ );
if (((reqs & REQ_SCHEMA) != 0) && te->dropStmt)
{
/* We want the schema */
if ((reqs & REQ_DATA) != 0)
{
/*
- * hadDumper will be set if there is genuine data component
- * for this node. Otherwise, we need to check the defn field
- * for statements that need to be executed in data-only
- * restores.
+ * hadDumper will be set if there is genuine data component for
+ * this node. Otherwise, we need to check the defn field for
+ * statements that need to be executed in data-only restores.
*/
if (te->hadDumper)
{
te->tag);
/*
- * If we have a copy statement, use it. As of
- * V1.3, these are separate to allow easy import
- * from withing a database connection. Pre 1.3
- * archives can not use DB connections and are
- * sent to output only.
+ * If we have a copy statement, use it. As of V1.3,
+ * these are separate to allow easy import from
+ * withing a database connection. Pre 1.3 archives can
+ * not use DB connections and are sent to output only.
*
- * For V1.3+, the table data MUST have a copy
- * statement so that we can go into appropriate
- * mode with libpq.
+ * For V1.3+, the table data MUST have a copy statement
+ * so that we can go into appropriate mode with libpq.
*/
if (te->copyStmt && strlen(te->copyStmt) > 0)
ahprintf(AH, "%s", te->copyStmt);
CatalogId catalogId, DumpId dumpId,
const char *tag,
const char *namespace,
- const char *tablespace,
+ const char *tablespace,
const char *owner, bool withOids,
const char *desc, const char *defn,
const char *dropStmt, const char *copyStmt,
int cnt = -1;
/*
- * This is paranoid: deal with the possibility that vsnprintf is
- * willing to ignore trailing null or returns > 0 even if string does
- * not fit. It may be the case that it returns cnt = bufsize
+ * This is paranoid: deal with the possibility that vsnprintf is willing
+ * to ignore trailing null or returns > 0 even if string does not fit. It
+ * may be the case that it returns cnt = bufsize
*/
while (cnt < 0 || cnt >= (bSize - 1))
{
int cnt = -1;
/*
- * This is paranoid: deal with the possibility that vsnprintf is
- * willing to ignore trailing null
+ * This is paranoid: deal with the possibility that vsnprintf is willing
+ * to ignore trailing null
*/
/*
- * or returns > 0 even if string does not fit. It may be the case that
- * it returns cnt = bufsize
+ * or returns > 0 even if string does not fit. It may be the case that it
+ * returns cnt = bufsize
*/
while (cnt < 0 || cnt >= (bSize - 1))
{
(unsigned long) AH->lo_buf_used, (unsigned long) res);
if (res != AH->lo_buf_used)
die_horribly(AH, modulename,
- "could not write to large object (result: %lu, expected: %lu)\n",
- (unsigned long) res, (unsigned long) AH->lo_buf_used);
+ "could not write to large object (result: %lu, expected: %lu)\n",
+ (unsigned long) res, (unsigned long) AH->lo_buf_used);
}
else
{
unsigned char *str;
- size_t len;
+ size_t len;
str = PQescapeBytea((const unsigned char *) AH->lo_buf,
AH->lo_buf_used, &len);
if (AH->writingBlob)
{
- size_t remaining = size * nmemb;
+ size_t remaining = size * nmemb;
while (AH->lo_buf_used + remaining > AH->lo_buf_size)
{
{
write_msg(modulename, "Error from TOC entry %d; %u %u %s %s %s\n",
AH->currentTE->dumpId,
- AH->currentTE->catalogId.tableoid, AH->currentTE->catalogId.oid,
- AH->currentTE->desc, AH->currentTE->tag, AH->currentTE->owner);
+ AH->currentTE->catalogId.tableoid, AH->currentTE->catalogId.oid,
+ AH->currentTE->desc, AH->currentTE->tag, AH->currentTE->owner);
}
AH->lastErrorStage = AH->stage;
AH->lastErrorTE = AH->currentTE;
}
/*
- * Read the flag indicating the state of the data pointer. Check if
- * valid and die if not.
+ * Read the flag indicating the state of the data pointer. Check if valid
+ * and die if not.
*
- * This used to be handled by a negative or zero pointer, now we use an
- * extra byte specifically for the state.
+ * This used to be handled by a negative or zero pointer, now we use an extra
+ * byte specifically for the state.
*/
offsetFlg = (*AH->ReadBytePtr) (AH) & 0xFF;
int b;
/*
- * This is a bit yucky, but I don't want to make the binary format
- * very dependent on representation, and not knowing much about it, I
- * write out a sign byte. If you change this, don't forget to change
- * the file version #, and modify readInt to read the new format AS
- * WELL AS the old formats.
+ * This is a bit yucky, but I don't want to make the binary format very
+ * dependent on representation, and not knowing much about it, I write out
+ * a sign byte. If you change this, don't forget to change the file
+ * version #, and modify readInt to read the new format AS WELL AS the old
+ * formats.
*/
/* SIGN byte */
if (fseeko(fh, 0, SEEK_SET) != 0)
{
/*
- * NOTE: Formats that use the lookahead buffer can unset this in
- * their Init routine.
+ * NOTE: Formats that use the lookahead buffer can unset this in their
+ * Init routine.
*/
AH->readHeader = 1;
}
/*
* Not used; maybe later....
*
- * AH->workDir = strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ;
- * i--) if (AH->workDir[i-1] == '/')
+ * AH->workDir = strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ; i--)
+ * if (AH->workDir[i-1] == '/')
*/
}
else
AH->fSpec = NULL;
- AH->currUser = strdup(""); /* So it's valid, but we can free() it
- * later if necessary */
+ AH->currUser = strdup(""); /* So it's valid, but we can free() it later
+ * if necessary */
AH->currSchema = strdup(""); /* ditto */
AH->currWithOids = -1; /* force SET */
/*
* On Windows, we need to use binary mode to read/write non-text archive
- * formats. Force stdin/stdout into binary mode if that is what
- * we are using.
+ * formats. Force stdin/stdout into binary mode if that is what we are
+ * using.
*/
#ifdef WIN32
if (fmt != archNull &&
(*startPtr) (AH, te);
/*
- * printf("Dumper arg for %d is %x\n", te->id,
- * te->dataDumperArg);
+ * printf("Dumper arg for %d is %x\n", te->id, te->dataDumperArg);
*/
/*
/* Sanity check */
if (te->dumpId <= 0)
die_horribly(AH, modulename,
- "entry ID %d out of range -- perhaps a corrupt TOC\n",
+ "entry ID %d out of range -- perhaps a corrupt TOC\n",
te->dumpId);
te->hadDumper = ReadInt(AH);
/* If no namespace is specified, it means all. */
if (!te->namespace)
return 0;
- if(strcmp(ropt->schemaNames, te->namespace) != 0)
+ if (strcmp(ropt->schemaNames, te->namespace) != 0)
return 0;
}
if ((strcmp(te->desc, "TABLE") == 0) || (strcmp(te->desc, "TABLE DATA") == 0))
}
/*
- * Check if we had a dataDumper. Indicates if the entry is schema or
- * data
+ * Check if we had a dataDumper. Indicates if the entry is schema or data
*/
if (!te->hadDumper)
{
/*
- * Special Case: If 'SEQUENCE SET' then it is considered a data
- * entry
+ * Special Case: If 'SEQUENCE SET' then it is considered a data entry
*/
if (strcmp(te->desc, "SEQUENCE SET") == 0)
res = res & REQ_DATA;
}
/*
- * Special case: type with tag; this is obsolete
- * and we always ignore it.
+ * Special case: type with tag; this is obsolete and we
+ * always ignore it.
*/
if ((strcmp(te->desc, "") == 0) && (strcmp(te->tag, "Max OID") == 0))
return 0;
}
/*
- * NOTE: currUser keeps track of what the imaginary session user in
- * our script is. It's now effectively reset to the original userID.
+ * NOTE: currUser keeps track of what the imaginary session user in our
+ * script is. It's now effectively reset to the original userID.
*/
if (AH->currUser)
free(AH->currUser);
_doSetSessionAuth(AH, user);
/*
- * NOTE: currUser keeps track of what the imaginary session user in
- * our script is
+ * NOTE: currUser keeps track of what the imaginary session user in our
+ * script is
*/
if (AH->currUser)
free(AH->currUser);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_die_horribly(AH, modulename,
- "could not set search_path to \"%s\": %s",
- schemaName, PQerrorMessage(AH->connection));
+ "could not set search_path to \"%s\": %s",
+ schemaName, PQerrorMessage(AH->connection));
PQclear(res);
}
_selectTablespace(ArchiveHandle *AH, const char *tablespace)
{
PQExpBuffer qry;
- const char *want, *have;
+ const char *want,
+ *have;
have = AH->currTablespace;
want = tablespace;
res = PQexec(AH->connection, qry->data);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
- warn_or_die_horribly(AH, modulename,
+ warn_or_die_horribly(AH, modulename,
"could not set default_tablespace to %s: %s",
fmtId(want), PQerrorMessage(AH->connection));
strcmp(type, "TYPE") == 0)
{
appendPQExpBuffer(buf, "%s ", type);
- if (te->namespace && te->namespace[0]) /* is null pre-7.3 */
+ if (te->namespace && te->namespace[0]) /* is null pre-7.3 */
appendPQExpBuffer(buf, "%s.", fmtId(te->namespace));
+
/*
- * Pre-7.3 pg_dump would sometimes (not always) put
- * a fmtId'd name into te->tag for an index.
- * This check is heuristic, so make its scope as
- * narrow as possible.
+ * Pre-7.3 pg_dump would sometimes (not always) put a fmtId'd name
+ * into te->tag for an index. This check is heuristic, so make its
+ * scope as narrow as possible.
*/
if (AH->version < K_VERS_1_7 &&
te->tag[0] == '"' &&
- te->tag[strlen(te->tag)-1] == '"' &&
+ te->tag[strlen(te->tag) - 1] == '"' &&
strcmp(type, "INDEX") == 0)
appendPQExpBuffer(buf, "%s", te->tag);
else
}
/*
- * These object types require additional decoration. Fortunately,
- * the information needed is exactly what's in the DROP command.
+ * These object types require additional decoration. Fortunately, the
+ * information needed is exactly what's in the DROP command.
*/
if (strcmp(type, "AGGREGATE") == 0 ||
strcmp(type, "FUNCTION") == 0 ||
/*
* Avoid dumping the public schema, as it will already be created ...
- * unless we are using --clean mode, in which case it's been deleted
- * and we'd better recreate it.
+ * unless we are using --clean mode, in which case it's been deleted and
+ * we'd better recreate it.
*/
if (!ropt->dropSchema &&
strcmp(te->desc, "SCHEMA") == 0 && strcmp(te->tag, "public") == 0)
pfx, te->tag, te->desc,
te->namespace ? te->namespace : "-",
te->owner);
- if (te->tablespace)
+ if (te->tablespace)
ahprintf(AH, "; Tablespace: %s", te->tablespace);
ahprintf(AH, "\n");
- if (AH->PrintExtraTocPtr != NULL)
+ if (AH->PrintExtraTocPtr !=NULL)
(*AH->PrintExtraTocPtr) (AH, te);
ahprintf(AH, "--\n\n");
}
/*
* Actually print the definition.
*
- * Really crude hack for suppressing AUTHORIZATION clause that old
- * pg_dump versions put into CREATE SCHEMA. We have to do this when
- * --no-owner mode is selected. This is ugly, but I see
- * no other good way ...
+ * Really crude hack for suppressing AUTHORIZATION clause that old pg_dump
+ * versions put into CREATE SCHEMA. We have to do this when --no-owner
+ * mode is selected. This is ugly, but I see no other good way ...
*/
if (ropt->noOwner && strcmp(te->desc, "SCHEMA") == 0)
{
/*
* If it's an ACL entry, it might contain SET SESSION AUTHORIZATION
- * commands, so we can no longer assume we know the current auth
- * setting.
+ * commands, so we can no longer assume we know the current auth setting.
*/
if (strncmp(te->desc, "ACL", 3) == 0)
{
else if (sizeof(off_t) > sizeof(long))
/*
- * At this point, off_t is too large for long, so we return based
- * on whether an off_t version of fseek is available.
+ * At this point, off_t is too large for long, so we return based on
+ * whether an off_t version of fseek is available.
*/
#ifdef HAVE_FSEEKO
return true;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.67 2005/09/11 04:10:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.68 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char vrev;
int version; /* Conveniently formatted version */
- char *archiveRemoteVersion; /* When reading an archive,
- * the version of the dumped DB */
- char *archiveDumpVersion; /* When reading an archive,
- * the version of the dumper */
+ char *archiveRemoteVersion; /* When reading an archive, the
+ * version of the dumped DB */
+ char *archiveDumpVersion; /* When reading an archive, the
+ * version of the dumper */
int debugLevel; /* Used for logging (currently only by
* --verbose) */
* Fields used when discovering header. A format can always get the
* previous read bytes from here...
*/
- int readHeader; /* Used if file header has been read
- * already */
- char *lookahead; /* Buffer used when reading header to
- * discover format */
+ int readHeader; /* Used if file header has been read already */
+ char *lookahead; /* Buffer used when reading header to discover
+ * format */
size_t lookaheadSize; /* Size of allocated buffer */
size_t lookaheadLen; /* Length of data in lookahead */
- off_t lookaheadPos; /* Current read position in lookahead
- * buffer */
+ off_t lookaheadPos; /* Current read position in lookahead buffer */
ArchiveEntryPtr ArchiveEntryPtr; /* Called for each metadata object */
StartDataPtr StartDataPtr; /* Called when table data is about to be
ReadBufPtr ReadBufPtr; /* Read a buffer of input from the archive */
ClosePtr ClosePtr; /* Close the archive */
WriteExtraTocPtr WriteExtraTocPtr; /* Write extra TOC entry data
- * associated with the current
- * archive format */
+ * associated with the current archive
+ * format */
ReadExtraTocPtr ReadExtraTocPtr; /* Read extr info associated with
* archie format */
PrintExtraTocPtr PrintExtraTocPtr; /* Extra TOC info for format */
char *archdbname; /* DB name *read* from archive */
bool requirePassword;
PGconn *connection;
- int connectToDB; /* Flag to indicate if direct DB
- * connection is required */
+ int connectToDB; /* Flag to indicate if direct DB connection is
+ * required */
int pgCopyIn; /* Currently in libpq 'COPY IN' mode. */
PQExpBuffer pgCopyBuf; /* Left-over data from incomplete lines in
* COPY IN */
ArchiveMode mode; /* File mode - r or w */
void *formatData; /* Header data specific to file format */
- RestoreOptions *ropt; /* Used to check restore options in
- * ahwrite etc */
+ RestoreOptions *ropt; /* Used to check restore options in ahwrite
+ * etc */
/* these vars track state to avoid sending redundant SET commands */
char *currUser; /* current username */
char *currSchema; /* current schema */
- char *currTablespace; /* current tablespace */
+ char *currTablespace; /* current tablespace */
bool currWithOids; /* current default_with_oids setting */
void *lo_buf;
struct _tocEntry *next;
CatalogId catalogId;
DumpId dumpId;
- bool hadDumper; /* Archiver was passed a dumper routine
- * (used in restore) */
+ bool hadDumper; /* Archiver was passed a dumper routine (used
+ * in restore) */
char *tag; /* index tag */
char *namespace; /* null or empty string if not in a schema */
char *tablespace; /* null if not in a tablespace; empty string
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.32 2005/09/24 17:53:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.33 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* zlibOutSize is the buffer size we tell zlib it can output to. We
- * actually allocate one extra byte because some routines want to
- * append a trailing zero byte to the zlib output. The input buffer
- * is expansible and is always of size ctx->inSize; zlibInSize is just
- * the initial default size for it.
+ * actually allocate one extra byte because some routines want to append a
+ * trailing zero byte to the zlib output. The input buffer is expansible
+ * and is always of size ctx->inSize; zlibInSize is just the initial
+ * default size for it.
*/
ctx->zlibOut = (char *) malloc(zlibOutSize + 1);
ctx->zlibIn = (char *) malloc(zlibInSize);
ctx->dataState = ReadOffset(AH, &(ctx->dataPos));
/*
- * Prior to V1.7 (pg7.3), we dumped the data size as an int now we
- * don't dump it at all.
+ * Prior to V1.7 (pg7.3), we dumped the data size as an int now we don't
+ * dump it at all.
*/
if (AH->version < K_VERS_1_7)
junk = ReadInt(AH);
if ((TocIDRequired(AH, id, ropt) & REQ_DATA) != 0)
die_horribly(AH, modulename,
"Dumping a specific TOC data block out of order is not supported"
- " without ID on this input stream (fseek required)\n");
+ " without ID on this input stream (fseek required)\n");
switch (blkType)
{
cnt = fread(in, 1, blkLen, AH->FH);
if (cnt != blkLen)
die_horribly(AH, modulename,
- "could not read data block -- expected %lu, got %lu\n",
+ "could not read data block -- expected %lu, got %lu\n",
(unsigned long) blkLen, (unsigned long) cnt);
ctx->filePos += blkLen;
cnt = fread(in, 1, blkLen, AH->FH);
if (cnt != blkLen)
die_horribly(AH, modulename,
- "could not read data block -- expected %lu, got %lu\n",
+ "could not read data block -- expected %lu, got %lu\n",
(unsigned long) blkLen, (unsigned long) cnt);
ctx->filePos += blkLen;
WriteDataChunks(AH);
/*
- * This is not an essential operation - it is really only needed
- * if we expect to be doing seeks to read the data back - it may
- * be ok to just use the existing self-consistent block
- * formatting.
+ * This is not an essential operation - it is really only needed if we
+ * expect to be doing seeks to read the data back - it may be ok to
+ * just use the existing self-consistent block formatting.
*/
if (ctx->hasSeek)
{
if (deflateInit(zp, AH->compression) != Z_OK)
die_horribly(AH, modulename, "could not initialize compression library: %s\n", zp->msg);
}
-
#else
AH->compression = 0;
)
{
/*
- * Extra paranoia: avoid zero-length chunks since a zero
- * length chunk is the EOF marker. This should never happen
- * but...
+ * Extra paranoia: avoid zero-length chunks since a zero length
+ * chunk is the EOF marker. This should never happen but...
*/
if (zp->avail_out < zlibOutSize)
{
/*
- * printf("Wrote %lu byte deflated chunk\n", (unsigned
- * long) (zlibOutSize - zp->avail_out));
+ * printf("Wrote %lu byte deflated chunk\n", (unsigned long)
+ * (zlibOutSize - zp->avail_out));
*/
WriteInt(AH, zlibOutSize - zp->avail_out);
if (fwrite(out, 1, zlibOutSize - zp->avail_out, AH->FH) != (zlibOutSize - zp->avail_out))
* Implements the basic DB functions used by the archiver.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.65 2005/09/11 04:10:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.66 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
noPwd = (strcmp(PQerrorMessage(newConn),
PQnoPasswordSupplied) == 0);
badPwd = (strncmp(PQerrorMessage(newConn),
- "Password authentication failed for user", 39) == 0);
+ "Password authentication failed for user", 39) == 0);
if (noPwd || badPwd)
{
AH->requirePassword = false;
/*
- * Start the connection. Loop until we have a password if requested
- * by backend.
+ * Start the connection. Loop until we have a password if requested by
+ * backend.
*/
do
{
die_horribly(AH, modulename, "failed to connect to database\n");
if (PQstatus(AH->connection) == CONNECTION_BAD &&
- strcmp(PQerrorMessage(AH->connection), PQnoPasswordSupplied) == 0 &&
+ strcmp(PQerrorMessage(AH->connection), PQnoPasswordSupplied) == 0 &&
!feof(stdin))
{
PQfinish(AH->connection);
}
/*
- * fprintf(stderr, "Found cr at %d, prev char was %c, next was
- * %c\n", loc, qry[loc-1], qry[loc+1]);
+ * fprintf(stderr, "Found cr at %d, prev char was %c, next was %c\n",
+ * loc, qry[loc-1], qry[loc+1]);
*/
/* Count the number of preceding slashes */
sPos = loc - sPos;
/*
- * If an odd number of preceding slashes, then \n was escaped so
- * set the next search pos, and loop (if any left).
+ * If an odd number of preceding slashes, then \n was escaped so set
+ * the next search pos, and loop (if any left).
*/
if ((sPos & 1) == 1)
{
{
/*
* The following is a mini state machine to assess the end of an SQL
- * statement. It really only needs to parse good SQL, or at least
- * that's the theory... End-of-statement is assumed to be an unquoted,
+ * statement. It really only needs to parse good SQL, or at least that's
+ * the theory... End-of-statement is assumed to be an unquoted,
* un-commented semi-colon that's not within any parentheses.
*
* Note: the input can be split into bufferloads at arbitrary boundaries.
* Therefore all state must be kept in AH->sqlparse, not in local
- * variables of this routine. We assume that AH->sqlparse was
- * filled with zeroes when created.
+ * variables of this routine. We assume that AH->sqlparse was filled with
+ * zeroes when created.
*/
for (; qry < eos; qry++)
{
if (*qry == ';' && AH->sqlparse.braceDepth == 0)
{
/*
- * We've found the end of a statement. Send it and
- * reset the buffer.
+ * We've found the end of a statement. Send it and reset
+ * the buffer.
*/
- appendPQExpBufferChar(AH->sqlBuf, ';'); /* inessential */
+ appendPQExpBufferChar(AH->sqlBuf, ';'); /* inessential */
ExecuteSqlCommand(AH, AH->sqlBuf,
"could not execute query");
resetPQExpBuffer(AH->sqlBuf);
AH->sqlparse.lastChar = '\0';
/*
- * Remove any following newlines - so that
- * embedded COPY commands don't get a starting newline.
+ * Remove any following newlines - so that embedded COPY
+ * commands don't get a starting newline.
*/
qry++;
while (qry < eos && *qry == '\n')
{
AH->sqlparse.state = SQL_IN_DOUBLE_QUOTE;
}
+
/*
* Look for dollar-quotes. We make the assumption that
- * $-quotes will not have an ident character just
- * before them in pg_dump output. XXX is this
- * good enough?
+ * $-quotes will not have an ident character just before them
+ * in pg_dump output. XXX is this good enough?
*/
else if (*qry == '$' && !_isIdentChar(AH->sqlparse.lastChar))
{
break;
case SQL_IN_EXT_COMMENT:
+
/*
* This isn't fully correct, because we don't account for
* nested slash-stars, but pg_dump never emits such.
break;
case SQL_IN_E_QUOTE:
+
/*
* Eventually we will need to handle '' specially, because
* after E'...''... we should still be in E_QUOTE state.
*
- * XXX problem: how do we tell whether the dump was made
- * by a version that thinks backslashes aren't special
- * in non-E literals??
+ * XXX problem: how do we tell whether the dump was made by a
+ * version that thinks backslashes aren't special in non-E
+ * literals??
*/
if (*qry == '\'' && !AH->sqlparse.backSlash)
AH->sqlparse.state = SQL_SCAN;
{
/*
* Ooops, we're not really in a dollar-tag. Valid tag
- * chars do not include the various chars we look for
- * in this state machine, so it's safe to just jump
- * from this state back to SCAN. We have to back up
- * the qry pointer so that the current character gets
- * rescanned in SCAN state; and then "continue" so that
- * the bottom-of-loop actions aren't done yet.
+ * chars do not include the various chars we look for in
+ * this state machine, so it's safe to just jump from this
+ * state back to SCAN. We have to back up the qry pointer
+ * so that the current character gets rescanned in SCAN
+ * state; and then "continue" so that the bottom-of-loop
+ * actions aren't done yet.
*/
AH->sqlparse.state = SQL_SCAN;
qry--;
break;
case SQL_IN_DOLLAR_QUOTE:
+
/*
* If we are at a $, see whether what precedes it matches
- * tagBuf. (Remember that the trailing $ of the tag was
- * not added to tagBuf.) However, don't compare until we
- * have enough data to be a possible match --- this is
- * needed to avoid false match on '$a$a$...'
+ * tagBuf. (Remember that the trailing $ of the tag was not
+ * added to tagBuf.) However, don't compare until we have
+ * enough data to be a possible match --- this is needed to
+ * avoid false match on '$a$a$...'
*/
if (*qry == '$' &&
AH->sqlBuf->len >= AH->sqlparse.minTagEndPos &&
char *eos = qry + bufLen;
/*
- * fprintf(stderr, "\n\n*****\n
- * Buffer:\n\n%s\n*******************\n\n", qry);
+ * fprintf(stderr, "\n\n*****\n Buffer:\n\n%s\n*******************\n\n",
+ * qry);
*/
/* Could switch between command and COPY IN mode at each line */
|| (c >= '0' && c <= '9')
|| (c == '_')
|| (c == '$')
- || (c >= (unsigned char) '\200') /* no need to check <=
- * \377 */
+ || (c >= (unsigned char) '\200') /* no need to check <= \377 */
)
return true;
else
|| (c >= 'A' && c <= 'Z')
|| (c == '_')
|| (!atStart && c >= '0' && c <= '9')
- || (c >= (unsigned char) '\200') /* no need to check <=
- * \377 */
+ || (c >= (unsigned char) '\200') /* no need to check <= \377 */
)
return true;
else
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.26 2005/06/21 20:45:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.27 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (ctx->blobToc == NULL)
die_horribly(AH, modulename,
- "could not open large object TOC for output: %s\n", strerror(errno));
+ "could not open large object TOC for output: %s\n", strerror(errno));
}
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.15 2005/06/21 20:45:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.16 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (dLen > 0)
{
unsigned char *str;
- size_t len;
+ size_t len;
str = PQescapeBytea((const unsigned char *) data, dLen, &len);
if (!str)
_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
AH->WriteDataPtr = _WriteData;
+
ahprintf(AH, "SELECT lo_close(0);\n\n");
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.48 2005/06/22 02:00:47 neilc Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.49 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifdef HAVE_LIBZ
/* typedef gzFile ThingFile; */
typedef FILE ThingFile;
-
#else
typedef FILE ThingFile;
#endif
if (ctx->tarFH == NULL)
die_horribly(NULL, modulename,
- "could not open TOC file for output: %s\n", strerror(errno));
+ "could not open TOC file for output: %s\n", strerror(errno));
ctx->tarFHpos = 0;
/*
- * Make unbuffered since we will dup() it, and the buffers screw
- * each other
+ * Make unbuffered since we will dup() it, and the buffers screw each
+ * other
*/
/* setvbuf(ctx->tarFH, NULL, _IONBF, 0); */
AH->compression = 0;
/*
- * We don't support compression because reading the files back is
- * not possible since gzdopen uses buffered IO which totally
- * screws file positioning.
+ * We don't support compression because reading the files back is not
+ * possible since gzdopen uses buffered IO which totally screws file
+ * positioning.
*/
if (AH->compression != 0)
die_horribly(NULL, modulename, "compression not supported by tar output format\n");
die_horribly(NULL, modulename, "could not open TOC file for input: %s\n", strerror(errno));
/*
- * Make unbuffered since we will dup() it, and the buffers screw
- * each other
+ * Make unbuffered since we will dup() it, and the buffers screw each
+ * other
*/
/* setvbuf(ctx->tarFH, NULL, _IONBF, 0); */
tm = _tarPositionTo(AH, filename);
if (!tm) /* Not found */
{
- if (filename) /* Couldn't find the requested file.
- * Future: DO SEEK(0) and retry. */
+ if (filename) /* Couldn't find the requested file. Future:
+ * DO SEEK(0) and retry. */
die_horribly(AH, modulename, "could not find file %s in archive\n", filename);
else
/* Any file OK, none left, so return NULL */
else
die_horribly(AH, modulename, "compression support is disabled in this format\n");
/* tm->zFH = gzdopen(dup(fileno(ctx->tarFH)), "rb"); */
-
#else
tm->nFH = ctx->tarFH;
#endif
}
else
tm->nFH = tm->tmpFH;
-
#else
tm->nFH = tm->tmpFH;
if (res != len)
die_horribly(th->AH, modulename,
- "could not write to tar member (wrote %lu, attempted %lu)\n",
+ "could not write to tar member (wrote %lu, attempted %lu)\n",
(unsigned long) res, (unsigned long) len);
th->pos += res;
tmpCopy[i] = pg_tolower((unsigned char) tmpCopy[i]);
/*
- * This is very nasty; we don't know if the archive used WITH
- * OIDS, so we search the string for it in a paranoid sort of way.
+ * This is very nasty; we don't know if the archive used WITH OIDS, so
+ * we search the string for it in a paranoid sort of way.
*/
if (strncmp(tmpCopy, "copy ", 5) != 0)
die_horribly(AH, modulename,
WriteDataChunks(AH);
/*
- * Now this format wants to append a script which does a full
- * restore if the files have been extracted.
+ * Now this format wants to append a script which does a full restore
+ * if the files have been extracted.
*/
th = tarOpen(AH, "restore.sql", 'w');
tarPrintf(AH, th, "create temporary table pgdump_restore_path(p text);\n");
tarPrintf(AH, th, "--\n"
"-- NOTE:\n"
"--\n"
- "-- File paths need to be edited. Search for $$PATH$$ and\n"
- "-- replace it with the path to the directory containing\n"
+ "-- File paths need to be edited. Search for $$PATH$$ and\n"
+ "-- replace it with the path to the directory containing\n"
"-- the extracted data files.\n"
"--\n"
"-- Edit the following to match the path where the\n"
{
if (fputc(0, ctx->tarFH) == EOF)
die_horribly(AH, modulename,
- "could not write null block at end of tar archive\n");
+ "could not write null block at end of tar archive\n");
}
}
int cnt = -1;
/*
- * This is paranoid: deal with the possibility that vsnprintf is
- * willing to ignore trailing null
+ * This is paranoid: deal with the possibility that vsnprintf is willing
+ * to ignore trailing null
*/
/*
- * or returns > 0 even if string does not fit. It may be the case that
- * it returns cnt = bufsize
+ * or returns > 0 even if string does not fit. It may be the case that it
+ * returns cnt = bufsize
*/
while (cnt < 0 || cnt >= (bSize - 1))
{
*/
fseeko(tmp, 0, SEEK_END);
th->fileLen = ftello(tmp);
+
/*
- * Some compilers with throw a warning knowing this test can never be
- * true because off_t can't exceed the compared maximum.
+ * Some compilers will throw a warning knowing this test can never be true
+ * because off_t can't exceed the compared maximum.
*/
if (th->fileLen > MAX_TAR_MEMBER_FILELEN)
die_horribly(AH, modulename, "archive member too large for tar format\n");
if (filename)
die_horribly(AH, modulename, "could not find header for file %s in tar archive\n", filename);
else
- /* We're just scanning the archibe for the next file, so return null */
+
+ /*
+ * We're just scanning the archive for the next file, so return
+ * null
+ */
{
free(th);
return NULL;
id = atoi(th->targetFile);
if ((TocIDRequired(AH, id, AH->ropt) & REQ_DATA) != 0)
die_horribly(AH, modulename, "dumping data out of order is not supported in this archive format: "
- "%s is required, but comes before %s in the archive file.\n",
+ "%s is required, but comes before %s in the archive file.\n",
th->targetFile, filename);
/* Header doesn't match, so read to next header */
snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) ftello(ctx->tarFH));
snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) ftello(ctx->tarFHpos));
die_horribly(AH, modulename,
- "mismatch in actual vs. predicted file position (%s vs. %s)\n",
+ "mismatch in actual vs. predicted file position (%s vs. %s)\n",
buf1, buf2);
}
#endif
sscanf(&h[148], "%8o", &sum);
/*
- * If the checksum failed, see if it is a null block. If so,
- * silently continue to the next block.
+ * If the checksum failed, see if it is a null block. If so, silently
+ * continue to the next block.
*/
if (chk == sum)
gotBlock = true;
#if 0
/* User 32 */
- sprintf(&h[265], "%.31s", ""); /* How do I get username reliably?
- * Do I need to? */
+ sprintf(&h[265], "%.31s", ""); /* How do I get username reliably? Do
+ * I need to? */
/* Group 32 */
- sprintf(&h[297], "%.31s", ""); /* How do I get group reliably? Do
- * I need to? */
+ sprintf(&h[297], "%.31s", ""); /* How do I get group reliably? Do I
+ * need to? */
/* Maj Dev 8 */
sprintf(&h[329], "%6o ", 0);
* by PostgreSQL
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.421 2005/09/21 19:58:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.422 2005/10/15 02:49:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{"version", no_argument, NULL, 'V'},
/*
- * the following options don't have an equivalent short option
- * letter, but are available as '-X long-name'
+ * the following options don't have an equivalent short option letter,
+ * but are available as '-X long-name'
*/
{"disable-dollar-quoting", no_argument, &disable_dollar_quoting, 1},
{"disable-triggers", no_argument, &disable_triggers, 1},
/* this is now default, so just ignore the switch */
break;
- case 'c': /* clean (i.e., drop) schema prior to
- * create */
+ case 'c': /* clean (i.e., drop) schema prior to create */
outputClean = 1;
break;
outputBlobs = false;
break;
- case 'S': /* Username for superuser in plain text
- * output */
+ case 'S': /* Username for superuser in plain text output */
outputSuperuser = strdup(optarg);
break;
break;
/*
- * Option letters were getting scarce, so I invented this
- * new scheme: '-X feature' turns on some feature. Compare
- * to the -f option in GCC. You should also add an
- * equivalent GNU-style option --feature. Features that
- * require arguments should use '-X feature=foo'.
+ * Option letters were getting scarce, so I invented this new
+ * scheme: '-X feature' turns on some feature. Compare to the
+ * -f option in GCC. You should also add an equivalent
+ * GNU-style option --feature. Features that require
+ * arguments should use '-X feature=foo'.
*/
case 'X':
if (strcmp(optarg, "disable-dollar-quoting") == 0)
}
/*
- * Open the database using the Archiver, so it knows about it. Errors
- * mean death.
+ * Open the database using the Archiver, so it knows about it. Errors mean
+ * death.
*/
g_conn = ConnectDatabase(g_fout, dbname, pghost, pgport,
username, force_password, ignore_version);
/* Set the client encoding */
if (dumpencoding)
{
- char *cmd = malloc(strlen(dumpencoding) + 32);
- sprintf(cmd,"SET client_encoding='%s'", dumpencoding);
+ char *cmd = malloc(strlen(dumpencoding) + 32);
+
+ sprintf(cmd, "SET client_encoding='%s'", dumpencoding);
do_sql_command(g_conn, cmd);
free(cmd);
}
*
* In 7.3 or later, we can rely on dependency information to help us
* determine a safe order, so the initial sort is mostly for cosmetic
- * purposes: we sort by name to ensure that logically identical
- * schemas will dump identically. Before 7.3 we don't have
- * dependencies and we use OID ordering as an (unreliable) guide to
- * creation order.
+ * purposes: we sort by name to ensure that logically identical schemas
+ * will dump identically. Before 7.3 we don't have dependencies and we
+ * use OID ordering as an (unreliable) guide to creation order.
*/
getDumpableObjects(&dobjs, &numObjs);
sortDumpableObjects(dobjs, numObjs);
/*
- * Create archive TOC entries for all the objects to be dumped, in a
- * safe order.
+ * Create archive TOC entries for all the objects to be dumped, in a safe
+ * order.
*/
/* First the special encoding entry. */
else
ropt->compression = compressLevel;
- ropt->suppressDumpWarnings = true; /* We've already shown
- * them */
+ ropt->suppressDumpWarnings = true; /* We've already shown them */
RestoreArchive(g_fout, ropt);
}
{
/*
* If a specific table is being dumped, do not dump any complete
- * namespaces. If a specific namespace is being dumped, dump just
- * that namespace. Otherwise, dump all non-system namespaces.
+ * namespaces. If a specific namespace is being dumped, dump just that
+ * namespace. Otherwise, dump all non-system namespaces.
*/
if (selectTableName != NULL)
nsinfo->dump = false;
{
/*
* Always dump if dumping parent namespace; else, if a particular
- * tablename has been specified, dump matching table name; else, do
- * not dump.
+ * tablename has been specified, dump matching table name; else, do not
+ * dump.
*/
tbinfo->dump = false;
if (tbinfo->dobj.namespace->dump)
/*
* Make sure we are in proper schema. We will qualify the table name
- * below anyway (in case its name conflicts with a pg_catalog table);
- * but this ensures reproducible results in case the table contains
- * regproc, regclass, etc columns.
+ * below anyway (in case its name conflicts with a pg_catalog table); but
+ * this ensures reproducible results in case the table contains regproc,
+ * regclass, etc columns.
*/
selectSourceSchema(tbinfo->dobj.namespace->dobj.name);
/*
* If possible, specify the column list explicitly so that we have no
- * possibility of retrieving data in the wrong column order. (The
- * default column ordering of COPY will not be what we want in certain
- * corner cases involving ADD COLUMN and inheritance.)
+ * possibility of retrieving data in the wrong column order. (The default
+ * column ordering of COPY will not be what we want in certain corner
+ * cases involving ADD COLUMN and inheritance.)
*/
if (g_fout->remoteVersion >= 70300)
column_list = fmtCopyColumnList(tbinfo);
if (oids && hasoids)
{
appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
- fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
- classname),
+ fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
+ classname),
column_list);
}
else
{
appendPQExpBuffer(q, "COPY %s %s TO stdout;",
- fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
- classname),
+ fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
+ classname),
column_list);
}
res = PQexec(g_conn, q->data);
/*
* THROTTLE:
*
- * There was considerable discussion in late July, 2000 regarding
- * slowing down pg_dump when backing up large tables. Users with
- * both slow & fast (muti-processor) machines experienced
- * performance degradation when doing a backup.
+ * There was considerable discussion in late July, 2000 regarding slowing
+ * down pg_dump when backing up large tables. Users with both slow &
+ * fast (multi-processor) machines experienced performance degradation
+ * when doing a backup.
*
- * Initial attempts based on sleeping for a number of ms for each ms
- * of work were deemed too complex, then a simple 'sleep in each
- * loop' implementation was suggested. The latter failed because
- * the loop was too tight. Finally, the following was implemented:
+ * Initial attempts based on sleeping for a number of ms for each ms of
+ * work were deemed too complex, then a simple 'sleep in each loop'
+ * implementation was suggested. The latter failed because the loop
+ * was too tight. Finally, the following was implemented:
*
- * If throttle is non-zero, then See how long since the last sleep.
- * Work out how long to sleep (based on ratio). If sleep is more
- * than 100ms, then sleep reset timer EndIf EndIf
+ * If throttle is non-zero, then See how long since the last sleep. Work
+ * out how long to sleep (based on ratio). If sleep is more than
+ * 100ms, then sleep reset timer EndIf EndIf
*
- * where the throttle value was the number of ms to sleep per ms of
- * work. The calculation was done in each loop.
+ * where the throttle value was the number of ms to sleep per ms of work.
+ * The calculation was done in each loop.
*
- * Most of the hard work is done in the backend, and this solution
- * still did not work particularly well: on slow machines, the
- * ratio was 50:1, and on medium paced machines, 1:1, and on fast
- * multi-processor machines, it had little or no effect, for
- * reasons that were unclear.
+ * Most of the hard work is done in the backend, and this solution still
+ * did not work particularly well: on slow machines, the ratio was
+ * 50:1, and on medium paced machines, 1:1, and on fast
+ * multi-processor machines, it had little or no effect, for reasons
+ * that were unclear.
*
* Further discussion ensued, and the proposal was dropped.
*
- * For those people who want this feature, it can be implemented
- * using gettimeofday in each loop, calculating the time since
- * last sleep, multiplying that by the sleep ratio, then if the
- * result is more than a preset 'minimum sleep time' (say 100ms),
- * call the 'select' function to sleep for a subsecond period ie.
+ * For those people who want this feature, it can be implemented using
+ * gettimeofday in each loop, calculating the time since last sleep,
+ * multiplying that by the sleep ratio, then if the result is more
+ * than a preset 'minimum sleep time' (say 100ms), call the 'select'
+ * function to sleep for a subsecond period ie.
*
* select(0, NULL, NULL, NULL, &tvi);
*
- * This will return after the interval specified in the structure
- * tvi. Finally, call gettimeofday again to save the 'last sleep
- * time'.
+ * This will return after the interval specified in the structure tvi.
+ * Finally, call gettimeofday again to save the 'last sleep time'.
*/
}
archprintf(fout, "\\.\n\n\n");
/*
* Make sure we are in proper schema. We will qualify the table name
- * below anyway (in case its name conflicts with a pg_catalog table);
- * but this ensures reproducible results in case the table contains
- * regproc, regclass, etc columns.
+ * below anyway (in case its name conflicts with a pg_catalog table); but
+ * this ensures reproducible results in case the table contains regproc,
+ * regclass, etc columns.
*/
selectSourceSchema(tbinfo->dobj.namespace->dobj.name);
{
appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
"SELECT * FROM ONLY %s",
- fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
- classname));
+ fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
+ classname));
}
else
{
appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
"SELECT * FROM %s",
- fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
- classname));
+ fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
+ classname));
}
res = PQexec(g_conn, q->data);
case NUMERICOID:
{
/*
- * These types are printed without quotes
- * unless they contain values that aren't
- * accepted by the scanner unquoted (e.g.,
- * 'NaN'). Note that strtod() and friends
- * might accept NaN, so we can't use that to
- * test.
+ * These types are printed without quotes unless
+ * they contain values that aren't accepted by the
+ * scanner unquoted (e.g., 'NaN'). Note that
+ * strtod() and friends might accept NaN, so we
+ * can't use that to test.
*
- * In reality we only need to defend against
- * infinity and NaN, so we need not get too
- * crazy about pattern matching here.
+ * In reality we only need to defend against infinity
+ * and NaN, so we need not get too crazy about
+ * pattern matching here.
*/
const char *s = PQgetvalue(res, tuple, field);
fmtId(tbinfo->dobj.name));
appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
fmtCopyColumnList(tbinfo),
- (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
+ (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
copyStmt = copyBuf->data;
}
else
tdinfo->dobj.objType = DO_TABLE_DATA;
/*
- * Note: use tableoid 0 so that this object won't be mistaken
- * for something that pg_depend entries apply to.
+ * Note: use tableoid 0 so that this object won't be mistaken for
+ * something that pg_depend entries apply to.
*/
tdinfo->dobj.catId.tableoid = 0;
tdinfo->dobj.catId.oid = tblinfo[i].dobj.catId.oid;
dbDumpId, /* dump ID */
datname, /* Name */
NULL, /* Namespace */
- NULL, /* Tablespace */
+ NULL, /* Tablespace */
dba, /* Owner */
false, /* with oids */
"DATABASE", /* Desc */
/* Process the tuples, if any */
for (i = 0; i < PQntuples(res); i++)
{
- Oid blobOid;
- char *comment;
+ Oid blobOid;
+ char *comment;
/* ignore blobs without comments */
if (PQgetisnull(res, i, 1))
int i_nspacl;
/*
- * Before 7.3, there are no real namespaces; create two dummy entries,
- * one for user stuff and one for system stuff.
+ * Before 7.3, there are no real namespaces; create two dummy entries, one
+ * for user stuff and one for system stuff.
*/
if (g_fout->remoteVersion < 70300)
{
selectSourceSchema("pg_catalog");
/*
- * we fetch all namespaces including system ones, so that every object
- * we read in can be linked to a containing namespace.
+ * we fetch all namespaces including system ones, so that every object we
+ * read in can be linked to a containing namespace.
*/
appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
"(%s nspowner) as rolname, "
int i_typisdefined;
/*
- * we include even the built-in types because those may be used as
- * array elements by user-defined types
+ * we include even the built-in types because those may be used as array
+ * elements by user-defined types
*
* we filter out the built-in types when we dump out the types
*
"typnamespace, "
"(%s typowner) as rolname, "
"typinput::oid as typinput, "
- "typoutput::oid as typoutput, typelem, typrelid, "
+ "typoutput::oid as typoutput, typelem, typrelid, "
"CASE WHEN typrelid = 0 THEN ' '::\"char\" "
"ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END as typrelkind, "
"typtype, typisdefined "
"0::oid as typnamespace, "
"(%s typowner) as rolname, "
"typinput::oid as typinput, "
- "typoutput::oid as typoutput, typelem, typrelid, "
+ "typoutput::oid as typoutput, typelem, typrelid, "
"CASE WHEN typrelid = 0 THEN ' '::\"char\" "
"ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END as typrelkind, "
"typtype, typisdefined "
else
{
appendPQExpBuffer(query, "SELECT "
- "(SELECT oid FROM pg_class WHERE relname = 'pg_type') AS tableoid, "
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_type') AS tableoid, "
"oid, typname, "
"0::oid as typnamespace, "
"(%s typowner) as rolname, "
"typinput::oid as typinput, "
- "typoutput::oid as typoutput, typelem, typrelid, "
+ "typoutput::oid as typoutput, typelem, typrelid, "
"CASE WHEN typrelid = 0 THEN ' '::\"char\" "
"ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END as typrelkind, "
"typtype, typisdefined "
/*
* If it's a table's rowtype, use special type code to facilitate
- * sorting into the desired order. (We don't want to consider it
- * an ordinary type because that would bring the table up into the
+ * sorting into the desired order. (We don't want to consider it an
+ * ordinary type because that would bring the table up into the
* datatype part of the dump order.)
*/
if (OidIsValid(tinfo[i].typrelid) && tinfo[i].typrelkind != 'c')
/*
* Make sure there are dependencies from the type to its input and
- * output functions. (We don't worry about typsend, typreceive,
- * or typanalyze since those are only valid in 7.4 and later,
- * wherein the standard dependency mechanism will pick them up.)
+ * output functions. (We don't worry about typsend, typreceive, or
+ * typanalyze since those are only valid in 7.4 and later, wherein the
+ * standard dependency mechanism will pick them up.)
*/
funcInfo = findFuncByOid(tinfo[i].typinput);
if (funcInfo)
AssignDumpId(&oprinfo[i].dobj);
oprinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_oprname));
oprinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)),
- oprinfo[i].dobj.catId.oid);
+ oprinfo[i].dobj.catId.oid);
oprinfo[i].rolname = strdup(PQgetvalue(res, i, i_rolname));
oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
AssignDumpId(&convinfo[i].dobj);
convinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_conname));
convinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_connamespace)),
- convinfo[i].dobj.catId.oid);
+ convinfo[i].dobj.catId.oid);
convinfo[i].rolname = strdup(PQgetvalue(res, i, i_rolname));
}
AssignDumpId(&opcinfo[i].dobj);
opcinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_opcname));
opcinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace)),
- opcinfo[i].dobj.catId.oid);
+ opcinfo[i].dobj.catId.oid);
opcinfo[i].rolname = strdup(PQgetvalue(res, i, i_rolname));
if (g_fout->remoteVersion >= 70300)
"FROM pg_proc "
"WHERE proisagg "
"AND pronamespace != "
- "(select oid from pg_namespace where nspname = 'pg_catalog')",
+ "(select oid from pg_namespace where nspname = 'pg_catalog')",
username_subquery);
}
else if (g_fout->remoteVersion >= 70100)
AssignDumpId(&agginfo[i].aggfn.dobj);
agginfo[i].aggfn.dobj.name = strdup(PQgetvalue(res, i, i_aggname));
agginfo[i].aggfn.dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_aggnamespace)),
- agginfo[i].aggfn.dobj.catId.oid);
+ agginfo[i].aggfn.dobj.catId.oid);
agginfo[i].aggfn.rolname = strdup(PQgetvalue(res, i, i_rolname));
if (strlen(agginfo[i].aggfn.rolname) == 0)
write_msg(NULL, "WARNING: owner of aggregate function \"%s\" appears to be invalid\n",
agginfo[i].aggfn.dobj.name);
- agginfo[i].aggfn.lang = InvalidOid; /* not currently
- * interesting */
+ agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
agginfo[i].aggfn.nargs = 1;
agginfo[i].aggfn.argtypes = (Oid *) malloc(sizeof(Oid));
agginfo[i].aggfn.argtypes[0] = atooid(PQgetvalue(res, i, i_aggbasetype));
finfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
AssignDumpId(&finfo[i].dobj);
finfo[i].dobj.name = strdup(PQgetvalue(res, i, i_proname));
- finfo[i].dobj.namespace =
+ finfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_pronamespace)),
- finfo[i].dobj.catId.oid);
+ finfo[i].dobj.catId.oid);
finfo[i].rolname = strdup(PQgetvalue(res, i, i_rolname));
finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang));
finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype));
}
if (strlen(finfo[i].rolname) == 0)
- write_msg(NULL,
- "WARNING: owner of function \"%s\" appears to be invalid\n",
+ write_msg(NULL,
+ "WARNING: owner of function \"%s\" appears to be invalid\n",
finfo[i].dobj.name);
}
/*
* Find all the tables (including views and sequences).
*
- * We include system catalogs, so that we can work if a user table is
- * defined to inherit from a system catalog (pretty weird, but...)
+ * We include system catalogs, so that we can work if a user table is defined
+ * to inherit from a system catalog (pretty weird, but...)
*
* We ignore tables that are not type 'r' (ordinary relation), 'S'
* (sequence), 'v' (view), or 'c' (composite type).
*
- * Composite-type table entries won't be dumped as such, but we have
- * to make a DumpableObject for them so that we can track dependencies
- * of the composite type (pg_depend entries for columns of the composite
- * type link to the pg_class entry not the pg_type entry).
+ * Composite-type table entries won't be dumped as such, but we have to make
+ * a DumpableObject for them so that we can track dependencies of the
+ * composite type (pg_depend entries for columns of the composite type
+ * link to the pg_class entry not the pg_type entry).
*
- * Note: in this phase we should collect only a minimal amount of
- * information about each table, basically just enough to decide if it
- * is interesting. We must fetch all tables in this phase because
- * otherwise we cannot correctly identify inherited columns, serial
- * columns, etc.
+ * Note: in this phase we should collect only a minimal amount of information
+ * about each table, basically just enough to decide if it is interesting.
+ * We must fetch all tables in this phase because otherwise we cannot
+ * correctly identify inherited columns, serial columns, etc.
*/
if (g_fout->remoteVersion >= 80000)
"from pg_class c "
"left join pg_depend d on "
"(c.relkind = '%c' and "
- "d.classid = c.tableoid and d.objid = c.oid and "
+ "d.classid = c.tableoid and d.objid = c.oid and "
"d.objsubid = 0 and "
- "d.refclassid = c.tableoid and d.deptype = 'i') "
+ "d.refclassid = c.tableoid and d.deptype = 'i') "
"where relkind in ('%c', '%c', '%c', '%c') "
"order by c.oid",
username_subquery,
"from pg_class c "
"left join pg_depend d on "
"(c.relkind = '%c' and "
- "d.classid = c.tableoid and d.objid = c.oid and "
+ "d.classid = c.tableoid and d.objid = c.oid and "
"d.objsubid = 0 and "
- "d.refclassid = c.tableoid and d.deptype = 'i') "
+ "d.refclassid = c.tableoid and d.deptype = 'i') "
"where relkind in ('%c', '%c', '%c', '%c') "
"order by c.oid",
username_subquery,
else if (g_fout->remoteVersion >= 70200)
{
appendPQExpBuffer(query,
- "SELECT tableoid, oid, relname, relacl, relkind, "
+ "SELECT tableoid, oid, relname, relacl, relkind, "
"0::oid as relnamespace, "
"(%s relowner) as rolname, "
"relchecks, reltriggers, "
{
/* all tables have oids in 7.1 */
appendPQExpBuffer(query,
- "SELECT tableoid, oid, relname, relacl, relkind, "
+ "SELECT tableoid, oid, relname, relacl, relkind, "
"0::oid as relnamespace, "
"(%s relowner) as rolname, "
"relchecks, reltriggers, "
else
{
/*
- * Before 7.1, view relkind was not set to 'v', so we must check
- * if we have a view by looking for a rule in pg_rewrite.
+ * Before 7.1, view relkind was not set to 'v', so we must check if we
+ * have a view by looking for a rule in pg_rewrite.
*/
appendPQExpBuffer(query,
"SELECT "
- "(SELECT oid FROM pg_class WHERE relname = 'pg_class') AS tableoid, "
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_class') AS tableoid, "
"oid, relname, relacl, "
"CASE WHEN relhasrules and relkind = 'r' "
- " and EXISTS(SELECT rulename FROM pg_rewrite r WHERE "
- " r.ev_class = c.oid AND r.ev_type = '1') "
+ " and EXISTS(SELECT rulename FROM pg_rewrite r WHERE "
+ " r.ev_class = c.oid AND r.ev_type = '1') "
"THEN '%c'::\"char\" "
"ELSE relkind END AS relkind,"
"0::oid as relnamespace, "
*numTables = ntups;
/*
- * Extract data from result and lock dumpable tables. We do the
- * locking before anything else, to minimize the window wherein a
- * table could disappear under us.
+ * Extract data from result and lock dumpable tables. We do the locking
+ * before anything else, to minimize the window wherein a table could
+ * disappear under us.
*
- * Note that we have to save info about all tables here, even when
- * dumping only one, because we don't yet know which tables might be
- * inheritance ancestors of the target table.
+ * Note that we have to save info about all tables here, even when dumping
+ * only one, because we don't yet know which tables might be inheritance
+ * ancestors of the target table.
*/
tblinfo = (TableInfo *) calloc(ntups, sizeof(TableInfo));
AssignDumpId(&tblinfo[i].dobj);
tblinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_relname));
tblinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_relnamespace)),
- tblinfo[i].dobj.catId.oid);
+ tblinfo[i].dobj.catId.oid);
tblinfo[i].rolname = strdup(PQgetvalue(res, i, i_rolname));
tblinfo[i].relacl = strdup(PQgetvalue(res, i, i_relacl));
tblinfo[i].relkind = *(PQgetvalue(res, i, i_relkind));
/*
* Decide whether we want to dump this table. Sequences owned by
- * serial columns are never dumpable on their own; we will
- * transpose their owning table's dump flag to them below.
+ * serial columns are never dumpable on their own; we will transpose
+ * their owning table's dump flag to them below.
*/
if (tblinfo[i].relkind == RELKIND_COMPOSITE_TYPE)
tblinfo[i].dump = false;
tblinfo[i].interesting = tblinfo[i].dump;
/*
- * Read-lock target tables to make sure they aren't DROPPED or
- * altered in schema before we get around to dumping them.
+ * Read-lock target tables to make sure they aren't DROPPED or altered
+ * in schema before we get around to dumping them.
*
- * Note that we don't explicitly lock parents of the target tables;
- * we assume our lock on the child is enough to prevent schema
+ * Note that we don't explicitly lock parents of the target tables; we
+ * assume our lock on the child is enough to prevent schema
* alterations to parent tables.
*
* NOTE: it'd be kinda nice to lock views and sequences too, not only
resetPQExpBuffer(lockquery);
appendPQExpBuffer(lockquery,
"LOCK TABLE %s IN ACCESS SHARE MODE",
- fmtQualifiedId(tblinfo[i].dobj.namespace->dobj.name,
- tblinfo[i].dobj.name));
+ fmtQualifiedId(tblinfo[i].dobj.namespace->dobj.name,
+ tblinfo[i].dobj.name));
do_sql_command(g_conn, lockquery->data);
}
/*
* If the user is attempting to dump a specific table, check to ensure
- * that the specified table actually exists. (This is a bit
- * simplistic since we don't fully check the combination of -n and -t
- * switches.)
+ * that the specified table actually exists. (This is a bit simplistic
+ * since we don't fully check the combination of -n and -t switches.)
*/
if (selectTableName)
{
selectSourceSchema(tbinfo->dobj.namespace->dobj.name);
/*
- * The point of the messy-looking outer join is to find a
- * constraint that is related by an internal dependency link to
- * the index. If we find one, create a CONSTRAINT entry linked to
- * the INDEX entry. We assume an index won't have more than one
- * internal dependency.
+ * The point of the messy-looking outer join is to find a constraint
+ * that is related by an internal dependency link to the index. If we
+ * find one, create a CONSTRAINT entry linked to the INDEX entry. We
+ * assume an index won't have more than one internal dependency.
*/
resetPQExpBuffer(query);
if (g_fout->remoteVersion >= 80000)
appendPQExpBuffer(query,
"SELECT t.tableoid, t.oid, "
"t.relname as indexname, "
- "pg_catalog.pg_get_indexdef(i.indexrelid) as indexdef, "
+ "pg_catalog.pg_get_indexdef(i.indexrelid) as indexdef, "
"t.relnatts as indnkeys, "
"i.indkey, i.indisclustered, "
"c.contype, c.conname, "
"c.oid as conoid, "
"(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) as tablespace "
"FROM pg_catalog.pg_index i "
- "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
+ "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_depend d "
"ON (d.classid = t.tableoid "
"AND d.objid = t.oid "
appendPQExpBuffer(query,
"SELECT t.tableoid, t.oid, "
"t.relname as indexname, "
- "pg_catalog.pg_get_indexdef(i.indexrelid) as indexdef, "
+ "pg_catalog.pg_get_indexdef(i.indexrelid) as indexdef, "
"t.relnatts as indnkeys, "
"i.indkey, i.indisclustered, "
"c.contype, c.conname, "
"c.oid as conoid, "
"NULL as tablespace "
"FROM pg_catalog.pg_index i "
- "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
+ "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_depend d "
"ON (d.classid = t.tableoid "
"AND d.objid = t.oid "
appendPQExpBuffer(query,
"SELECT t.tableoid, t.oid, "
"t.relname as indexname, "
- "pg_get_indexdef(i.indexrelid) as indexdef, "
+ "pg_get_indexdef(i.indexrelid) as indexdef, "
"t.relnatts as indnkeys, "
"i.indkey, false as indisclustered, "
"CASE WHEN i.indisprimary THEN 'p'::char "
"(SELECT oid FROM pg_class WHERE relname = 'pg_class') AS tableoid, "
"t.oid, "
"t.relname as indexname, "
- "pg_get_indexdef(i.indexrelid) as indexdef, "
+ "pg_get_indexdef(i.indexrelid) as indexdef, "
"t.relnatts as indnkeys, "
"i.indkey, false as indisclustered, "
"CASE WHEN i.indisprimary THEN 'p'::char "
/*
* In pre-7.4 releases, indkeys may contain more entries than
* indnkeys says (since indnkeys will be 1 for a functional
- * index). We don't actually care about this case since we
- * don't examine indkeys except for indexes associated with
- * PRIMARY and UNIQUE constraints, which are never functional
- * indexes. But we have to allocate enough space to keep
- * parseOidArray from complaining.
+ * index). We don't actually care about this case since we don't
+ * examine indkeys except for indexes associated with PRIMARY and
+ * UNIQUE constraints, which are never functional indexes. But we
+ * have to allocate enough space to keep parseOidArray from
+ * complaining.
*/
indxinfo[j].indkeys = (Oid *) malloc(INDEX_MAX_KEYS * sizeof(Oid));
parseOidArray(PQgetvalue(res, j, i_indkey),
resetPQExpBuffer(query);
appendPQExpBuffer(query,
"SELECT tableoid, oid, conname, "
- "pg_catalog.pg_get_constraintdef(oid) as condef "
+ "pg_catalog.pg_get_constraintdef(oid) as condef "
"FROM pg_catalog.pg_constraint "
"WHERE conrelid = '%u'::pg_catalog.oid "
"AND contype = 'f'",
return;
/*
- * select appropriate schema to ensure names in constraint are
- * properly qualified
+ * select appropriate schema to ensure names in constraint are properly
+ * qualified
*/
selectSourceSchema(tinfo->dobj.namespace->dobj.name);
if (g_fout->remoteVersion >= 70400)
appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
- "pg_catalog.pg_get_constraintdef(oid) AS consrc "
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc "
"FROM pg_catalog.pg_constraint "
"WHERE contypid = '%u'::pg_catalog.oid "
"ORDER BY conname",
if (ruleinfo[i].ruletable)
{
/*
- * If the table is a view, force its ON SELECT rule to be
- * sorted before the view itself --- this ensures that any
- * dependencies for the rule affect the table's positioning.
- * Other rules are forced to appear after their table.
+ * If the table is a view, force its ON SELECT rule to be sorted
+ * before the view itself --- this ensures that any dependencies
+ * for the rule affect the table's positioning. Other rules are
+ * forced to appear after their table.
*/
if (ruleinfo[i].ruletable->relkind == RELKIND_VIEW &&
ruleinfo[i].ev_type == '1' && ruleinfo[i].is_instead)
tbinfo->dobj.name);
/*
- * select table schema to ensure regproc name is qualified if
- * needed
+ * select table schema to ensure regproc name is qualified if needed
*/
selectSourceSchema(tbinfo->dobj.namespace->dobj.name);
if (g_fout->remoteVersion >= 70300)
{
/*
- * We ignore triggers that are tied to a foreign-key
- * constraint
+ * We ignore triggers that are tied to a foreign-key constraint
*/
appendPQExpBuffer(query,
"SELECT tgname, "
"tgfoid::pg_catalog.regproc as tgfname, "
"tgtype, tgnargs, tgargs, tgenabled, "
- "tgisconstraint, tgconstrname, tgdeferrable, "
- "tgconstrrelid, tginitdeferred, tableoid, oid, "
- "tgconstrrelid::pg_catalog.regclass as tgconstrrelname "
+ "tgisconstraint, tgconstrname, tgdeferrable, "
+ "tgconstrrelid, tginitdeferred, tableoid, oid, "
+ "tgconstrrelid::pg_catalog.regclass as tgconstrrelname "
"from pg_catalog.pg_trigger t "
"where tgrelid = '%u'::pg_catalog.oid "
"and (not tgisconstraint "
else if (g_fout->remoteVersion >= 70100)
{
appendPQExpBuffer(query,
- "SELECT tgname, tgfoid::regproc as tgfname, "
+ "SELECT tgname, tgfoid::regproc as tgfname, "
"tgtype, tgnargs, tgargs, tgenabled, "
- "tgisconstraint, tgconstrname, tgdeferrable, "
- "tgconstrrelid, tginitdeferred, tableoid, oid, "
- "(select relname from pg_class where oid = tgconstrrelid) "
+ "tgisconstraint, tgconstrname, tgdeferrable, "
+ "tgconstrrelid, tginitdeferred, tableoid, oid, "
+ "(select relname from pg_class where oid = tgconstrrelid) "
" as tgconstrrelname "
"from pg_trigger "
"where tgrelid = '%u'::oid",
else
{
appendPQExpBuffer(query,
- "SELECT tgname, tgfoid::regproc as tgfname, "
+ "SELECT tgname, tgfoid::regproc as tgfname, "
"tgtype, tgnargs, tgargs, tgenabled, "
- "tgisconstraint, tgconstrname, tgdeferrable, "
+ "tgisconstraint, tgconstrname, tgdeferrable, "
"tgconstrrelid, tginitdeferred, "
"(SELECT oid FROM pg_class WHERE relname = 'pg_trigger') AS tableoid, "
"oid, "
- "(select relname from pg_class where oid = tgconstrrelid) "
+ "(select relname from pg_class where oid = tgconstrrelid) "
" as tgconstrrelname "
"from pg_trigger "
"where tgrelid = '%u'::oid",
"FROM pg_type t1, pg_type t2, pg_proc p "
"WHERE p.pronargs = 1 AND "
"p.proargtypes[0] = t1.oid AND "
- "p.prorettype = t2.oid AND p.proname = t2.typname "
+ "p.prorettype = t2.oid AND p.proname = t2.typname "
"ORDER BY 3,4");
}
castinfo[i].castcontext = *(PQgetvalue(res, i, i_castcontext));
/*
- * Try to name cast as concatenation of typnames. This is only
- * used for purposes of sorting. If we fail to find either type,
- * the name will be an empty string.
+ * Try to name cast as concatenation of typnames. This is only used
+ * for purposes of sorting. If we fail to find either type, the name
+ * will be an empty string.
*/
initPQExpBuffer(&namebuf);
sTypeInfo = findTypeByOid(castinfo[i].castsource);
/* find all the user attributes and their types */
/*
- * we must read the attribute names in attribute number order!
- * because we will use the attnum to index into the attnames array
- * later. We actually ask to order by "attrelid, attnum" because
- * (at least up to 7.3) the planner is not smart enough to realize
- * it needn't re-sort the output of an indexscan on
- * pg_attribute_relid_attnum_index.
+ * we must read the attribute names in attribute number order! because
+ * we will use the attnum to index into the attnames array later. We
+ * actually ask to order by "attrelid, attnum" because (at least up to
+ * 7.3) the planner is not smart enough to realize it needn't re-sort
+ * the output of an indexscan on pg_attribute_relid_attnum_index.
*/
if (g_verbose)
write_msg(NULL, "finding the columns and types of table \"%s\"\n",
{
/* need left join here to not fail on dropped columns ... */
appendPQExpBuffer(q, "SELECT a.attnum, a.attname, a.atttypmod, a.attstattarget, a.attstorage, t.typstorage, "
- "a.attnotnull, a.atthasdef, a.attisdropped, a.attislocal, "
- "pg_catalog.format_type(t.oid,a.atttypmod) as atttypname "
- "from pg_catalog.pg_attribute a left join pg_catalog.pg_type t "
+ "a.attnotnull, a.atthasdef, a.attisdropped, a.attislocal, "
+ "pg_catalog.format_type(t.oid,a.atttypmod) as atttypname "
+ "from pg_catalog.pg_attribute a left join pg_catalog.pg_type t "
"on a.atttypid = t.oid "
"where a.attrelid = '%u'::pg_catalog.oid "
"and a.attnum > 0::pg_catalog.int2 "
else if (g_fout->remoteVersion >= 70100)
{
/*
- * attstattarget doesn't exist in 7.1. It does exist in 7.2,
- * but we don't dump it because we can't tell whether it's
- * been explicitly set or was just a default.
+ * attstattarget doesn't exist in 7.1. It does exist in 7.2, but
+ * we don't dump it because we can't tell whether it's been
+ * explicitly set or was just a default.
*/
appendPQExpBuffer(q, "SELECT a.attnum, a.attname, a.atttypmod, -1 as attstattarget, a.attstorage, t.typstorage, "
"a.attnotnull, a.atthasdef, false as attisdropped, false as attislocal, "
- "format_type(t.oid,a.atttypmod) as atttypname "
+ "format_type(t.oid,a.atttypmod) as atttypname "
"from pg_attribute a left join pg_type t "
"on a.atttypid = t.oid "
"where a.attrelid = '%u'::oid "
if (g_fout->remoteVersion >= 70300)
{
appendPQExpBuffer(q, "SELECT tableoid, oid, adnum, "
- "pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc "
+ "pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc "
"FROM pg_catalog.pg_attrdef "
"WHERE adrelid = '%u'::pg_catalog.oid",
tbinfo->dobj.catId.oid);
attrdefs[j].dobj.namespace = tbinfo->dobj.namespace;
/*
- * Defaults on a VIEW must always be dumped as separate
- * ALTER TABLE commands. Defaults on regular tables are
- * dumped as part of the CREATE TABLE if possible. To
- * check if it's safe, we mark the default as needing to
- * appear before the CREATE.
+ * Defaults on a VIEW must always be dumped as separate ALTER
+ * TABLE commands. Defaults on regular tables are dumped as
+ * part of the CREATE TABLE if possible. To check if it's
+ * safe, we mark the default as needing to appear before the
+ * CREATE.
*/
if (tbinfo->relkind == RELKIND_VIEW)
{
if (g_fout->remoteVersion >= 70400)
{
appendPQExpBuffer(q, "SELECT tableoid, oid, conname, "
- "pg_catalog.pg_get_constraintdef(oid) AS consrc "
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc "
"FROM pg_catalog.pg_constraint "
"WHERE conrelid = '%u'::pg_catalog.oid "
" AND contype = 'c' "
constrs[j].conindex = 0;
constrs[j].coninherited = false;
constrs[j].separate = false;
+
/*
- * Mark the constraint as needing to appear before the
- * table --- this is so that any other dependencies of
- * the constraint will be emitted before we try to create
- * the table.
+ * Mark the constraint as needing to appear before the table
+ * --- this is so that any other dependencies of the
+ * constraint will be emitted before we try to create the
+ * table.
*/
addObjectDependency(&tbinfo->dobj,
constrs[j].dobj.dumpId);
}
/*
- * Check to see if any columns are serial columns. Our first
- * quick filter is that it must be integer or bigint with a
- * default. If so, we scan to see if we found a sequence linked
- * to this column. If we did, mark the column and sequence
- * appropriately.
+ * Check to see if any columns are serial columns. Our first quick
+ * filter is that it must be integer or bigint with a default. If so,
+ * we scan to see if we found a sequence linked to this column. If we
+ * did, mark the column and sequence appropriately.
*/
for (j = 0; j < ntups; j++)
{
/*
* Note assumption that format_type will show these types as
- * exactly "integer" and "bigint" regardless of schema path.
- * This is correct in 7.3 but needs to be watched.
+ * exactly "integer" and "bigint" regardless of schema path. This
+ * is correct in 7.3 but needs to be watched.
*/
if (strcmp(tbinfo->atttypnames[j], "integer") != 0 &&
strcmp(tbinfo->atttypnames[j], "bigint") != 0)
ncomments = collectComments(fout, &comments);
/*
- * Pre-7.2, pg_description does not contain classoid, so
- * collectComments just stores a zero. If there's a collision on
- * object OID, well, you get duplicate comments.
+ * Pre-7.2, pg_description does not contain classoid, so collectComments
+ * just stores a zero. If there's a collision on object OID, well, you
+ * get duplicate comments.
*/
if (fout->remoteVersion < 70200)
classoid = 0;
/*
* Now determine how many items match the object. The search loop
- * invariant still holds: only items between low and high inclusive
- * could match.
+ * invariant still holds: only items between low and high inclusive could
+ * match.
*/
nmatch = 1;
while (middle > low)
ArchiveEntry(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId,
nspinfo->dobj.name,
- NULL, NULL,
+ NULL, NULL,
nspinfo->rolname,
false, "SCHEMA", q->data, delq->data, NULL,
nspinfo->dobj.dependencies, nspinfo->dobj.nDeps,
typstorage = PQgetvalue(res, 0, PQfnumber(res, "typstorage"));
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delq, "DROP TYPE %s.",
fmtId(tinfo->dobj.namespace->dobj.name));
/* Fetch domain specific details */
/* We assume here that remoteVersion must be at least 70300 */
appendPQExpBuffer(query, "SELECT typnotnull, "
- "pg_catalog.format_type(typbasetype, typtypmod) as typdefn, "
+ "pg_catalog.format_type(typbasetype, typtypmod) as typdefn, "
"typdefault "
"FROM pg_catalog.pg_type "
"WHERE oid = '%u'::pg_catalog.oid",
if (!domcheck->separate)
appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
- fmtId(domcheck->dobj.name), domcheck->condef);
+ fmtId(domcheck->dobj.name), domcheck->condef);
}
appendPQExpBuffer(q, ";\n");
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delq, "DROP DOMAIN %s.",
fmtId(tinfo->dobj.namespace->dobj.name));
/* We assume here that remoteVersion must be at least 70300 */
appendPQExpBuffer(query, "SELECT a.attname, "
- "pg_catalog.format_type(a.atttypid, a.atttypmod) as atttypdefn "
- "FROM pg_catalog.pg_type t, pg_catalog.pg_attribute a "
+ "pg_catalog.format_type(a.atttypid, a.atttypmod) as atttypdefn "
+ "FROM pg_catalog.pg_type t, pg_catalog.pg_attribute a "
"WHERE t.oid = '%u'::pg_catalog.oid "
"AND a.attrelid = t.typrelid "
"AND NOT a.attisdropped "
appendPQExpBuffer(q, "\n);\n");
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delq, "DROP TYPE %s.",
fmtId(tinfo->dobj.namespace->dobj.name));
*
* For some backwards compatibility with the older behavior, we forcibly
* dump a PL if its handler function (and validator if any) are in a
- * dumpable namespace. That case is not checked here.
+ * dumpable namespace. That case is not checked here.
*/
static bool
shouldDumpProcLangs(void)
return;
/*
- * Try to find the support function(s). It is not an error if we
- * don't find them --- if the functions are in the pg_catalog schema,
- * as is standard in 8.1 and up, then we won't have loaded them.
- * (In this case we will emit a parameterless CREATE LANGUAGE command,
- * which will require PL template knowledge in the backend to reload.)
+ * Try to find the support function(s). It is not an error if we don't
+ * find them --- if the functions are in the pg_catalog schema, as is
+ * standard in 8.1 and up, then we won't have loaded them. (In this case
+ * we will emit a parameterless CREATE LANGUAGE command, which will
+ * require PL template knowledge in the backend to reload.)
*/
funcInfo = findFuncByOid(plang->lanplcallfoid);
/*
* If the functions are dumpable then emit a traditional CREATE LANGUAGE
- * with parameters. Otherwise, dump only if shouldDumpProcLangs() says
- * to dump it.
+ * with parameters. Otherwise, dump only if shouldDumpProcLangs() says to
+ * dump it.
*/
useParams = (funcInfo != NULL &&
(validatorInfo != NULL || !OidIsValid(plang->lanvalidator)));
qlanname = strdup(fmtId(plang->dobj.name));
/*
- * If dumping a HANDLER clause, treat the language as being in the
- * handler function's schema; this avoids cluttering the HANDLER clause.
- * Otherwise it doesn't really have a schema.
+ * If dumping a HANDLER clause, treat the language as being in the handler
+ * function's schema; this avoids cluttering the HANDLER clause. Otherwise
+ * it doesn't really have a schema.
*/
if (useParams)
lanschema = funcInfo->dobj.namespace->dobj.name;
/* Cope with possibility that validator is in different schema */
if (validatorInfo->dobj.namespace != funcInfo->dobj.namespace)
appendPQExpBuffer(defqry, "%s.",
- fmtId(validatorInfo->dobj.namespace->dobj.name));
+ fmtId(validatorInfo->dobj.namespace->dobj.name));
appendPQExpBuffer(defqry, "%s",
fmtId(validatorInfo->dobj.name));
}
"null as proallargtypes, "
"null as proargmodes, "
"null as proargnames, "
- "case when proiscachable then 'i' else 'v' end as provolatile, "
+ "case when proiscachable then 'i' else 'v' end as provolatile, "
"proisstrict, "
"'f'::boolean as prosecdef, "
- "(SELECT lanname FROM pg_language WHERE oid = prolang) as lanname "
+ "(SELECT lanname FROM pg_language WHERE oid = prolang) as lanname "
"FROM pg_proc "
"WHERE oid = '%u'::oid",
finfo->dobj.catId.oid);
"null as proallargtypes, "
"null as proargmodes, "
"null as proargnames, "
- "case when proiscachable then 'i' else 'v' end as provolatile, "
+ "case when proiscachable then 'i' else 'v' end as provolatile, "
"'f'::boolean as proisstrict, "
"'f'::boolean as prosecdef, "
- "(SELECT lanname FROM pg_language WHERE oid = prolang) as lanname "
+ "(SELECT lanname FROM pg_language WHERE oid = prolang) as lanname "
"FROM pg_proc "
"WHERE oid = '%u'::oid",
finfo->dobj.catId.oid);
funcsig_tag = format_function_signature(finfo, false);
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delqry, "DROP FUNCTION %s.%s;\n",
fmtId(finfo->dobj.namespace->dobj.name),
/*
* As per discussion we dump casts if one or more of the underlying
* objects (the conversion function and the two data types) are not
- * builtin AND if all of the non-builtin objects namespaces are
- * included in the dump. Builtin meaning, the namespace name does not
- * start with "pg_".
+ * builtin AND if all of the non-builtin objects namespaces are included
+ * in the dump. Builtin meaning, the namespace name does not start with
+ * "pg_".
*/
sourceInfo = findTypeByOid(cast->castsource);
targetInfo = findTypeByOid(cast->casttarget);
return;
/*
- * Skip cast if function isn't from pg_ and that namespace is not
- * dumped.
+ * Skip cast if function isn't from pg_ and that namespace is not dumped.
*/
if (funcInfo &&
strncmp(funcInfo->dobj.namespace->dobj.name, "pg_", 3) != 0 &&
else
{
/*
- * Always qualify the function name, in case it is not in
- * pg_catalog schema (format_function_signature won't qualify it).
+ * Always qualify the function name, in case it is not in pg_catalog
+ * schema (format_function_signature won't qualify it).
*/
appendPQExpBuffer(defqry, "WITH FUNCTION %s.",
fmtId(funcInfo->dobj.namespace->dobj.name));
{
appendPQExpBuffer(query, "SELECT oprkind, oprcode, "
"CASE WHEN oprleft = 0 THEN '-' "
- "ELSE format_type(oprleft, NULL) END as oprleft, "
+ "ELSE format_type(oprleft, NULL) END as oprleft, "
"CASE WHEN oprright = 0 THEN '-' "
- "ELSE format_type(oprright, NULL) END as oprright, "
+ "ELSE format_type(oprright, NULL) END as oprright, "
"oprcom, oprnegate, oprrest, oprjoin, "
"oprcanhash, oprlsortop, oprrsortop, "
"0 as oprltcmpop, 0 as oprgtcmpop "
appendPQExpBuffer(details, ",\n GTCMP = %s", name);
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delq, "DROP OPERATOR %s.%s;\n",
fmtId(oprinfo->dobj.namespace->dobj.name),
ArchiveEntry(fout, oprinfo->dobj.catId, oprinfo->dobj.dumpId,
oprinfo->dobj.name,
- oprinfo->dobj.namespace->dobj.name,
+ oprinfo->dobj.namespace->dobj.name,
NULL,
oprinfo->rolname,
false, "OPERATOR", q->data, delq->data, NULL,
appendPQExpBuffer(query, "SELECT opcintype::pg_catalog.regtype, "
"opckeytype::pg_catalog.regtype, "
"opcdefault, "
- "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcamid) AS amname "
+ "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcamid) AS amname "
"FROM pg_catalog.pg_opclass "
"WHERE oid = '%u'::pg_catalog.oid",
opcinfo->dobj.catId.oid);
amname = strdup(PQgetvalue(res, 0, i_amname));
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delq, "DROP OPERATOR CLASS %s",
fmtId(opcinfo->dobj.namespace->dobj.name));
ArchiveEntry(fout, opcinfo->dobj.catId, opcinfo->dobj.dumpId,
opcinfo->dobj.name,
- opcinfo->dobj.namespace->dobj.name,
+ opcinfo->dobj.namespace->dobj.name,
NULL,
opcinfo->rolname,
false, "OPERATOR CLASS", q->data, delq->data, NULL,
/* Get conversion-specific details */
appendPQExpBuffer(query, "SELECT conname, "
- "pg_catalog.pg_encoding_to_char(conforencoding) AS conforencoding, "
- "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, "
+ "pg_catalog.pg_encoding_to_char(conforencoding) AS conforencoding, "
+ "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, "
"conproc, condefault "
"FROM pg_catalog.pg_conversion c "
"WHERE c.oid = '%u'::pg_catalog.oid",
condefault = (PQgetvalue(res, 0, i_condefault)[0] == 't');
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delq, "DROP CONVERSION %s",
fmtId(convinfo->dobj.namespace->dobj.name));
ArchiveEntry(fout, convinfo->dobj.catId, convinfo->dobj.dumpId,
convinfo->dobj.name,
- convinfo->dobj.namespace->dobj.name,
- NULL,
+ convinfo->dobj.namespace->dobj.name,
+ NULL,
convinfo->rolname,
false, "CONVERSION", q->data, delq->data, NULL,
convinfo->dobj.dependencies, convinfo->dobj.nDeps,
"aggsortop::pg_catalog.regoperator, "
"agginitval, "
"proargtypes[0] = 'pg_catalog.\"any\"'::pg_catalog.regtype as anybasetype, "
- "proargtypes[0]::pg_catalog.regtype as fmtbasetype, "
+ "proargtypes[0]::pg_catalog.regtype as fmtbasetype, "
"'t'::boolean as convertok "
- "from pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
+ "from pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
"where a.aggfnoid = p.oid "
"and p.oid = '%u'::pg_catalog.oid",
agginfo->aggfn.dobj.catId.oid);
"0 as aggsortop, "
"agginitval, "
"proargtypes[0] = 'pg_catalog.\"any\"'::pg_catalog.regtype as anybasetype, "
- "proargtypes[0]::pg_catalog.regtype as fmtbasetype, "
+ "proargtypes[0]::pg_catalog.regtype as fmtbasetype, "
"'t'::boolean as convertok "
- "from pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
+ "from pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
"where a.aggfnoid = p.oid "
"and p.oid = '%u'::pg_catalog.oid",
agginfo->aggfn.dobj.catId.oid);
else if (g_fout->remoteVersion >= 70100)
{
appendPQExpBuffer(query, "SELECT aggtransfn, aggfinalfn, "
- "format_type(aggtranstype, NULL) as aggtranstype, "
+ "format_type(aggtranstype, NULL) as aggtranstype, "
"0 as aggsortop, "
"agginitval, "
"aggbasetype = 0 as anybasetype, "
"CASE WHEN aggbasetype = 0 THEN '-' "
- "ELSE format_type(aggbasetype, NULL) END as fmtbasetype, "
+ "ELSE format_type(aggbasetype, NULL) END as fmtbasetype, "
"'t'::boolean as convertok "
"from pg_aggregate "
"where oid = '%u'::oid",
}
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
resetPQExpBuffer(q);
appendPQExpBuffer(q, "AGGREGATE %s", aggsig);
dumpComment(fout, q->data,
- agginfo->aggfn.dobj.namespace->dobj.name, agginfo->aggfn.rolname,
+ agginfo->aggfn.dobj.namespace->dobj.name, agginfo->aggfn.rolname,
agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
/*
- * Since there is no GRANT ON AGGREGATE syntax, we have to make the
- * ACL command look like a function's GRANT; in particular this
- * affects the syntax for aggregates on ANY.
+ * Since there is no GRANT ON AGGREGATE syntax, we have to make the ACL
+ * command look like a function's GRANT; in particular this affects the
+ * syntax for aggregates on ANY.
*/
free(aggsig);
free(aggsig_tag);
if (sql->len > 0)
ArchiveEntry(fout, nilCatalogId, createDumpId(),
tag, nspname,
- NULL,
+ NULL,
owner ? owner : "",
false, "ACL", sql->data, "", NULL,
&(objDumpId), 1,
}
/*
- * Default value --- suppress if inherited, serial, or to
- * be printed separately.
+ * Default value --- suppress if inherited, serial, or to be
+ * printed separately.
*/
if (tbinfo->attrdefs[j] != NULL &&
!tbinfo->inhAttrDef[j] &&
/*
* Not Null constraint --- suppress if inherited
*
- * Note: we could suppress this for serial columns since
- * SERIAL implies NOT NULL. We choose not to for forward
+ * Note: we could suppress this for serial columns since SERIAL
+ * implies NOT NULL. We choose not to for forward
* compatibility, since there has been some talk of making
* SERIAL not imply NOT NULL, in which case the explicit
* specification would be needed.
appendPQExpBuffer(q, ", ");
if (parentRel->dobj.namespace != tbinfo->dobj.namespace)
appendPQExpBuffer(q, "%s.",
- fmtId(parentRel->dobj.namespace->dobj.name));
+ fmtId(parentRel->dobj.namespace->dobj.name));
appendPQExpBuffer(q, "%s",
fmtId(parentRel->dobj.name));
}
for (j = 0; j < tbinfo->numatts; j++)
{
/*
- * Dump per-column statistics information. We only issue an
- * ALTER TABLE statement if the attstattarget entry for this
- * column is non-negative (i.e. it's not the default value)
+ * Dump per-column statistics information. We only issue an ALTER
+ * TABLE statement if the attstattarget entry for this column is
+ * non-negative (i.e. it's not the default value)
*/
if (tbinfo->attstattarget[j] >= 0 &&
!tbinfo->attisdropped[j])
/*
* Dump per-column storage information. The statement is only
- * dumped if the storage has been changed from the type's
- * default.
+ * dumped if the storage has been changed from the type's default.
*/
if (!tbinfo->attisdropped[j] && tbinfo->attstorage[j] != tbinfo->typstorage[j])
{
}
/*
- * Only dump the statement if it's a storage type we
- * recognize
+ * Only dump the statement if it's a storage type we recognize
*/
if (storage != NULL)
{
ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
tbinfo->dobj.name,
tbinfo->dobj.namespace->dobj.name,
- (tbinfo->relkind == RELKIND_VIEW) ? NULL : tbinfo->reltablespace,
+ (tbinfo->relkind == RELKIND_VIEW) ? NULL : tbinfo->reltablespace,
tbinfo->rolname,
- (strcmp(reltypename, "TABLE") == 0) ? tbinfo->hasoids : false,
+ (strcmp(reltypename, "TABLE") == 0) ? tbinfo->hasoids : false,
reltypename, q->data, delq->data, NULL,
tbinfo->dobj.dependencies, tbinfo->dobj.nDeps,
NULL, NULL);
adinfo->adef_expr);
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delq, "ALTER TABLE %s.",
fmtId(tbinfo->dobj.namespace->dobj.name));
ArchiveEntry(fout, adinfo->dobj.catId, adinfo->dobj.dumpId,
tbinfo->attnames[adnum - 1],
- tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname,
false, "DEFAULT", q->data, delq->data, NULL,
delq = createPQExpBuffer();
/*
- * If there's an associated constraint, don't dump the index per se,
- * but do dump any comment for it. (This is safe because dependency
- * ordering will have ensured the constraint is emitted first.)
+ * If there's an associated constraint, don't dump the index per se, but
+ * do dump any comment for it. (This is safe because dependency ordering
+ * will have ensured the constraint is emitted first.)
*/
if (indxinfo->indexconstraint == 0)
{
fmtId(tbinfo->dobj.name));
appendPQExpBuffer(q, " ADD CONSTRAINT %s %s (",
fmtId(coninfo->dobj.name),
- coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
+ coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
for (k = 0; k < indxinfo->indnkeys; k++)
{
else if (coninfo->contype == 'f')
{
/*
- * XXX Potentially wrap in a 'SET CONSTRAINTS OFF' block so that
- * the current table data is not processed
+ * XXX Potentially wrap in a 'SET CONSTRAINTS OFF' block so that the
+ * current table data is not processed
*/
appendPQExpBuffer(q, "ALTER TABLE ONLY %s\n",
fmtId(tbinfo->dobj.name));
tbinfo->dobj.namespace->dobj.name,
tbinfo->rolname,
coninfo->dobj.catId, 0,
- coninfo->separate ? coninfo->dobj.dumpId : tbinfo->dobj.dumpId);
+ coninfo->separate ? coninfo->dobj.dumpId : tbinfo->dobj.dumpId);
destroyPQExpBuffer(q);
}
res = PQexec(g_conn,
"SELECT oid FROM pg_class WHERE relname = 'pg_indexes'");
check_sql_result(res, g_conn,
- "SELECT oid FROM pg_class WHERE relname = 'pg_indexes'",
+ "SELECT oid FROM pg_class WHERE relname = 'pg_indexes'",
PGRES_TUPLES_OK);
ntups = PQntuples(res);
if (ntups < 1)
appendPQExpBuffer(query,
"SELECT sequence_name, last_value, increment_by, "
- "CASE WHEN increment_by > 0 AND max_value = %s THEN NULL "
- " WHEN increment_by < 0 AND max_value = -1 THEN NULL "
+ "CASE WHEN increment_by > 0 AND max_value = %s THEN NULL "
+ " WHEN increment_by < 0 AND max_value = -1 THEN NULL "
" ELSE max_value "
"END AS max_value, "
- "CASE WHEN increment_by > 0 AND min_value = 1 THEN NULL "
- " WHEN increment_by < 0 AND min_value = %s THEN NULL "
+ "CASE WHEN increment_by > 0 AND min_value = 1 THEN NULL "
+ " WHEN increment_by < 0 AND min_value = %s THEN NULL "
" ELSE min_value "
"END AS min_value, "
"cache_value, is_cycled, is_called from %s",
/*
* The logic we use for restoring sequences is as follows:
*
- * Add a basic CREATE SEQUENCE statement (use last_val for start if
- * called is false, else use min_val for start_val). Skip this if the
- * sequence came from a SERIAL column.
+ * Add a basic CREATE SEQUENCE statement (use last_val for start if called is
+ * false, else use min_val for start_val). Skip this if the sequence came
+ * from a SERIAL column.
*
- * Add a 'SETVAL(seq, last_val, iscalled)' at restore-time iff we load
- * data. We do this for serial sequences too.
+ * Add a 'SETVAL(seq, last_val, iscalled)' at restore-time iff we load data.
+ * We do this for serial sequences too.
*/
if (!dataOnly && !OidIsValid(tbinfo->owning_tab))
ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
tbinfo->dobj.name,
- tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname,
false, "SEQUENCE", query->data, delqry->data, NULL,
appendPQExpBuffer(query, "SELECT pg_catalog.setval(");
/*
- * If this is a SERIAL sequence, then use the
- * pg_get_serial_sequence function to avoid hard-coding the
- * sequence name. Note that this implicitly assumes that the
- * sequence and its owning table are in the same schema, because
- * we don't schema-qualify the reference.
+ * If this is a SERIAL sequence, then use the pg_get_serial_sequence
+ * function to avoid hard-coding the sequence name. Note that this
+ * implicitly assumes that the sequence and its owning table are in
+ * the same schema, because we don't schema-qualify the reference.
*/
if (OidIsValid(tbinfo->owning_tab) &&
(owning_tab = findTableByOid(tbinfo->owning_tab)) != NULL)
delqry = createPQExpBuffer();
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delqry, "DROP TRIGGER %s ",
fmtId(tginfo->dobj.name));
p = tginfo->tgargs;
for (findx = 0; findx < tginfo->tgnargs; findx++)
{
- const char *s = p, *s2 = p;
+ const char *s = p,
+ *s2 = p;
/* Set 'p' to end of arg string. marked by '\000' */
for (;;)
p++;
continue;
}
- if (p[0] == '0' && p[1] == '0' && p[2] == '0') /* is it '\000'? */
+ if (p[0] == '0' && p[1] == '0' && p[2] == '0') /* is it '\000'? */
break;
}
p--;
printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0));
/*
- * DROP must be fully qualified in case same name appears in
- * pg_catalog
+ * DROP must be fully qualified in case same name appears in pg_catalog
*/
appendPQExpBuffer(delcmd, "DROP RULE %s ",
fmtId(rinfo->dobj.name));
/*
* Since we ordered the SELECT by referencing ID, we can expect that
- * multiple entries for the same object will appear together; this
- * saves on searches.
+ * multiple entries for the same object will appear together; this saves
+ * on searches.
*/
dobj = NULL;
dobj = findObjectByCatalogId(objId);
/*
- * Failure to find objects mentioned in pg_depend is not
- * unexpected, since for example we don't collect info about TOAST
- * tables.
+ * Failure to find objects mentioned in pg_depend is not unexpected,
+ * since for example we don't collect info about TOAST tables.
*/
if (dobj == NULL)
{
/*
* Ordinarily, table rowtypes have implicit dependencies on their
- * tables. However, for a composite type the implicit dependency
- * goes the other way in pg_depend; which is the right thing for
- * DROP but it doesn't produce the dependency ordering we need.
- * So in that one case, we reverse the direction of the dependency.
+ * tables. However, for a composite type the implicit dependency goes
+ * the other way in pg_depend; which is the right thing for DROP but
+ * it doesn't produce the dependency ordering we need. So in that one
+ * case, we reverse the direction of the dependency.
*/
if (deptype == 'i' &&
dobj->objType == DO_TABLE &&
refdobj->objType == DO_TYPE)
addObjectDependency(refdobj, dobj->dumpId);
- else /* normal case */
+ else
+ /* normal case */
addObjectDependency(dobj, refdobj->dumpId);
}
}
/*
- * char is an internal single-byte data type; Let's make sure we force
- * it through with quotes. - thomas 1998-12-13
+ * char is an internal single-byte data type; Let's make sure we force it
+ * through with quotes. - thomas 1998-12-13
*/
else if (strcmp(typname, "char") == 0)
appendPQExpBuffer(buf, "\"char\"");
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.121 2005/09/05 23:50:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.122 2005/10/15 02:49:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool dump; /* true if we want to dump it */
/*
- * These fields are computed only if we decide the table is
- * interesting (it's either a table to dump, or a direct parent of a
- * dumpable table).
+ * These fields are computed only if we decide the table is interesting
+ * (it's either a table to dump, or a direct parent of a dumpable table).
*/
int numatts; /* number of attributes */
char **attnames; /* the attribute names */
bool *attisserial; /* true if attr is serial or bigserial */
/*
- * Note: we need to store per-attribute notnull, default, and
- * constraint stuff for all interesting tables so that we can tell
- * which constraints were inherited.
+ * Note: we need to store per-attribute notnull, default, and constraint
+ * stuff for all interesting tables so that we can tell which constraints
+ * were inherited.
*/
bool *notnull; /* Not null constraints on attributes */
struct _attrDefInfo **attrdefs; /* DEFAULT expressions */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump_sort.c,v 1.10 2005/06/30 03:03:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump_sort.c,v 1.11 2005/10/15 02:49:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Sort by namespace. Note that all objects of the same type should
- * either have or not have a namespace link, so we needn't be fancy
- * about cases where one link is null and the other not.
+ * either have or not have a namespace link, so we needn't be fancy about
+ * cases where one link is null and the other not.
*/
if (obj1->namespace && obj2->namespace)
{
k;
/*
- * This is basically the same algorithm shown for topological sorting
- * in Knuth's Volume 1. However, we would like to minimize
- * unnecessary rearrangement of the input ordering; that is, when we
- * have a choice of which item to output next, we always want to take
- * the one highest in the original list. Therefore, instead of
- * maintaining an unordered linked list of items-ready-to-output as
- * Knuth does, we maintain a heap of their item numbers, which we can
- * use as a priority queue. This turns the algorithm from O(N) to O(N
- * log N) because each insertion or removal of a heap item takes O(log
- * N) time. However, that's still plenty fast enough for this
- * application.
+ * This is basically the same algorithm shown for topological sorting in
+ * Knuth's Volume 1. However, we would like to minimize unnecessary
+ * rearrangement of the input ordering; that is, when we have a choice of
+ * which item to output next, we always want to take the one highest in
+ * the original list. Therefore, instead of maintaining an unordered
+ * linked list of items-ready-to-output as Knuth does, we maintain a heap
+ * of their item numbers, which we can use as a priority queue. This
+ * turns the algorithm from O(N) to O(N log N) because each insertion or
+ * removal of a heap item takes O(log N) time. However, that's still
+ * plenty fast enough for this application.
*/
*nOrdering = numObjs; /* for success return */
exit_horribly(NULL, modulename, "out of memory\n");
/*
- * Scan the constraints, and for each item in the input, generate a
- * count of the number of constraints that say it must be before
- * something else. The count for the item with dumpId j is stored in
- * beforeConstraints[j]. We also make a map showing the input-order
- * index of the item with dumpId j.
+ * Scan the constraints, and for each item in the input, generate a count
+ * of the number of constraints that say it must be before something else.
+ * The count for the item with dumpId j is stored in beforeConstraints[j].
+ * We also make a map showing the input-order index of the item with
+ * dumpId j.
*/
beforeConstraints = (int *) malloc((maxDumpId + 1) * sizeof(int));
if (beforeConstraints == NULL)
}
/*
- * Now initialize the heap of items-ready-to-output by filling it with
- * the indexes of items that already have beforeConstraints[id] == 0.
+ * Now initialize the heap of items-ready-to-output by filling it with the
+ * indexes of items that already have beforeConstraints[id] == 0.
*
- * The essential property of a heap is heap[(j-1)/2] >= heap[j] for each
- * j in the range 1..heapLength-1 (note we are using 0-based
- * subscripts here, while the discussion in Knuth assumes 1-based
- * subscripts). So, if we simply enter the indexes into pendingHeap[]
- * in decreasing order, we a-fortiori have the heap invariant
- * satisfied at completion of this loop, and don't need to do any
- * sift-up comparisons.
+ * The essential property of a heap is heap[(j-1)/2] >= heap[j] for each j in
+ * the range 1..heapLength-1 (note we are using 0-based subscripts here,
+ * while the discussion in Knuth assumes 1-based subscripts). So, if we
+ * simply enter the indexes into pendingHeap[] in decreasing order, we
+ * a-fortiori have the heap invariant satisfied at completion of this
+ * loop, and don't need to do any sift-up comparisons.
*/
heapLength = 0;
for (i = numObjs; --i >= 0;)
}
/*
- * If we failed, report the objects that couldn't be output; these are
- * the ones with beforeConstraints[] still nonzero.
+ * If we failed, report the objects that couldn't be output; these are the
+ * ones with beforeConstraints[] still nonzero.
*/
if (i != 0)
{
int j;
/*
- * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth
- * is using 1-based array indexes, not 0-based.
+ * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
+ * using 1-based array indexes, not 0-based.
*/
j = heapLength;
while (j > 0)
{
/*
* We use a workspace array, the initial part of which stores objects
- * already processed, and the rest of which is used as temporary space
- * to try to build a loop in. This is convenient because we do not
- * care about loops involving already-processed objects (see notes
- * above); we can easily reject such loops in findLoop() because of
- * this representation. After we identify and process a loop, we can
- * add it to the initial part of the workspace just by moving the
- * boundary pointer.
+ * already processed, and the rest of which is used as temporary space to
+ * try to build a loop in. This is convenient because we do not care
+ * about loops involving already-processed objects (see notes above); we
+ * can easily reject such loops in findLoop() because of this
+ * representation. After we identify and process a loop, we can add it to
+ * the initial part of the workspace just by moving the boundary pointer.
*
- * When we determine that an object is not part of any interesting loop,
- * we also add it to the initial part of the workspace. This is not
- * necessary for correctness, but saves later invocations of
- * findLoop() from uselessly chasing references to such an object.
+ * When we determine that an object is not part of any interesting loop, we
+ * also add it to the initial part of the workspace. This is not
+ * necessary for correctness, but saves later invocations of findLoop()
+ * from uselessly chasing references to such an object.
*
* We make the workspace large enough to hold all objects in the original
- * universe. This is probably overkill, but it's provably enough
- * space...
+ * universe. This is probably overkill, but it's provably enough space...
*/
DumpableObject **workspace;
int initiallen;
else
{
/*
- * Didn't find a loop, but add this object to workspace
- * anyway, unless it's already present. We piggyback on the
- * test that findLoop() already did: it won't have tentatively
- * added obj to workspace if it's already present.
+ * Didn't find a loop, but add this object to workspace anyway,
+ * unless it's already present. We piggyback on the test that
+ * findLoop() already did: it won't have tentatively added obj to
+ * workspace if it's already present.
*/
if (workspace[initiallen] == obj)
initiallen++;
int i;
/*
- * Reject if obj is already present in workspace. This test serves
- * three purposes: it prevents us from finding loops that overlap
+ * Reject if obj is already present in workspace. This test serves three
+ * purposes: it prevents us from finding loops that overlap
* previously-processed loops, it prevents us from going into infinite
* recursion if we are given a startPoint object that links to a cycle
* it's not a member of, and it guarantees that we can't overflow the
workspace[depth++] = obj;
/*
- * See if we've found a loop back to the desired startPoint; if so,
- * done
+ * See if we've found a loop back to the desired startPoint; if so, done
*/
for (i = 0; i < obj->nDeps; i++)
{
addObjectDependency(funcobj, inputFuncInfo->dobj.dumpId);
/*
- * Make sure the input function's dependency on type gets removed too;
- * if it hasn't been done yet, we'd end up with loops involving the
- * type and two or more functions, which repairDependencyLoop() is not
- * smart enough to handle.
+ * Make sure the input function's dependency on type gets removed too; if
+ * it hasn't been done yet, we'd end up with loops involving the type and
+ * two or more functions, which repairDependencyLoop() is not smart enough
+ * to handle.
*/
removeObjectDependency(&inputFuncInfo->dobj, typeobj->dumpId);
}
}
/*
- * If we can't find a principled way to break the loop, complain and
- * break it in an arbitrary fashion.
+ * If we can't find a principled way to break the loop, complain and break
+ * it in an arbitrary fashion.
*/
write_msg(modulename, "WARNING: could not resolve dependency loop among these items:\n");
for (i = 0; i < nLoop; i++)
* Portions Copyright (c) 1994, Regents of the University of California
*
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.68 2005/10/10 22:29:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.69 2005/10/15 02:49:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int runPgDump(const char *dbname);
static PGconn *connectDatabase(const char *dbname, const char *pghost, const char *pgport,
- const char *pguser, bool require_password, bool fail_on_error);
+ const char *pguser, bool require_password, bool fail_on_error);
static PGresult *executeQuery(PGconn *conn, const char *query);
static void executeCommand(PGconn *conn, const char *query);
-static char pg_dump_bin[MAXPGPATH];
+static char pg_dump_bin[MAXPGPATH];
static PQExpBuffer pgdumpopts;
-static bool output_clean = false;
-static bool skip_acls = false;
-static bool verbose = false;
-static bool ignoreVersion = false;
+static bool output_clean = false;
+static bool skip_acls = false;
+static bool verbose = false;
+static bool ignoreVersion = false;
+
/* flags for -X long options */
-static int disable_dollar_quoting = 0;
-static int disable_triggers = 0;
-static int use_setsessauth = 0;
-static int server_version;
+static int disable_dollar_quoting = 0;
+static int disable_triggers = 0;
+static int use_setsessauth = 0;
+static int server_version;
int
{"no-acl", no_argument, NULL, 'x'},
/*
- * the following options don't have an equivalent short option
- * letter, but are available as '-X long-name'
+ * the following options don't have an equivalent short option letter,
+ * but are available as '-X long-name'
*/
{"disable-dollar-quoting", no_argument, &disable_dollar_quoting, 1},
{"disable-triggers", no_argument, &disable_triggers, 1},
if ((ret = find_other_exec(argv[0], "pg_dump", PG_VERSIONSTR,
pg_dump_bin)) < 0)
{
- char full_path[MAXPGPATH];
+ char full_path[MAXPGPATH];
if (find_my_exec(argv[0], full_path) < 0)
StrNCpy(full_path, progname, MAXPGPATH);
#ifndef WIN32
appendPQExpBuffer(pgdumpopts, " -h '%s'", pghost);
#else
- appendPQExpBuffer(pgdumpopts, " -h \"%s\"", pghost);
+ appendPQExpBuffer(pgdumpopts, " -h \"%s\"", pghost);
#endif
break;
#ifndef WIN32
appendPQExpBuffer(pgdumpopts, " -p '%s'", pgport);
#else
- appendPQExpBuffer(pgdumpopts, " -p \"%s\"", pgport);
+ appendPQExpBuffer(pgdumpopts, " -p \"%s\"", pgport);
#endif
break;
#ifndef WIN32
appendPQExpBuffer(pgdumpopts, " -S '%s'", optarg);
#else
- appendPQExpBuffer(pgdumpopts, " -S \"%s\"", optarg);
+ appendPQExpBuffer(pgdumpopts, " -S \"%s\"", optarg);
#endif
break;
#ifndef WIN32
appendPQExpBuffer(pgdumpopts, " -U '%s'", pguser);
#else
- appendPQExpBuffer(pgdumpopts, " -U \"%s\"", pguser);
+ appendPQExpBuffer(pgdumpopts, " -U \"%s\"", pguser);
#endif
break;
appendPQExpBuffer(buf, "DROP ROLE %s;\n", fmtId(rolename));
/*
- * We dump CREATE ROLE followed by ALTER ROLE to ensure that the
- * role will acquire the right properties even if it already exists.
- * (The above DROP may therefore seem redundant, but it isn't really,
+ * We dump CREATE ROLE followed by ALTER ROLE to ensure that the role
+ * will acquire the right properties even if it already exists. (The
+ * above DROP may therefore seem redundant, but it isn't really,
* because this technique doesn't get rid of role memberships.)
*/
appendPQExpBuffer(buf, "CREATE ROLE %s;\n", fmtId(rolename));
int j;
printfPQExpBuffer(buf,
- "SELECT usename FROM pg_shadow WHERE usesysid = %s",
+ "SELECT usename FROM pg_shadow WHERE usesysid = %s",
tok);
res2 = executeQuery(conn, buf->data);
"pg_encoding_to_char(d.encoding), "
"datistemplate, datacl, datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
- "FROM pg_database d LEFT JOIN pg_authid u ON (datdba = u.oid) "
+ "FROM pg_database d LEFT JOIN pg_authid u ON (datdba = u.oid) "
"WHERE datallowconn ORDER BY 1");
else if (server_version >= 80000)
res = executeQuery(conn,
"pg_encoding_to_char(d.encoding), "
"datistemplate, datacl, -1 as datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
- "FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
+ "FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
"WHERE datallowconn ORDER BY 1");
else if (server_version >= 70300)
res = executeQuery(conn,
"pg_encoding_to_char(d.encoding), "
"datistemplate, datacl, -1 as datconnlimit, "
"'pg_default' AS dattablespace "
- "FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
+ "FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
"WHERE datallowconn ORDER BY 1");
else if (server_version >= 70100)
res = executeQuery(conn,
"SELECT datname, "
"coalesce("
- "(select usename from pg_shadow where usesysid=datdba), "
+ "(select usename from pg_shadow where usesysid=datdba), "
"(select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
"pg_encoding_to_char(d.encoding), "
"datistemplate, '' as datacl, -1 as datconnlimit, "
else
{
/*
- * Note: 7.0 fails to cope with sub-select in COALESCE, so just
- * deal with getting a NULL by not printing any OWNER clause.
+ * Note: 7.0 fails to cope with sub-select in COALESCE, so just deal
+ * with getting a NULL by not printing any OWNER clause.
*/
res = executeQuery(conn,
"SELECT datname, "
- "(select usename from pg_shadow where usesysid=datdba), "
+ "(select usename from pg_shadow where usesysid=datdba), "
"pg_encoding_to_char(d.encoding), "
"'f' as datistemplate, "
"'' as datacl, -1 as datconnlimit, "
appendPQExpBuffer(buf, "SET %s TO ", fmtId(mine));
/*
- * Some GUC variable names are 'LIST' type and hence must not be
- * quoted.
+ * Some GUC variable names are 'LIST' type and hence must not be quoted.
*/
if (pg_strcasecmp(mine, "DateStyle") == 0
|| pg_strcasecmp(mine, "search_path") == 0)
/*
* Win32 has to use double-quotes for args, rather than single quotes.
- * Strangely enough, this is the only place we pass a database name on
- * the command line, except "postgres" which doesn't need quoting.
+ * Strangely enough, this is the only place we pass a database name on the
+ * command line, except "postgres" which doesn't need quoting.
*/
#ifndef WIN32
appendPQExpBuffer(cmd, "%s\"%s\" %s -Fp '", SYSTEMQUOTE, pg_dump_bin,
password = simple_prompt("Password: ", 100, false);
/*
- * Start the connection. Loop until we have a password if requested
- * by backend.
+ * Start the connection. Loop until we have a password if requested by
+ * backend.
*/
do
{
}
if (my_version != server_version
- && (server_version < 70000 /* we can handle back to 7.0 */
+ && (server_version < 70000 /* we can handle back to 7.0 */
|| server_version > my_version))
{
fprintf(stderr, _("server version: %s; %s version: %s\n"),
}
/*
- * On 7.3 and later, make sure we are not fooled by non-system schemas
- * in the search path.
+ * On 7.3 and later, make sure we are not fooled by non-system schemas in
+ * the search path.
*/
if (server_version >= 70300)
executeCommand(conn, "SET search_path = pg_catalog");
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_restore.c,v 1.72 2005/09/28 13:11:26 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_restore.c,v 1.73 2005/10/15 02:49:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{"verbose", 0, NULL, 'v'},
/*
- * the following options don't have an equivalent short option
- * letter, but are available as '-X long-name'
+ * the following options don't have an equivalent short option letter,
+ * but are available as '-X long-name'
*/
{"use-set-session-authorization", no_argument, &use_setsessauth, 1},
{"disable-triggers", no_argument, &disable_triggers, 1},
case 'a': /* Dump data only */
opts->dataOnly = 1;
break;
- case 'c': /* clean (i.e., drop) schema prior to
- * create */
+ case 'c': /* clean (i.e., drop) schema prior to create */
opts->dropSchema = 1;
break;
case 'C':
AH->verbose = opts->verbose;
/*
- * Whether to keep submitting sql commands as "pg_restore ... | psql
- * ... "
+ * Whether to keep submitting sql commands as "pg_restore ... | psql ... "
*/
AH->exit_on_error = opts->exit_on_error;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.37 2005/10/03 00:28:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.38 2005/10/15 02:49:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool noupdate = false;
TransactionId set_xid = 0;
Oid set_oid = 0;
- MultiXactId set_mxid = 0;
+ MultiXactId set_mxid = 0;
MultiXactOffset set_mxoff = -1;
uint32 minXlogTli = 0,
minXlogId = 0,
exit(1);
}
- /*
- * Don't allow pg_resetxlog to be run as root, to avoid
- * overwriting the ownership of files in the data directory. We
- * need only check for root -- any other user won't have
- * sufficient permissions to modify files in the data directory.
+ /*
+ * Don't allow pg_resetxlog to be run as root, to avoid overwriting the
+ * ownership of files in the data directory. We need only check for root
+ * -- any other user won't have sufficient permissions to modify files in
+ * the data directory.
*/
#ifndef WIN32
#ifndef __BEOS__ /* no root check on BeOS */
/*
* Check for a postmaster lock file --- if there is one, refuse to
- * proceed, on grounds we might be interfering with a live
- * installation.
+ * proceed, on grounds we might be interfering with a live installation.
*/
snprintf(path, MAXPGPATH, "%s/postmaster.pid", DataDir);
GuessControlValues();
/*
- * Adjust fields if required by switches. (Do this now so that
- * printout, if any, includes these values.)
+ * Adjust fields if required by switches. (Do this now so that printout,
+ * if any, includes these values.)
*/
if (set_xid != 0)
ControlFile.checkPointCopy.nextXid = set_xid;
if (ControlFile.state != DB_SHUTDOWNED && !force)
{
printf(_("The database server was not shut down cleanly.\n"
- "Resetting the transaction log may cause data to be lost.\n"
- "If you want to proceed anyway, use -f to force reset.\n"));
+ "Resetting the transaction log may cause data to be lost.\n"
+ "If you want to proceed anyway, use -f to force reset.\n"));
exit(1);
}
if ((fd = open(XLOG_CONTROL_FILE, O_RDONLY)) < 0)
{
/*
- * If pg_control is not there at all, or we can't read it, the
- * odds are we've been handed a bad DataDir path, so give up. User
- * can do "touch pg_control" to force us to proceed.
+ * If pg_control is not there at all, or we can't read it, the odds
+ * are we've been handed a bad DataDir path, so give up. User can do
+ * "touch pg_control" to force us to proceed.
*/
fprintf(stderr, _("%s: could not open file \"%s\" for reading: %s\n"),
progname, XLOG_CONTROL_FILE, strerror(errno));
close(fd);
if (len >= sizeof(ControlFileData) &&
- ((ControlFileData *) buffer)->pg_control_version == PG_CONTROL_VERSION)
+ ((ControlFileData *) buffer)->pg_control_version == PG_CONTROL_VERSION)
{
/* Check the CRC. */
INIT_CRC32(crc);
ControlFile.catalog_version_no = CATALOG_VERSION_NO;
/*
- * Create a new unique installation identifier, since we can no longer
- * use any old XLOG records. See notes in xlog.c about the algorithm.
+ * Create a new unique installation identifier, since we can no longer use
+ * any old XLOG records. See notes in xlog.c about the algorithm.
*/
gettimeofday(&tv, NULL);
sysidentifier = ((uint64) tv.tv_sec) << 32;
StrNCpy(ControlFile.lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
/*
- * XXX eventually, should try to grovel through old XLOG to develop
- * more accurate values for TimeLineID, nextXID, etc.
+ * XXX eventually, should try to grovel through old XLOG to develop more
+ * accurate values for TimeLineID, nextXID, etc.
*/
}
printf(_("pg_control values:\n\n"));
/*
- * Format system_identifier separately to keep platform-dependent
- * format code out of the translatable message string.
+ * Format system_identifier separately to keep platform-dependent format
+ * code out of the translatable message string.
*/
snprintf(sysident_str, sizeof(sysident_str), UINT64_FORMAT,
ControlFile.system_identifier);
FIN_CRC32(ControlFile.crc);
/*
- * We write out BLCKSZ bytes into pg_control, zero-padding the excess
- * over sizeof(ControlFileData). This reduces the odds of
- * premature-EOF errors when reading pg_control. We'll still fail
- * when we check the contents of the file, but hopefully with a more
- * specific error than "couldn't read pg_control".
+ * We write out BLCKSZ bytes into pg_control, zero-padding the excess over
+ * sizeof(ControlFileData). This reduces the odds of premature-EOF errors
+ * when reading pg_control. We'll still fail when we check the contents
+ * of the file, but hopefully with a more specific error than "couldn't
+ * read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
{
#ifdef WIN32
/*
- * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- * not in released version
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but not in
+ * released version
*/
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pgevent/pgevent.c,v 1.4 2004/09/27 19:16:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pgevent/pgevent.c,v 1.5 2005/10/15 02:49:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Add PostgreSQL source name as a subkey under the Application key in
- * the EventLog registry key.
+ * Add PostgreSQL source name as a subkey under the Application key in the
+ * EventLog registry key.
*/
if (RegCreateKey(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\PostgreSQL", &key))
{
DllUnregisterServer(void)
{
/*
- * Remove PostgreSQL source name as a subkey under the Application key
- * in the EventLog registry key.
+ * Remove PostgreSQL source name as a subkey under the Application key in
+ * the EventLog registry key.
*/
if (RegDeleteKey(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\PostgreSQL"))
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.153 2005/09/20 18:59:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.154 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "command.h"
-#ifdef WIN32_CLIENT_ONLY /* needed for BCC */
+#ifdef WIN32_CLIENT_ONLY /* needed for BCC */
#undef mkdir
#endif
if (status == CMD_UNKNOWN && strlen(cmd) > 1)
{
/*
- * If the command was not recognized, try to parse it as a
- * one-letter command with immediately following argument (a
- * still-supported, but no longer encouraged, syntax).
+ * If the command was not recognized, try to parse it as a one-letter
+ * command with immediately following argument (a still-supported, but
+ * no longer encouraged, syntax).
*/
char new_cmd[2];
opt2q;
/*
- * Ideally we should treat the arguments as SQL identifiers. But
- * for backwards compatibility with 7.2 and older pg_dump files,
- * we have to take unquoted arguments verbatim (don't downcase
- * them). For now, double-quoted arguments may be stripped of
- * double quotes (as if SQL identifiers). By 7.4 or so, pg_dump
- * files can be expected to double-quote all mixed-case \connect
- * arguments, and then we can get rid of OT_SQLIDHACK.
+ * Ideally we should treat the arguments as SQL identifiers. But for
+ * backwards compatibility with 7.2 and older pg_dump files, we have
+ * to take unquoted arguments verbatim (don't downcase them). For now,
+ * double-quoted arguments may be stripped of double quotes (as if SQL
+ * identifiers). By 7.4 or so, pg_dump files can be expected to
+ * double-quote all mixed-case \connect arguments, and then we can get
+ * rid of OT_SQLIDHACK.
*/
opt1 = psql_scan_slash_option(scan_state,
OT_SQLIDHACK, &opt1q, true);
else if (pg_strcasecmp(cmd, "copy") == 0)
{
char *opt = psql_scan_slash_option(scan_state,
- OT_WHOLE_LINE, NULL, false);
+ OT_WHOLE_LINE, NULL, false);
success = do_copy(opt);
free(opt);
/*
- * \e or \edit -- edit the current query buffer (or a file and make it
- * the query buffer
+ * \e or \edit -- edit the current query buffer (or a file and make it the
+ * query buffer
*/
else if (strcmp(cmd, "e") == 0 || strcmp(cmd, "edit") == 0)
{
fout = stdout;
while ((value = psql_scan_slash_option(scan_state,
- OT_NORMAL, "ed, false)))
+ OT_NORMAL, "ed, false)))
{
if (!quoted && strcmp(value, "-n") == 0)
no_newline = true;
else if (strcmp(cmd, "encoding") == 0)
{
char *encoding = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, false);
+ OT_NORMAL, NULL, false);
if (!encoding)
{
else if (strcmp(cmd, "f") == 0)
{
char *fname = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, false);
+ OT_NORMAL, NULL, false);
success = do_pset("fieldsep", fname, &pset.popt, quiet);
free(fname);
else if (strcmp(cmd, "g") == 0)
{
char *fname = psql_scan_slash_option(scan_state,
- OT_FILEPIPE, NULL, false);
+ OT_FILEPIPE, NULL, false);
if (!fname)
pset.gfname = NULL;
else if (strcmp(cmd, "h") == 0 || strcmp(cmd, "help") == 0)
{
char *opt = psql_scan_slash_option(scan_state,
- OT_WHOLE_LINE, NULL, false);
+ OT_WHOLE_LINE, NULL, false);
helpSQL(opt, pset.popt.topt.pager);
free(opt);
else if (strcmp(cmd, "o") == 0 || strcmp(cmd, "out") == 0)
{
char *fname = psql_scan_slash_option(scan_state,
- OT_FILEPIPE, NULL, true);
+ OT_FILEPIPE, NULL, true);
expand_tilde(&fname);
success = setQFout(fname);
else if (strcmp(cmd, "T") == 0)
{
char *value = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, false);
+ OT_NORMAL, NULL, false);
success = do_pset("tableattr", value, &pset.popt, quiet);
free(value);
else if (strcmp(cmd, "z") == 0)
{
char *pattern = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, true);
+ OT_NORMAL, NULL, true);
success = permissionsList(pattern);
if (pattern)
else if (strcmp(cmd, "!") == 0)
{
char *opt = psql_scan_slash_option(scan_state,
- OT_WHOLE_LINE, NULL, false);
+ OT_WHOLE_LINE, NULL, false);
success = do_shell(opt);
free(opt);
#if 0
/*
- * These commands don't do anything. I just use them to test the
- * parser.
+ * These commands don't do anything. I just use them to test the parser.
*/
else if (strcmp(cmd, "void") == 0 || strcmp(cmd, "#") == 0)
{
const char *dbparam = NULL;
const char *userparam = NULL;
const char *pwparam = NULL;
- char *password_prompt = NULL;
+ char *password_prompt = NULL;
char *prompted_password = NULL;
bool need_pass;
bool success = false;
else
userparam = new_user;
- if (userparam == NULL)
+ if (userparam == NULL)
password_prompt = strdup("Password: ");
else
{
password_prompt = malloc(strlen("Password for user %s: ") - 2 +
strlen(userparam) + 1);
- sprintf(password_prompt,"Password for user %s: ", userparam);
+ sprintf(password_prompt, "Password for user %s: ", userparam);
}
/* need to prompt for password? */
pwparam = prompted_password = simple_prompt(password_prompt, 100, false);
/*
- * Use old password (if any) if no new one given and we are
- * reconnecting as same user
+ * Use old password (if any) if no new one given and we are reconnecting
+ * as same user
*/
if (!pwparam && oldconn && PQuser(oldconn) && userparam &&
strcmp(PQuser(oldconn), userparam) == 0)
free(password_prompt);
/*
- * If connection failed, try at least keep the old one. That's
- * probably more convenient than just kicking you out of the program.
+ * If connection failed, try at least keep the old one. That's probably
+ * more convenient than just kicking you out of the program.
*/
if (!pset.db || PQstatus(pset.db) == CONNECTION_BAD)
{
else
{
/*
- * we don't want unpredictable things to happen in scripting
- * mode
+ * we don't want unpredictable things to happen in scripting mode
*/
psql_error("\\connect: %s", PQerrorMessage(pset.db));
PQfinish(pset.db);
if (!tmpdir)
tmpdir = "/tmp";
#else
- char tmpdir[MAXPGPATH];
- int ret;
+ char tmpdir[MAXPGPATH];
+ int ret;
ret = GetTempPath(MAXPGPATH, tmpdir);
if (ret == 0 || ret > MAXPGPATH)
{
psql_error("cannot locate temporary directory: %s",
- !ret ? strerror(errno) : "");
+ !ret ? strerror(errno) : "");
return false;
}
+
/*
- * No canonicalize_path() here.
- * EDIT.EXE run from CMD.EXE prepends the current directory to the
- * supplied path unless we use only backslashes, so we do that.
+ * No canonicalize_path() here. EDIT.EXE run from CMD.EXE prepends the
+ * current directory to the supplied path unless we use only
+ * backslashes, so we do that.
*/
#endif
#ifndef WIN32
snprintf(fnametmp, sizeof(fnametmp), "%s%spsql.edit.%d", tmpdir,
- "/", (int)getpid());
+ "/", (int) getpid());
#else
snprintf(fnametmp, sizeof(fnametmp), "%s%spsql.edit.%d", tmpdir,
- "" /* trailing separator already present */, (int)getpid());
+ "" /* trailing separator already present */ , (int) getpid());
#endif
fname = (const char *) fnametmp;
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.107 2005/10/13 20:58:42 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.108 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
#define DIFF_MSEC(T, U) \
((((int) ((T)->tv_sec - (U)->tv_sec)) * 1000000.0 + \
((int) ((T)->tv_usec - (U)->tv_usec))) / 1000.0)
-
#else
typedef struct _timeb TimevalStruct;
*
*/
void
-psql_error(const char *fmt, ...)
+psql_error(const char *fmt,...)
{
va_list ap;
* thread is using it.
*/
static PGcancel *cancelConn = NULL;
+
#ifdef WIN32
static CRITICAL_SECTION cancelConnLock;
#endif
handle_sigint(SIGNAL_ARGS)
{
int save_errno = errno;
- char errbuf[256];
+ char errbuf[256];
/* Don't muck around if prompting for a password. */
if (prompt_state)
}
errno = save_errno; /* just in case the write changed it */
}
-
-#else /* WIN32 */
+#else /* WIN32 */
static BOOL WINAPI
consoleHandler(DWORD dwCtrlType)
{
- char errbuf[256];
+ char errbuf[256];
if (dwCtrlType == CTRL_C_EVENT ||
dwCtrlType == CTRL_BREAK_EVENT)
{
SetConsoleCtrlHandler(consoleHandler, TRUE);
}
-
-#endif /* WIN32 */
+#endif /* WIN32 */
/* ConnectionUp
initPQExpBuffer(&msg);
/*
- * The returned cursor position is measured in logical characters.
- * Each character might occupy multiple physical bytes in the string,
- * and in some Far Eastern character sets it might take more than one
- * screen column as well. We compute the starting byte offset and
- * starting screen column of each logical character, and store these
- * in qidx[] and scridx[] respectively.
+ * The returned cursor position is measured in logical characters. Each
+ * character might occupy multiple physical bytes in the string, and in
+ * some Far Eastern character sets it might take more than one screen
+ * column as well. We compute the starting byte offset and starting
+ * screen column of each logical character, and store these in qidx[] and
+ * scridx[] respectively.
*/
/* we need a safe allocation size... */
/*
* Replace tabs with spaces in the writable copy. (Later we might
- * want to think about coping with their variable screen width,
- * but not today.)
+ * want to think about coping with their variable screen width, but
+ * not today.)
*
- * Extract line number and begin and end indexes of line containing
- * error location. There will not be any newlines or carriage
- * returns in the selected extract.
+ * Extract line number and begin and end indexes of line containing error
+ * location. There will not be any newlines or carriage returns in
+ * the selected extract.
*/
for (i = 0; i < clen; i++)
{
if (i < loc)
{
/*
- * count lines before loc. Each \r or \n counts
- * as a line except when \r \n appear together.
+ * count lines before loc. Each \r or \n counts as a
+ * line except when \r \n appear together.
*/
if (wquery[qidx[i]] == '\r' ||
i == 0 ||
if (scridx[iend] - scridx[ibeg] > DISPLAY_SIZE)
{
/*
- * We first truncate right if it is enough. This code might
- * be off a space or so on enforcing MIN_RIGHT_CUT if there's
- * a wide character right there, but that should be okay.
+ * We first truncate right if it is enough. This code might be
+ * off a space or so on enforcing MIN_RIGHT_CUT if there's a wide
+ * character right there, but that should be okay.
*/
if (scridx[ibeg] + DISPLAY_SIZE >= scridx[loc] + MIN_RIGHT_CUT)
{
if (!OK)
{
const char *error = PQerrorMessage(pset.db);
+
if (strlen(error))
psql_error("%s", error);
bool
SendQuery(const char *query)
{
- PGresult *results;
- TimevalStruct before, after;
- bool OK, on_error_rollback_savepoint = false;
+ PGresult *results;
+ TimevalStruct before,
+ after;
+ bool OK,
+ on_error_rollback_savepoint = false;
PGTransactionStatusType transaction_status;
- static bool on_error_rollback_warning = false;
+ static bool on_error_rollback_warning = false;
const char *rollback_str;
-
+
if (!pset.db)
{
psql_error("You are currently not connected to a database.\n");
char buf[3];
printf(_("***(Single step mode: verify command)*******************************************\n"
- "%s\n"
- "***(press return to proceed or enter x and return to cancel)********************\n"),
+ "%s\n"
+ "***(press return to proceed or enter x and return to cancel)********************\n"),
query);
fflush(stdout);
if (fgets(buf, sizeof(buf), stdin) != NULL)
}
if (transaction_status == PQTRANS_INTRANS &&
- (rollback_str = GetVariable(pset.vars, "ON_ERROR_ROLLBACK")) != NULL &&
- /* !off and !interactive is 'on' */
+ (rollback_str = GetVariable(pset.vars, "ON_ERROR_ROLLBACK")) != NULL &&
+ /* !off and !interactive is 'on' */
pg_strcasecmp(rollback_str, "off") != 0 &&
(pset.cur_cmd_interactive ||
pg_strcasecmp(rollback_str, "interactive") != 0))
results = NULL;
else
{
- /*
- * Do nothing if they are messing with savepoints themselves:
- * If the user did RELEASE or ROLLBACK, our savepoint is gone.
- * If they issued a SAVEPOINT, releasing ours would remove theirs.
+ /*
+ * Do nothing if they are messing with savepoints themselves: If
+ * the user did RELEASE or ROLLBACK, our savepoint is gone. If
+ * they issued a SAVEPOINT, releasing ours would remove theirs.
*/
if (strcmp(PQcmdStatus(results), "SAVEPOINT") == 0 ||
strcmp(PQcmdStatus(results), "RELEASE") == 0 ||
- strcmp(PQcmdStatus(results), "ROLLBACK") ==0)
+ strcmp(PQcmdStatus(results), "ROLLBACK") == 0)
results = NULL;
else
results = PQexec(pset.db, "RELEASE pg_psql_temporary_savepoint");
static const char *
skip_white_space(const char *query)
{
- int cnestlevel = 0; /* slash-star comment nest level */
+ int cnestlevel = 0; /* slash-star comment nest level */
while (*query)
{
- int mblen = PQmblen(query, pset.encoding);
+ int mblen = PQmblen(query, pset.encoding);
/*
- * Note: we assume the encoding is a superset of ASCII, so that
- * for example "query[0] == '/'" is meaningful. However, we do NOT
- * assume that the second and subsequent bytes of a multibyte
- * character couldn't look like ASCII characters; so it is critical
- * to advance by mblen, not 1, whenever we haven't exactly identified
- * the character we are skipping over.
+ * Note: we assume the encoding is a superset of ASCII, so that for
+ * example "query[0] == '/'" is meaningful. However, we do NOT assume
+ * that the second and subsequent bytes of a multibyte character
+ * couldn't look like ASCII characters; so it is critical to advance
+ * by mblen, not 1, whenever we haven't exactly identified the
+ * character we are skipping over.
*/
if (isspace((unsigned char) *query))
query += mblen;
else if (cnestlevel == 0 && query[0] == '-' && query[1] == '-')
{
query += 2;
+
/*
- * We have to skip to end of line since any slash-star inside
- * the -- comment does NOT start a slash-star comment.
+ * We have to skip to end of line since any slash-star inside the
+ * -- comment does NOT start a slash-star comment.
*/
while (*query)
{
wordlen += PQmblen(&query[wordlen], pset.encoding);
/*
- * Transaction control commands. These should include every keyword
- * that gives rise to a TransactionStmt in the backend grammar, except
- * for the savepoint-related commands.
+ * Transaction control commands. These should include every keyword that
+ * gives rise to a TransactionStmt in the backend grammar, except for the
+ * savepoint-related commands.
*
- * (We assume that START must be START TRANSACTION, since there is
- * presently no other "START foo" command.)
+ * (We assume that START must be START TRANSACTION, since there is presently
+ * no other "START foo" command.)
*/
if (wordlen == 5 && pg_strncasecmp(query, "abort", 5) == 0)
return true;
}
/*
- * Commands not allowed within transactions. The statements checked
- * for here should be exactly those that call PreventTransactionChain()
- * in the backend.
+ * Commands not allowed within transactions. The statements checked for
+ * here should be exactly those that call PreventTransactionChain() in the
+ * backend.
*
- * Note: we are a bit sloppy about CLUSTER, which is transactional in
- * some variants but not others.
+ * Note: we are a bit sloppy about CLUSTER, which is transactional in some
+ * variants but not others.
*/
if (wordlen == 6 && pg_strncasecmp(query, "vacuum", 6) == 0)
return true;
return true;
/*
- * Note: these tests will match CREATE SYSTEM, DROP SYSTEM, and
- * REINDEX TABLESPACE, which aren't really valid commands so we don't
- * care much. The other six possible matches are correct.
+ * Note: these tests will match CREATE SYSTEM, DROP SYSTEM, and REINDEX
+ * TABLESPACE, which aren't really valid commands so we don't care much.
+ * The other six possible matches are correct.
*/
if ((wordlen == 6 && pg_strncasecmp(query, "create", 6) == 0) ||
(wordlen == 4 && pg_strncasecmp(query, "drop", 4) == 0) ||
return NULL;
/*
- * WIN32 doesn't use tilde expansion for file names.
- * Also, it uses tilde for short versions of long file names,
- * though the tilde is usually toward the end, not at the beginning.
+ * WIN32 doesn't use tilde expansion for file names. Also, it uses tilde
+ * for short versions of long file names, though the tilde is usually
+ * toward the end, not at the beginning.
*/
#ifndef WIN32
if (*(fn + 1) == '\0')
get_home_path(home); /* ~ or ~/ only */
else if ((pw = getpwnam(fn + 1)) != NULL)
- StrNCpy(home, pw->pw_dir, MAXPGPATH); /* ~user */
+ StrNCpy(home, pw->pw_dir, MAXPGPATH); /* ~user */
*p = oldp;
if (strlen(home) != 0)
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/common.h,v 1.44 2005/06/13 06:36:22 neilc Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/common.h,v 1.45 2005/10/15 02:49:40 momjian Exp $
*/
#ifndef COMMON_H
#define COMMON_H
extern bool setQFout(const char *fname);
extern void
-psql_error(const char *fmt, ...)
+psql_error(const char *fmt,...)
/* This lets gcc check the format string for consistency. */
__attribute__((format(printf, 1, 2)));
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/copy.c,v 1.57 2005/05/07 02:22:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/copy.c,v 1.58 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "copy.h"
bool binary;
bool oids;
bool csv_mode;
- bool header;
+ bool header;
char *delim;
char *null;
char *quote;
goto error;
/*
- * strtokx() will not have returned a multi-character token starting
- * with '.', so we don't need strcmp() here. Likewise for '(', etc,
- * below.
+ * strtokx() will not have returned a multi-character token starting with
+ * '.', so we don't need strcmp() here. Likewise for '(', etc, below.
*/
if (token[0] == '.')
{
if (token)
{
/*
- * WITH is optional. Also, the backend will allow WITH followed
- * by nothing, so we do too.
+ * WITH is optional. Also, the backend will allow WITH followed by
+ * nothing, so we do too.
*/
if (pg_strcasecmp(token, "with") == 0)
token = strtokx(NULL, whitespace, NULL, NULL,
{
if (!QUIET())
puts(_("Enter data to be copied followed by a newline.\n"
- "End with a backslash and a period on a line by itself."));
+ "End with a backslash and a period on a line by itself."));
prompt = get_prompt(PROMPT_COPY);
}
else
if (c == EOF && s == copybuf && firstload)
{
/*
- * We are guessing a little bit as to the right
- * line-ending here...
+ * We are guessing a little bit as to the right line-ending
+ * here...
*/
if (saw_cr)
PQputline(conn, "\\.\r\n");
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.126 2005/10/04 19:01:18 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.127 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "describe.h"
const char *schemavar, const char *namevar,
const char *altnamevar, const char *visibilityrule);
-static bool add_tablespace_footer(char relkind, Oid tablespace, char **footers,
- int *count, PQExpBufferData buf, bool newline);
+static bool add_tablespace_footer(char relkind, Oid tablespace, char **footers,
+ int *count, PQExpBufferData buf, bool newline);
/*----------------
* Handlers for various slash commands displaying some sort of list
initPQExpBuffer(&buf);
/*
- * There are two kinds of aggregates: ones that work on particular
- * types and ones that work on all (denoted by input type = "any")
+ * There are two kinds of aggregates: ones that work on particular types
+ * and ones that work on all (denoted by input type = "any")
*/
printfPQExpBuffer(&buf,
"SELECT n.nspname as \"%s\",\n"
" p.proname AS \"%s\",\n"
" CASE p.proargtypes[0]\n"
- " WHEN 'pg_catalog.\"any\"'::pg_catalog.regtype\n"
+ " WHEN 'pg_catalog.\"any\"'::pg_catalog.regtype\n"
" THEN CAST('%s' AS pg_catalog.text)\n"
- " ELSE pg_catalog.format_type(p.proargtypes[0], NULL)\n"
+ " ELSE pg_catalog.format_type(p.proargtypes[0], NULL)\n"
" END AS \"%s\",\n"
- " pg_catalog.obj_description(p.oid, 'pg_proc') as \"%s\"\n"
+ " pg_catalog.obj_description(p.oid, 'pg_proc') as \"%s\"\n"
"FROM pg_catalog.pg_proc p\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace\n"
"WHERE p.proisagg\n",
_("Schema"), _("Name"), _("(all types)"),
_("Data type"), _("Description"));
printfPQExpBuffer(&buf,
"SELECT spcname AS \"%s\",\n"
- " pg_catalog.pg_get_userbyid(spcowner) AS \"%s\",\n"
+ " pg_catalog.pg_get_userbyid(spcowner) AS \"%s\",\n"
" spclocation AS \"%s\"",
_("Name"), _("Owner"), _("Location"));
"SELECT n.nspname as \"%s\",\n"
" p.proname as \"%s\",\n"
" CASE WHEN p.proretset THEN 'setof ' ELSE '' END ||\n"
- " pg_catalog.format_type(p.prorettype, NULL) as \"%s\",\n"
+ " pg_catalog.format_type(p.prorettype, NULL) as \"%s\",\n"
" pg_catalog.oidvectortypes(p.proargtypes) as \"%s\"",
- _("Schema"), _("Name"), _("Result data type"),
+ _("Schema"), _("Name"), _("Result data type"),
_("Argument data types"));
if (verbose)
",\n r.rolname as \"%s\",\n"
" l.lanname as \"%s\",\n"
" p.prosrc as \"%s\",\n"
- " pg_catalog.obj_description(p.oid, 'pg_proc') as \"%s\"",
+ " pg_catalog.obj_description(p.oid, 'pg_proc') as \"%s\"",
_("Owner"), _("Language"),
_("Source code"), _("Description"));
else
appendPQExpBuffer(&buf,
"\nFROM pg_catalog.pg_proc p"
- "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace"
- "\n LEFT JOIN pg_catalog.pg_language l ON l.oid = p.prolang"
- "\n LEFT JOIN pg_catalog.pg_roles r ON r.oid = p.proowner\n");
+ "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace"
+ "\n LEFT JOIN pg_catalog.pg_language l ON l.oid = p.prolang"
+ "\n LEFT JOIN pg_catalog.pg_roles r ON r.oid = p.proowner\n");
/*
- * we skip in/out funcs by excluding functions that take or return
- * cstring
+ * we skip in/out funcs by excluding functions that take or return cstring
*/
appendPQExpBuffer(&buf,
- "WHERE p.prorettype <> 'pg_catalog.cstring'::pg_catalog.regtype\n"
+ "WHERE p.prorettype <> 'pg_catalog.cstring'::pg_catalog.regtype\n"
" AND (p.proargtypes[0] IS NULL\n"
" OR p.proargtypes[0] <> 'pg_catalog.cstring'::pg_catalog.regtype)\n"
" AND NOT p.proisagg\n");
printfPQExpBuffer(&buf,
"SELECT n.nspname as \"%s\",\n"
- " pg_catalog.format_type(t.oid, NULL) AS \"%s\",\n",
+ " pg_catalog.format_type(t.oid, NULL) AS \"%s\",\n",
_("Schema"), _("Name"));
if (verbose)
appendPQExpBuffer(&buf,
" END AS \"%s\",\n",
_("Internal name"), _("Size"));
appendPQExpBuffer(&buf,
- " pg_catalog.obj_description(t.oid, 'pg_type') as \"%s\"\n",
+ " pg_catalog.obj_description(t.oid, 'pg_type') as \"%s\"\n",
_("Description"));
appendPQExpBuffer(&buf, "FROM pg_catalog.pg_type t\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n");
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n");
/*
* do not include array types (start with underscore); do not include
- * complex types (typrelid!=0) unless they are standalone composite
- * types
+ * complex types (typrelid!=0) unless they are standalone composite types
*/
appendPQExpBuffer(&buf, "WHERE (t.typrelid = 0 ");
appendPQExpBuffer(&buf, "OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c "
" o.oprname AS \"%s\",\n"
" CASE WHEN o.oprkind='l' THEN NULL ELSE pg_catalog.format_type(o.oprleft, NULL) END AS \"%s\",\n"
" CASE WHEN o.oprkind='r' THEN NULL ELSE pg_catalog.format_type(o.oprright, NULL) END AS \"%s\",\n"
- " pg_catalog.format_type(o.oprresult, NULL) AS \"%s\",\n"
- " coalesce(pg_catalog.obj_description(o.oid, 'pg_operator'),\n"
- " pg_catalog.obj_description(o.oprcode, 'pg_proc')) AS \"%s\"\n"
+ " pg_catalog.format_type(o.oprresult, NULL) AS \"%s\",\n"
+ " coalesce(pg_catalog.obj_description(o.oid, 'pg_operator'),\n"
+ " pg_catalog.obj_description(o.oprcode, 'pg_proc')) AS \"%s\"\n"
"FROM pg_catalog.pg_operator o\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = o.oprnamespace\n",
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = o.oprnamespace\n",
_("Schema"), _("Name"),
_("Left arg type"), _("Right arg type"),
_("Result type"), _("Description"));
" r.rolname as \"%s\"",
_("Name"), _("Owner"));
appendPQExpBuffer(&buf,
- ",\n pg_catalog.pg_encoding_to_char(d.encoding) as \"%s\"",
+ ",\n pg_catalog.pg_encoding_to_char(d.encoding) as \"%s\"",
_("Encoding"));
if (verbose)
appendPQExpBuffer(&buf,
_("Description"));
appendPQExpBuffer(&buf,
"\nFROM pg_catalog.pg_database d"
- "\n LEFT JOIN pg_catalog.pg_roles r ON d.datdba = r.oid\n"
+ "\n LEFT JOIN pg_catalog.pg_roles r ON d.datdba = r.oid\n"
"ORDER BY 1;");
res = PSQLexec(buf.data, false);
initPQExpBuffer(&buf);
/*
- * we ignore indexes and toast tables since they have no meaningful
- * rights
+ * we ignore indexes and toast tables since they have no meaningful rights
*/
printfPQExpBuffer(&buf,
"SELECT n.nspname as \"%s\",\n"
" CASE c.relkind WHEN 'r' THEN '%s' WHEN 'v' THEN '%s' WHEN 'S' THEN '%s' END as \"%s\",\n"
" c.relacl as \"%s\"\n"
"FROM pg_catalog.pg_class c\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
"WHERE c.relkind IN ('r', 'v', 'S')\n",
_("Schema"), _("Name"), _("table"), _("view"), _("sequence"), _("Type"), _("Access privileges"));
/*
* Unless a schema pattern is specified, we suppress system and temp
- * tables, since they normally aren't very interesting from a
- * permissions point of view. You can see 'em by explicit request
- * though, eg with \z pg_catalog.*
+ * tables, since they normally aren't very interesting from a permissions
+ * point of view. You can see 'em by explicit request though, eg with \z
+ * pg_catalog.*
*/
processNamePattern(&buf, pattern, true, false,
"n.nspname", "c.relname", NULL,
- "n.nspname !~ '^pg_' AND pg_catalog.pg_table_is_visible(c.oid)");
+ "n.nspname !~ '^pg_' AND pg_catalog.pg_table_is_visible(c.oid)");
appendPQExpBuffer(&buf, "ORDER BY 1, 2;");
appendPQExpBuffer(&buf,
"SELECT DISTINCT tt.nspname AS \"%s\", tt.name AS \"%s\", tt.object AS \"%s\", d.description AS \"%s\"\n"
"FROM (\n",
- _("Schema"), _("Name"), _("Object"), _("Description"));
+ _("Schema"), _("Name"), _("Object"), _("Description"));
/* Aggregate descriptions */
appendPQExpBuffer(&buf,
" CAST(p.proname AS pg_catalog.text) as name,"
" CAST('%s' AS pg_catalog.text) as object\n"
" FROM pg_catalog.pg_proc p\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace\n"
" WHERE p.proisagg\n",
_("aggregate"));
processNamePattern(&buf, pattern, true, false,
" CAST(p.proname AS pg_catalog.text) as name,"
" CAST('%s' AS pg_catalog.text) as object\n"
" FROM pg_catalog.pg_proc p\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace\n"
- " WHERE p.prorettype <> 'pg_catalog.cstring'::pg_catalog.regtype\n"
+ " WHERE p.prorettype <> 'pg_catalog.cstring'::pg_catalog.regtype\n"
" AND (p.proargtypes[0] IS NULL\n"
" OR p.proargtypes[0] <> 'pg_catalog.cstring'::pg_catalog.regtype)\n"
" AND NOT p.proisagg\n",
" CAST(o.oprname AS pg_catalog.text) as name,"
" CAST('%s' AS pg_catalog.text) as object\n"
" FROM pg_catalog.pg_operator o\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = o.oprnamespace\n",
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = o.oprnamespace\n",
_("operator"));
processNamePattern(&buf, pattern, false, false,
"n.nspname", "o.oprname", NULL,
" pg_catalog.format_type(t.oid, NULL) as name,"
" CAST('%s' AS pg_catalog.text) as object\n"
" FROM pg_catalog.pg_type t\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n",
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n",
_("data type"));
processNamePattern(&buf, pattern, false, false,
- "n.nspname", "pg_catalog.format_type(t.oid, NULL)", NULL,
+ "n.nspname", "pg_catalog.format_type(t.oid, NULL)", NULL,
"pg_catalog.pg_type_is_visible(t.oid)");
/* Relation (tables, views, indexes, sequences) descriptions */
" CASE c.relkind WHEN 'r' THEN '%s' WHEN 'v' THEN '%s' WHEN 'i' THEN '%s' WHEN 'S' THEN '%s' END"
" AS pg_catalog.text) as object\n"
" FROM pg_catalog.pg_class c\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
" WHERE c.relkind IN ('r', 'v', 'i', 'S')\n",
_("table"), _("view"), _("index"), _("sequence"));
processNamePattern(&buf, pattern, true, false,
" CAST(r.rulename AS pg_catalog.text) as name,"
" CAST('%s' AS pg_catalog.text) as object\n"
" FROM pg_catalog.pg_rewrite r\n"
- " JOIN pg_catalog.pg_class c ON c.oid = r.ev_class\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
+ " JOIN pg_catalog.pg_class c ON c.oid = r.ev_class\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
" WHERE r.rulename != '_RETURN'\n",
_("rule"));
/* XXX not sure what to do about visibility rule here? */
" CAST(t.tgname AS pg_catalog.text) as name,"
" CAST('%s' AS pg_catalog.text) as object\n"
" FROM pg_catalog.pg_trigger t\n"
- " JOIN pg_catalog.pg_class c ON c.oid = t.tgrelid\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n",
+ " JOIN pg_catalog.pg_class c ON c.oid = t.tgrelid\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n",
_("trigger"));
/* XXX not sure what to do about visibility rule here? */
processNamePattern(&buf, pattern, false, false,
" n.nspname,\n"
" c.relname\n"
"FROM pg_catalog.pg_class c\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n");
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n");
processNamePattern(&buf, pattern, false, false,
"n.nspname", "c.relname", NULL,
/* Get general table info */
printfPQExpBuffer(&buf,
- "SELECT relhasindex, relkind, relchecks, reltriggers, relhasrules, \n"
+ "SELECT relhasindex, relkind, relchecks, reltriggers, relhasrules, \n"
"relhasoids %s \n"
"FROM pg_catalog.pg_class WHERE oid = '%s'",
pset.sversion >= 80000 ? ", reltablespace" : "",
break;
default:
printfPQExpBuffer(&title, _("?%c? \"%s.%s\""),
- tableinfo.relkind, schemaname, relationname);
+ tableinfo.relkind, schemaname, relationname);
break;
}
printfPQExpBuffer(&buf,
"SELECT i.indisunique, i.indisprimary, i.indisclustered, a.amname, c2.relname,\n"
- " pg_catalog.pg_get_expr(i.indpred, i.indrelid, true)\n"
+ " pg_catalog.pg_get_expr(i.indpred, i.indrelid, true)\n"
"FROM pg_catalog.pg_index i, pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_am a\n"
- "WHERE i.indexrelid = c.oid AND c.oid = '%s' AND c.relam = a.oid\n"
+ "WHERE i.indexrelid = c.oid AND c.oid = '%s' AND c.relam = a.oid\n"
"AND i.indrelid = c2.oid",
oid);
printfPQExpBuffer(&buf,
"SELECT r.rulename, trim(trailing ';' from pg_catalog.pg_get_ruledef(r.oid, true))\n"
"FROM pg_catalog.pg_rewrite r\n"
- "WHERE r.ev_class = '%s' AND r.rulename != '_RETURN' ORDER BY 1",
+ "WHERE r.ev_class = '%s' AND r.rulename != '_RETURN' ORDER BY 1",
oid);
result = PSQLexec(buf.data, false);
if (!result)
{
printfPQExpBuffer(&buf,
"SELECT c2.relname, i.indisprimary, i.indisunique, i.indisclustered, "
- "pg_catalog.pg_get_indexdef(i.indexrelid, 0, true), c2.reltablespace\n"
+ "pg_catalog.pg_get_indexdef(i.indexrelid, 0, true), c2.reltablespace\n"
"FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i\n"
"WHERE c.oid = '%s' AND c.oid = i.indrelid AND i.indexrelid = c2.oid\n"
- "ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname",
+ "ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname",
oid);
result1 = PSQLexec(buf.data, false);
if (!result1)
{
printfPQExpBuffer(&buf,
"SELECT "
- "pg_catalog.pg_get_constraintdef(r.oid, true), "
+ "pg_catalog.pg_get_constraintdef(r.oid, true), "
"conname\n"
"FROM pg_catalog.pg_constraint r\n"
- "WHERE r.conrelid = '%s' AND r.contype = 'c' ORDER BY 1",
+ "WHERE r.conrelid = '%s' AND r.contype = 'c' ORDER BY 1",
oid);
result2 = PSQLexec(buf.data, false);
if (!result2)
if (tableinfo.triggers)
{
printfPQExpBuffer(&buf,
- "SELECT t.tgname, pg_catalog.pg_get_triggerdef(t.oid)\n"
+ "SELECT t.tgname, pg_catalog.pg_get_triggerdef(t.oid)\n"
"FROM pg_catalog.pg_trigger t\n"
"WHERE t.tgrelid = '%s' "
"AND (not tgisconstraint "
{
printfPQExpBuffer(&buf,
"SELECT conname,\n"
- " pg_catalog.pg_get_constraintdef(oid, true) as condef\n"
+ " pg_catalog.pg_get_constraintdef(oid, true) as condef\n"
"FROM pg_catalog.pg_constraint r\n"
- "WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1",
+ "WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1",
oid);
result5 = PSQLexec(buf.data, false);
if (!result5)
{
const char *indexdef;
const char *usingpos;
- PQExpBufferData tmpbuf;
+ PQExpBufferData tmpbuf;
/* Output index name */
printfPQExpBuffer(&buf, _(" \"%s\""),
/* Label as primary key or unique (but not both) */
appendPQExpBuffer(&buf,
- strcmp(PQgetvalue(result1, i, 1), "t") == 0
+ strcmp(PQgetvalue(result1, i, 1), "t") == 0
? " PRIMARY KEY," :
- (strcmp(PQgetvalue(result1, i, 2), "t") == 0
- ? " UNIQUE,"
- : ""));
+ (strcmp(PQgetvalue(result1, i, 2), "t") == 0
+ ? " UNIQUE,"
+ : ""));
/* Everything after "USING" is echoed verbatim */
indexdef = PQgetvalue(result1, i, 4);
usingpos = strstr(indexdef, " USING ");
/* Print tablespace of the index on the same line */
count_footers += 1;
initPQExpBuffer(&tmpbuf);
- if (add_tablespace_footer('i',
- atooid(PQgetvalue(result1, i, 5)),
- footers, &count_footers, tmpbuf, false))
+ if (add_tablespace_footer('i',
+ atooid(PQgetvalue(result1, i, 5)),
+ footers, &count_footers, tmpbuf, false))
{
appendPQExpBuffer(&buf, ", ");
appendPQExpBuffer(&buf, tmpbuf.data);
}
-/*
- * Return true if the relation uses non default tablespace;
- * otherwise return false
+/*
+ * Return true if the relation uses non default tablespace;
+ * otherwise return false
*/
static bool
add_tablespace_footer(char relkind, Oid tablespace, char **footers,
if (relkind == 'r' || relkind == 'i')
{
/*
- * We ignore the database default tablespace so that users not
- * using tablespaces don't need to know about them.
+ * We ignore the database default tablespace so that users not using
+ * tablespaces don't need to know about them.
*/
if (tablespace != 0)
{
/* Should always be the case, but.... */
if (PQntuples(result1) > 0)
{
- printfPQExpBuffer(&buf,
- newline?_("Tablespace: \"%s\""):_("tablespace \"%s\""),
- PQgetvalue(result1, 0, 0));
+ printfPQExpBuffer(&buf,
+ newline ? _("Tablespace: \"%s\"") : _("tablespace \"%s\""),
+ PQgetvalue(result1, 0, 0));
footers[(*count)++] = pg_strdup(buf.data);
}
printfPQExpBuffer(&buf,
"SELECT r.rolname AS \"%s\",\n"
- " CASE WHEN r.rolsuper THEN '%s' ELSE '%s' END AS \"%s\",\n"
- " CASE WHEN r.rolcreaterole THEN '%s' ELSE '%s' END AS \"%s\",\n"
- " CASE WHEN r.rolcreatedb THEN '%s' ELSE '%s' END AS \"%s\",\n"
- " CASE WHEN r.rolconnlimit < 0 THEN CAST('%s' AS pg_catalog.text)\n"
+ " CASE WHEN r.rolsuper THEN '%s' ELSE '%s' END AS \"%s\",\n"
+ " CASE WHEN r.rolcreaterole THEN '%s' ELSE '%s' END AS \"%s\",\n"
+ " CASE WHEN r.rolcreatedb THEN '%s' ELSE '%s' END AS \"%s\",\n"
+ " CASE WHEN r.rolconnlimit < 0 THEN CAST('%s' AS pg_catalog.text)\n"
" ELSE CAST(r.rolconnlimit AS pg_catalog.text)\n"
" END AS \"%s\", \n"
" ARRAY(SELECT b.rolname FROM pg_catalog.pg_auth_members m JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) WHERE m.member = r.oid) as \"%s\"\n"
"FROM pg_catalog.pg_roles r\n",
_("Role name"),
- _("yes"),_("no"),_("Superuser"),
- _("yes"),_("no"),_("Create role"),
- _("yes"),_("no"),_("Create DB"),
- _("no limit"),_("Connections"),
+ _("yes"), _("no"), _("Superuser"),
+ _("yes"), _("no"), _("Create role"),
+ _("yes"), _("no"), _("Create DB"),
+ _("no limit"), _("Connections"),
_("Member of"));
processNamePattern(&buf, pattern, false, false,
if (verbose)
appendPQExpBuffer(&buf,
- ",\n pg_catalog.obj_description(c.oid, 'pg_class') as \"%s\"",
+ ",\n pg_catalog.obj_description(c.oid, 'pg_class') as \"%s\"",
_("Description"));
appendPQExpBuffer(&buf,
"\nFROM pg_catalog.pg_class c"
- "\n LEFT JOIN pg_catalog.pg_roles r ON r.oid = c.relowner"
- "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace");
+ "\n LEFT JOIN pg_catalog.pg_roles r ON r.oid = c.relowner"
+ "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace");
if (showIndexes)
appendPQExpBuffer(&buf,
- "\n LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid"
- "\n LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid");
+ "\n LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid"
+ "\n LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid");
appendPQExpBuffer(&buf, "\nWHERE c.relkind IN (");
if (showTables)
/*
* If showSystem is specified, show only system objects (those in
- * pg_catalog). Otherwise, suppress system objects, including those
- * in pg_catalog and pg_toast. (We don't want to hide temp tables
- * though.)
+ * pg_catalog). Otherwise, suppress system objects, including those in
+ * pg_catalog and pg_toast. (We don't want to hide temp tables though.)
*/
if (showSystem)
appendPQExpBuffer(&buf, " AND n.nspname = 'pg_catalog'\n");
printfPQExpBuffer(&buf,
"SELECT n.nspname as \"%s\",\n"
" t.typname as \"%s\",\n"
- " pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
+ " pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
" CASE WHEN t.typnotnull AND t.typdefault IS NOT NULL THEN 'not null default '||t.typdefault\n"
- " WHEN t.typnotnull AND t.typdefault IS NULL THEN 'not null'\n"
+ " WHEN t.typnotnull AND t.typdefault IS NULL THEN 'not null'\n"
" WHEN NOT t.typnotnull AND t.typdefault IS NOT NULL THEN 'default '||t.typdefault\n"
" ELSE ''\n"
" END as \"%s\",\n"
- " pg_catalog.pg_get_constraintdef(r.oid, true) as \"%s\"\n"
+ " pg_catalog.pg_get_constraintdef(r.oid, true) as \"%s\"\n"
"FROM pg_catalog.pg_type t\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n"
- " LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n"
+ " LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid\n"
"WHERE t.typtype = 'd'\n",
_("Schema"),
_("Name"),
printfPQExpBuffer(&buf,
"SELECT n.nspname AS \"%s\",\n"
" c.conname AS \"%s\",\n"
- " pg_catalog.pg_encoding_to_char(c.conforencoding) AS \"%s\",\n"
- " pg_catalog.pg_encoding_to_char(c.contoencoding) AS \"%s\",\n"
+ " pg_catalog.pg_encoding_to_char(c.conforencoding) AS \"%s\",\n"
+ " pg_catalog.pg_encoding_to_char(c.contoencoding) AS \"%s\",\n"
" CASE WHEN c.condefault THEN '%s'\n"
" ELSE '%s' END AS \"%s\"\n"
- "FROM pg_catalog.pg_conversion c, pg_catalog.pg_namespace n\n"
+ "FROM pg_catalog.pg_conversion c, pg_catalog.pg_namespace n\n"
"WHERE n.oid = c.connamespace\n",
_("Schema"),
_("Name"),
initPQExpBuffer(&buf);
/* NEED LEFT JOIN FOR BINARY CASTS */
printfPQExpBuffer(&buf,
- "SELECT pg_catalog.format_type(castsource, NULL) AS \"%s\",\n"
- " pg_catalog.format_type(casttarget, NULL) AS \"%s\",\n"
+ "SELECT pg_catalog.format_type(castsource, NULL) AS \"%s\",\n"
+ " pg_catalog.format_type(casttarget, NULL) AS \"%s\",\n"
" CASE WHEN castfunc = 0 THEN '%s'\n"
" ELSE p.proname\n"
" END as \"%s\",\n"
" WHEN c.castcontext = 'a' THEN '%s'\n"
" ELSE '%s'\n"
" END as \"%s\"\n"
- "FROM pg_catalog.pg_cast c LEFT JOIN pg_catalog.pg_proc p\n"
+ "FROM pg_catalog.pg_cast c LEFT JOIN pg_catalog.pg_proc p\n"
" ON c.castfunc = p.oid\n"
"ORDER BY 1, 2",
_("Source type"),
if (verbose)
appendPQExpBuffer(&buf,
",\n n.nspacl as \"%s\","
- " pg_catalog.obj_description(n.oid, 'pg_namespace') as \"%s\"",
+ " pg_catalog.obj_description(n.oid, 'pg_namespace') as \"%s\"",
_("Access privileges"), _("Description"));
appendPQExpBuffer(&buf,
- "\nFROM pg_catalog.pg_namespace n LEFT JOIN pg_catalog.pg_roles r\n"
+ "\nFROM pg_catalog.pg_namespace n LEFT JOIN pg_catalog.pg_roles r\n"
" ON n.nspowner=r.oid\n"
"WHERE (n.nspname !~ '^pg_temp_' OR\n"
- " n.nspname = (pg_catalog.current_schemas(true))[1])\n"); /* temp schema is first */
+ " n.nspname = (pg_catalog.current_schemas(true))[1])\n"); /* temp schema is first */
processNamePattern(&buf, pattern, true, false,
NULL, "n.nspname", NULL,
initPQExpBuffer(&namebuf);
/*
- * Parse the pattern, converting quotes and lower-casing unquoted
- * letters; we assume this was NOT done by scan_option. Also, adjust
- * shell-style wildcard characters into regexp notation.
+ * Parse the pattern, converting quotes and lower-casing unquoted letters;
+ * we assume this was NOT done by scan_option. Also, adjust shell-style
+ * wildcard characters into regexp notation.
*/
inquotes = false;
cp = pattern;
/*
* Ordinary data character, transfer to pattern
*
- * Inside double quotes, or at all times if parsing an operator
- * name, quote regexp special characters with a backslash to
- * avoid regexp errors. Outside quotes, however, let them
- * pass through as-is; this lets knowledgeable users build
- * regexp expressions that are more powerful than shell-style
- * patterns.
+ * Inside double quotes, or at all times if parsing an operator name,
+ * quote regexp special characters with a backslash to avoid
+ * regexp errors. Outside quotes, however, let them pass through
+ * as-is; this lets knowledgeable users build regexp expressions
+ * that are more powerful than shell-style patterns.
*/
if ((inquotes || force_escape) &&
strchr("|*+?()[]{}.^$\\", *cp))
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/help.c,v 1.105 2005/07/18 20:57:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/help.c,v 1.106 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
puts(_(
"\nFor more information, type \"\\?\" (for internal commands) or \"\\help\"\n"
- "(for SQL commands) from within psql, or consult the psql section in\n"
+ "(for SQL commands) from within psql, or consult the psql section in\n"
"the PostgreSQL documentation.\n\n"
}
/* if you add/remove a line here, change the row count above */
/*
- * if this " is the start of the string then it ought to end there to
- * fit in 80 columns >> "
+ * if this " is the start of the string then it ought to end there to fit
+ * in 80 columns >> "
*/
fprintf(output, _("General\n"));
fprintf(output, _(" \\c[onnect] [DBNAME|- [USER]]\n"
- " connect to new database (currently \"%s\")\n"),
+ " connect to new database (currently \"%s\")\n"),
PQdb(pset.db));
fprintf(output, _(" \\cd [DIR] change the current working directory\n"));
fprintf(output, _(" \\copyright show PostgreSQL usage and distribution terms\n"));
fprintf(output, _(" \\i FILE execute commands from file\n"));
fprintf(output, _(" \\o [FILE] send all query results to file or |pipe\n"));
fprintf(output, _(" \\qecho [STRING]\n"
- " write string to query output stream (see \\o)\n"));
+ " write string to query output stream (see \\o)\n"));
fprintf(output, "\n");
fprintf(output, _("Informational\n"));
fprintf(output, _(" \\d [NAME] describe table, index, sequence, or view\n"));
fprintf(output, _(" \\d{t|i|s|v|S} [PATTERN] (add \"+\" for more detail)\n"
- " list tables/indexes/sequences/views/system tables\n"));
+ " list tables/indexes/sequences/views/system tables\n"));
fprintf(output, _(" \\da [PATTERN] list aggregate functions\n"));
fprintf(output, _(" \\db [PATTERN] list tablespaces (add \"+\" for more detail)\n"));
fprintf(output, _(" \\dc [PATTERN] list conversions\n"));
fprintf(output, _(" \\pset NAME [VALUE]\n"
" set table output option\n"
" (NAME := {format|border|expanded|fieldsep|footer|null|\n"
- " numericlocale|recordsep|tuples_only|title|tableattr|pager})\n"));
+ " numericlocale|recordsep|tuples_only|title|tableattr|pager})\n"));
fprintf(output, _(" \\t show only rows (currently %s)\n"),
ON(pset.popt.topt.tuples_only));
fprintf(output, _(" \\T [STRING] set HTML
tag attributes, or unset if none\n"));
fprintf(output, _(" \\lo_export LOBOID FILE\n"
" \\lo_import FILE [COMMENT]\n"
" \\lo_list\n"
- " \\lo_unlink LOBOID large object operations\n"));
+ " \\lo_unlink LOBOID large object operations\n"));
if (output != stdout)
{
VALUE_OR_NULL(QL_HELP[i + items_per_column].cmd));
if (i + 2 * items_per_column < QL_HELP_COUNT)
fprintf(output, "%-26s",
- VALUE_OR_NULL(QL_HELP[i + 2 * items_per_column].cmd));
+ VALUE_OR_NULL(QL_HELP[i + 2 * items_per_column].cmd));
fputc('\n', output);
}
/* Only close if we used the pager */
}
else
{
- int i,j,x=0;
+ int i,
+ j,
+ x = 0;
bool help_found = false;
FILE *output;
- size_t len, wordlen;
+ size_t len,
+ wordlen;
int nl_count = 0;
char *ch;
/* User gets two chances: exact match, then the first word */
-
+
/* First pass : strip trailing spaces and semicolons */
len = strlen(topic);
while (topic[len - 1] == ' ' || topic[len - 1] == ';')
- len--;
+ len--;
- for (x=1; x<=3; x++) /* Three chances to guess that word... */
+ for (x = 1; x <= 3; x++) /* Three chances to guess that word... */
{
- if (x>1) /* Nothing on first pass - try the opening words */
+ if (x > 1) /* Nothing on first pass - try the opening
+ * words */
+ {
+ wordlen = j = 1;
+ while (topic[j] != ' ' && j++ < len)
+ wordlen++;
+ if (x == 2)
{
- wordlen=j=1;
- while (topic[j] != ' ' && j++
- wordlen++;
- if (x==2)
- {
- j++;
- while (topic[j] != ' ' && j++<=len)
- wordlen++;
- }
- if (wordlen >= len) /* Don't try again if the same word */
- {
- output = PageOutput(nl_count, pager);
- break;
- }
- len = wordlen;
+ j++;
+ while (topic[j] != ' ' && j++ <= len)
+ wordlen++;
}
-
- /* Count newlines for pager */
- for (i = 0; QL_HELP[i].cmd; i++)
+ if (wordlen >= len) /* Don't try again if the same word */
{
- if (pg_strncasecmp(topic, QL_HELP[i].cmd, len) == 0 ||
- strcmp(topic, "*") == 0)
- {
- nl_count += 5;
- for (ch = QL_HELP[i].syntax; *ch != '\0'; ch++)
- if (*ch == '\n')
- nl_count++;
- /* If we have an exact match, exit. Fixes \h SELECT */
- if (pg_strcasecmp(topic, QL_HELP[i].cmd) == 0)
- break;
- }
+ output = PageOutput(nl_count, pager);
+ break;
}
-
- output = PageOutput(nl_count, pager);
-
- for (i = 0; QL_HELP[i].cmd; i++)
+ len = wordlen;
+ }
+
+ /* Count newlines for pager */
+ for (i = 0; QL_HELP[i].cmd; i++)
+ {
+ if (pg_strncasecmp(topic, QL_HELP[i].cmd, len) == 0 ||
+ strcmp(topic, "*") == 0)
{
- if (pg_strncasecmp(topic, QL_HELP[i].cmd, len) == 0 ||
- strcmp(topic, "*") == 0)
- {
- help_found = true;
- fprintf(output, _("Command: %s\n"
- "Description: %s\n"
- "Syntax:\n%s\n\n"),
- QL_HELP[i].cmd,
- _(QL_HELP[i].help),
- _(QL_HELP[i].syntax));
- /* If we have an exact match, exit. Fixes \h SELECT */
- if (pg_strcasecmp(topic, QL_HELP[i].cmd) == 0)
- break;
- }
+ nl_count += 5;
+ for (ch = QL_HELP[i].syntax; *ch != '\0'; ch++)
+ if (*ch == '\n')
+ nl_count++;
+ /* If we have an exact match, exit. Fixes \h SELECT */
+ if (pg_strcasecmp(topic, QL_HELP[i].cmd) == 0)
+ break;
}
- if (help_found) /* Don't keep trying if we got a match */
+ }
+
+ output = PageOutput(nl_count, pager);
+
+ for (i = 0; QL_HELP[i].cmd; i++)
+ {
+ if (pg_strncasecmp(topic, QL_HELP[i].cmd, len) == 0 ||
+ strcmp(topic, "*") == 0)
+ {
+ help_found = true;
+ fprintf(output, _("Command: %s\n"
+ "Description: %s\n"
+ "Syntax:\n%s\n\n"),
+ QL_HELP[i].cmd,
+ _(QL_HELP[i].help),
+ _(QL_HELP[i].syntax));
+ /* If we have an exact match, exit. Fixes \h SELECT */
+ if (pg_strcasecmp(topic, QL_HELP[i].cmd) == 0)
break;
+ }
+ }
+ if (help_found) /* Don't keep trying if we got a match */
+ break;
}
if (!help_found)
"Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group\n\n"
"This software is based on Postgres95, formerly known as Postgres, which\n"
"contains the following notice:\n\n"
- "Portions Copyright(c) 1994, Regents of the University of California\n\n"
- "Permission to use, copy, modify, and distribute this software and its\n"
+ "Portions Copyright(c) 1994, Regents of the University of California\n\n"
+ "Permission to use, copy, modify, and distribute this software and its\n"
"documentation for any purpose, without fee, and without a written agreement\n"
"is hereby granted, provided that the above copyright notice and this paragraph\n"
"and the following two paragraphs appear in all copies.\n\n"
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/input.c,v 1.45 2005/06/10 15:40:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/input.c,v 1.46 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#ifdef USE_READLINE
static bool useReadline;
static bool useHistory;
-char *psql_history;
+char *psql_history;
enum histcontrol
HC = GetHistControlConfig();
if (((HC & hctl_ignorespace) && s[0] == ' ') ||
- ((HC & hctl_ignoredups) && prev_hist && strcmp(s, prev_hist) == 0))
+ ((HC & hctl_ignoredups) && prev_hist && strcmp(s, prev_hist) == 0))
{
/* Ignore this line as far as history is concerned */
}
psql_error("could not save history to file \"%s\": %s\n", fname, strerror(errno));
}
#else
- psql_error("history is not supported by this installation\n");
+ psql_error("history is not supported by this installation\n");
#endif
return false;
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/large_obj.c,v 1.39 2005/07/02 17:01:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/large_obj.c,v 1.40 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "large_obj.h"
status = lo_export(pset.db, atooid(loid_arg), filename_arg);
if (status != 1)
- { /* of course this status is documented
- * nowhere :( */
+ { /* of course this status is documented nowhere
+ * :( */
fputs(PQerrorMessage(pset.db), stderr);
return fail_lo_xact("\\lo_export", own_transaction);
}
snprintf(buf, sizeof(buf),
"SELECT loid as \"ID\",\n"
- " pg_catalog.obj_description(loid, 'pg_largeobject') as \"%s\"\n"
+ " pg_catalog.obj_description(loid, 'pg_largeobject') as \"%s\"\n"
"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) x\n"
"ORDER BY 1",
_("Description"));
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/mainloop.c,v 1.67 2005/02/22 04:40:55 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/mainloop.c,v 1.68 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "mainloop.h"
{
PsqlScanState scan_state; /* lexer working state */
PQExpBuffer query_buf; /* buffer for query being accumulated */
- PQExpBuffer previous_buf; /* if there isn't anything in the new
- * buffer yet, use this one for \e, etc. */
+ PQExpBuffer previous_buf; /* if there isn't anything in the new buffer
+ * yet, use this one for \e, etc. */
char *line; /* current line of input */
int added_nl_pos;
bool success;
}
/*
- * establish the control-C handler only after main_loop_jmp is
- * ready
+ * establish the control-C handler only after main_loop_jmp is ready
*/
pqsignal(SIGINT, handle_sigint); /* control-C => cancel */
-
-#else /* WIN32 */
+#else /* WIN32 */
setup_cancel_handler();
#endif
line = gets_fromFile(source);
/*
- * query_buf holds query already accumulated. line is the
- * malloc'd new line of input (note it must be freed before
- * looping around!)
+ * query_buf holds query already accumulated. line is the malloc'd
+ * new line of input (note it must be freed before looping around!)
*/
/* No more input. Time to quit, or \i done */
prompt_status = prompt_tmp;
/*
- * Send command if semicolon found, or if end of line and
- * we're in single-line mode.
+ * Send command if semicolon found, or if end of line and we're in
+ * single-line mode.
*/
if (scan_result == PSCAN_SEMICOLON ||
(scan_result == PSCAN_EOL &&
/* handle backslash command */
/*
- * If we added a newline to query_buf, and nothing else
- * has been inserted in query_buf by the lexer, then strip
- * off the newline again. This avoids any change to
- * query_buf when a line contains only a backslash
- * command.
+ * If we added a newline to query_buf, and nothing else has
+ * been inserted in query_buf by the lexer, then strip off the
+ * newline again. This avoids any change to query_buf when a
+ * line contains only a backslash command.
*/
if (query_buf->len == added_nl_pos)
query_buf->data[--query_buf->len] = '\0';
slashCmdStatus = HandleSlashCmds(scan_state,
query_buf->len > 0 ?
- query_buf : previous_buf);
+ query_buf : previous_buf);
success = slashCmdStatus != CMD_ERROR;
}
/*
- * Reset SIGINT handler because main_loop_jmp will be invalid as soon
- * as we exit this routine. If there is an outer MainLoop instance,
- * it will re-enable ^C catching as soon as it gets back to the top of
- * its loop and resets main_loop_jmp to point to itself.
+ * Reset SIGINT handler because main_loop_jmp will be invalid as soon as
+ * we exit this routine. If there is an outer MainLoop instance, it will
+ * re-enable ^C catching as soon as it gets back to the top of its loop
+ * and resets main_loop_jmp to point to itself.
*/
#ifndef WIN32
pqsignal(SIGINT, SIG_DFL);
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/mbprint.c,v 1.17 2005/09/24 17:53:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/mbprint.c,v 1.18 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
return 0;
/*
- * if we arrive here, ucs is not a combining or C0/C1 control
- * character
+ * if we arrive here, ucs is not a combining or C0/C1 control character
*/
return 1 +
{
/*
* Unicode 3.1 compliant validation : for each category, it checks the
- * combination of each byte to make sur it maps to a valid range. It
- * also returns -1 for the following UCS values: ucs > 0x10ffff ucs &
- * 0xfffe = 0xfffe 0xfdd0 < ucs < 0xfdef ucs & 0xdb00 = 0xd800
- * (surrogates)
+ * combination of each byte to make sur it maps to a valid range. It also
+ * returns -1 for the following UCS values: ucs > 0x10ffff ucs & 0xfffe =
+ * 0xfffe 0xfdd0 < ucs < 0xfdef ucs & 0xdb00 = 0xd800 (surrogates)
*/
if ((*c & 0x80) == 0)
return 1;
/* check 0xfffe/0xffff, 0xfdd0..0xfedf range, surrogates */
if (((z == 0x0f) &&
(((yx & 0xffe) == 0xffe) ||
- (((yx & 0xf80) == 0xd80) && (lx >= 0x30) && (lx <= 0x4f)))) ||
+ (((yx & 0xf80) == 0xd80) && (lx >= 0x30) && (lx <= 0x4f)))) ||
((z == 0x0d) && ((yx & 0xb00) == 0x800)))
return -1;
return 3;
else
{
/*
- * obviously, other encodings may want to fix this, but I don't
- * know them myself, unfortunately.
+ * obviously, other encodings may want to fix this, but I don't know
+ * them myself, unfortunately.
*/
return len;
}
else
{
/*
- * other encodings needing validation should add their own
- * routines here
+ * other encodings needing validation should add their own routines
+ * here
*/
}
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.77 2005/10/04 19:01:18 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.78 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
static int
integer_digits(const char *my_str)
{
- int frac_len;
+ int frac_len;
if (my_str[0] == '-')
my_str++;
-
+
frac_len = strchr(my_str, '.') ? strlen(strchr(my_str, '.')) : 0;
return strlen(my_str) - frac_len;
static int
additional_numeric_locale_len(const char *my_str)
{
- int int_len = integer_digits(my_str), len = 0;
- int groupdigits = atoi(grouping);
+ int int_len = integer_digits(my_str),
+ len = 0;
+ int groupdigits = atoi(grouping);
if (int_len > 0)
/* Don't count a leading separator */
len = (int_len / groupdigits - (int_len % groupdigits == 0)) *
- strlen(thousands_sep);
+ strlen(thousands_sep);
if (strchr(my_str, '.') != NULL)
len += strlen(decimal_point) - strlen(".");
-
+
return len;
}
static char *
format_numeric_locale(const char *my_str)
{
- int i, j, int_len = integer_digits(my_str), leading_digits;
- int groupdigits = atoi(grouping);
- int new_str_start = 0;
- char *new_str = new_str = pg_local_malloc(
- strlen_with_numeric_locale(my_str) + 1);
+ int i,
+ j,
+ int_len = integer_digits(my_str),
+ leading_digits;
+ int groupdigits = atoi(grouping);
+ int new_str_start = 0;
+ char *new_str = new_str = pg_local_malloc(
+ strlen_with_numeric_locale(my_str) + 1);
leading_digits = (int_len % groupdigits != 0) ?
- int_len % groupdigits : groupdigits;
+ int_len % groupdigits : groupdigits;
- if (my_str[0] == '-') /* skip over sign, affects grouping calculations */
+ if (my_str[0] == '-') /* skip over sign, affects grouping
+ * calculations */
{
new_str[0] = my_str[0];
my_str++;
new_str_start = 1;
}
- for (i=0, j=new_str_start; ; i++, j++)
+ for (i = 0, j = new_str_start;; i++, j++)
{
/* Hit decimal point? */
if (my_str[i] == '.')
new_str[j] = '\0';
break;
}
-
+
/* Add separator? */
if (i != 0 && (i - leading_digits) % groupdigits == 0)
{
new_str[j] = my_str[i];
}
-
+
return new_str;
}
static void
-print_unaligned_text(const char *title, const char *const *headers,
- const char *const *cells, const char *const *footers,
+print_unaligned_text(const char *title, const char *const * headers,
+ const char *const * cells, const char *const * footers,
const char *opt_align, const char *opt_fieldsep,
const char *opt_recordsep, bool opt_tuples_only,
bool opt_numeric_locale, FILE *fout)
{
unsigned int col_count = 0;
unsigned int i;
- const char *const *ptr;
+ const char *const * ptr;
bool need_recordsep = false;
if (!opt_fieldsep)
}
if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
{
- char *my_cell = format_numeric_locale(*ptr);
+ char *my_cell = format_numeric_locale(*ptr);
fputs(my_cell, fout);
free(my_cell);
}
else
fputs(*ptr, fout);
-
+
if ((i + 1) % col_count)
fputs(opt_fieldsep, fout);
else
static void
-print_unaligned_vertical(const char *title, const char *const *headers,
- const char *const *cells,
- const char *const *footers, const char *opt_align,
+print_unaligned_vertical(const char *title, const char *const * headers,
+ const char *const * cells,
+ const char *const * footers, const char *opt_align,
const char *opt_fieldsep, const char *opt_recordsep,
- bool opt_tuples_only, bool opt_numeric_locale, FILE *fout)
+ bool opt_tuples_only, bool opt_numeric_locale, FILE *fout)
{
unsigned int col_count = 0;
unsigned int i;
- const char *const *ptr;
+ const char *const * ptr;
if (!opt_fieldsep)
opt_fieldsep = "";
fputs(opt_fieldsep, fout);
if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
{
- char *my_cell = format_numeric_locale(*ptr);
+ char *my_cell = format_numeric_locale(*ptr);
fputs(my_cell, fout);
free(my_cell);
static void
-print_aligned_text(const char *title, const char *const *headers,
- const char *const *cells, const char *const *footers,
- const char *opt_align, bool opt_tuples_only, bool opt_numeric_locale,
+print_aligned_text(const char *title, const char *const * headers,
+ const char *const * cells, const char *const * footers,
+ const char *opt_align, bool opt_tuples_only, bool opt_numeric_locale,
unsigned short int opt_border, int encoding,
FILE *fout)
{
tmp;
unsigned int *widths,
total_w;
- const char *const *ptr;
+ const char *const * ptr;
/* count columns */
for (ptr = headers; *ptr; ptr++)
for (i = 0, ptr = cells; *ptr; ptr++, i++)
{
- int add_numeric_locale_len;
+ int add_numeric_locale_len;
if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
- add_numeric_locale_len = additional_numeric_locale_len(*ptr);
- else
- add_numeric_locale_len = 0;
-
+ add_numeric_locale_len = additional_numeric_locale_len(*ptr);
+ else
+ add_numeric_locale_len = 0;
+
tmp = pg_wcswidth(*ptr, strlen(*ptr), encoding) + add_numeric_locale_len;
if (tmp > widths[i % col_count])
widths[i % col_count] = tmp;
/* content */
if (opt_align[i % col_count] == 'r')
{
- if (opt_numeric_locale)
- {
- char *my_cell = format_numeric_locale(*ptr);
+ if (opt_numeric_locale)
+ {
+ char *my_cell = format_numeric_locale(*ptr);
fprintf(fout, "%*s%s", widths[i % col_count] - cell_w[i], "", my_cell);
free(my_cell);
#ifndef __MINGW32__
/*
- * for some reason MinGW outputs an extra newline, so this supresses
- * it
+ * for some reason MinGW outputs an extra newline, so this supresses it
*/
fputc('\n', fout);
#endif
static void
-print_aligned_vertical(const char *title, const char *const *headers,
- const char *const *cells, const char *const *footers,
+print_aligned_vertical(const char *title, const char *const * headers,
+ const char *const * cells, const char *const * footers,
const char *opt_align, bool opt_tuples_only,
bool opt_numeric_locale, unsigned short int opt_border,
int encoding, FILE *fout)
{
unsigned int col_count = 0;
unsigned int record = 1;
- const char *const *ptr;
+ const char *const * ptr;
unsigned int i,
tmp = 0,
hwidth = 0,
/* find longest data cell */
for (i = 0, ptr = cells; *ptr; ptr++, i++)
{
- int add_numeric_locale_len;
+ int add_numeric_locale_len;
if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
add_numeric_locale_len = additional_numeric_locale_len(*ptr);
- else
+ else
add_numeric_locale_len = 0;
tmp = pg_wcswidth(*ptr, strlen(*ptr), encoding) + add_numeric_locale_len;
if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
{
- char *my_cell = format_numeric_locale(*ptr);
-
+ char *my_cell = format_numeric_locale(*ptr);
+
if (opt_border < 2)
fprintf(fout, "%s\n", my_cell);
else
html_escaped_print(const char *in, FILE *fout)
{
const char *p;
- bool leading_space = true;
-
+ bool leading_space = true;
+
for (p = in; *p; p++)
{
switch (*p)
static void
-print_html_text(const char *title, const char *const *headers,
- const char *const *cells, const char *const *footers,
+print_html_text(const char *title, const char *const * headers,
+ const char *const * cells, const char *const * footers,
const char *opt_align, bool opt_tuples_only,
bool opt_numeric_locale, unsigned short int opt_border,
const char *opt_table_attr, FILE *fout)
{
unsigned int col_count = 0;
unsigned int i;
- const char *const *ptr;
+ const char *const * ptr;
fprintf(fout, "
if (opt_table_attr)
fprintf(fout, "
", opt_align[(i) % col_count] == 'r' ? "right" : "left");
/* is string only whitespace? */
- if ((*ptr)[strspn(*ptr, " \t")] == '\0')
+ if ((*ptr)[strspn(*ptr, " \t")] == '\0')
fputs(" ", fout);
else if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
{
- char *my_cell = format_numeric_locale(*ptr);
+ char *my_cell = format_numeric_locale(*ptr);
- html_escaped_print(my_cell, fout);
- free(my_cell);
+ html_escaped_print(my_cell, fout);
+ free(my_cell);
}
else
html_escaped_print(*ptr, fout);
static void
-print_html_vertical(const char *title, const char *const *headers,
- const char *const *cells, const char *const *footers,
- const char *opt_align, bool opt_tuples_only,
- bool opt_numeric_locale, unsigned short int opt_border,
- const char *opt_table_attr, FILE *fout)
+print_html_vertical(const char *title, const char *const * headers,
+ const char *const * cells, const char *const * footers,
+ const char *opt_align, bool opt_tuples_only,
+ bool opt_numeric_locale, unsigned short int opt_border,
+ const char *opt_table_attr, FILE *fout)
{
unsigned int col_count = 0;
unsigned int i;
unsigned int record = 1;
- const char *const *ptr;
+ const char *const * ptr;
fprintf(fout, "
if (opt_table_attr)
fprintf(fout, " ", opt_align[i % col_count] == 'r' ? "right" : "left");
/* is string only whitespace? */
- if ((*ptr)[strspn(*ptr, " \t")] == '\0')
+ if ((*ptr)[strspn(*ptr, " \t")] == '\0')
fputs(" ", fout);
else if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
{
- char *my_cell = format_numeric_locale(*ptr);
+ char *my_cell = format_numeric_locale(*ptr);
- html_escaped_print(my_cell, fout);
- free(my_cell);
+ html_escaped_print(my_cell, fout);
+ free(my_cell);
}
else
html_escaped_print(*ptr, fout);
static void
-print_latex_text(const char *title, const char *const *headers,
- const char *const *cells, const char *const *footers,
+print_latex_text(const char *title, const char *const * headers,
+ const char *const * cells, const char *const * footers,
const char *opt_align, bool opt_tuples_only,
bool opt_numeric_locale, unsigned short int opt_border,
FILE *fout)
{
unsigned int col_count = 0;
unsigned int i;
- const char *const *ptr;
+ const char *const * ptr;
/* print title */
{
if (opt_numeric_locale)
{
- char *my_cell = format_numeric_locale(*ptr);
+ char *my_cell = format_numeric_locale(*ptr);
latex_escaped_print(my_cell, fout);
free(my_cell);
static void
-print_latex_vertical(const char *title, const char *const *headers,
- const char *const *cells, const char *const *footers,
- const char *opt_align, bool opt_tuples_only,
- bool opt_numeric_locale, unsigned short int opt_border,
- FILE *fout)
+print_latex_vertical(const char *title, const char *const * headers,
+ const char *const * cells, const char *const * footers,
+ const char *opt_align, bool opt_tuples_only,
+ bool opt_numeric_locale, unsigned short int opt_border,
+ FILE *fout)
{
unsigned int col_count = 0;
unsigned int i;
- const char *const *ptr;
+ const char *const * ptr;
unsigned int record = 1;
(void) opt_align; /* currently unused parameter */
{
if (opt_numeric_locale)
{
- char *my_cell = format_numeric_locale(*ptr);
+ char *my_cell = format_numeric_locale(*ptr);
latex_escaped_print(my_cell, fout);
free(my_cell);
static void
-print_troff_ms_text(const char *title, const char *const *headers,
- const char *const *cells, const char *const *footers,
- const char *opt_align, bool opt_tuples_only,
- bool opt_numeric_locale, unsigned short int opt_border,
- FILE *fout)
+print_troff_ms_text(const char *title, const char *const * headers,
+ const char *const * cells, const char *const * footers,
+ const char *opt_align, bool opt_tuples_only,
+ bool opt_numeric_locale, unsigned short int opt_border,
+ FILE *fout)
{
unsigned int col_count = 0;
unsigned int i;
- const char *const *ptr;
+ const char *const * ptr;
/* print title */
{
if (opt_numeric_locale)
{
- char *my_cell = format_numeric_locale(*ptr);
+ char *my_cell = format_numeric_locale(*ptr);
troff_ms_escaped_print(my_cell, fout);
free(my_cell);
static void
-print_troff_ms_vertical(const char *title, const char *const *headers,
- const char *const *cells, const char *const *footers,
- const char *opt_align, bool opt_tuples_only,
- bool opt_numeric_locale, unsigned short int opt_border,
- FILE *fout)
+print_troff_ms_vertical(const char *title, const char *const * headers,
+ const char *const * cells, const char *const * footers,
+ const char *opt_align, bool opt_tuples_only,
+ bool opt_numeric_locale, unsigned short int opt_border,
+ FILE *fout)
{
unsigned int col_count = 0;
unsigned int i;
- const char *const *ptr;
+ const char *const * ptr;
unsigned int record = 1;
- unsigned short current_format = 0; /* 0=none, 1=header, 2=body */
+ unsigned short current_format = 0; /* 0=none, 1=header, 2=body */
(void) opt_align; /* currently unused parameter */
fputs("center;\n", fout);
/* basic format */
- if (opt_tuples_only)
- fputs("c l;\n", fout);
+ if (opt_tuples_only)
+ fputs("c l;\n", fout);
/* count columns */
for (ptr = headers; *ptr; ptr++)
fputc('\t', fout);
if (opt_numeric_locale)
{
- char *my_cell = format_numeric_locale(*ptr);
+ char *my_cell = format_numeric_locale(*ptr);
troff_ms_escaped_print(my_cell, fout);
free(my_cell);
void
printTable(const char *title,
- const char *const *headers,
- const char *const *cells,
- const char *const *footers,
+ const char *const * headers,
+ const char *const * cells,
+ const char *const * footers,
const char *align,
const printTableOpt *opt, FILE *fout, FILE *flog)
{
const char *default_footer[] = {NULL};
unsigned short int border = opt->border;
FILE *output;
- bool use_expanded;
+ bool use_expanded;
if (opt->format == PRINT_NOTHING)
return;
border = 2;
/*
- * We only want to display the results in "expanded" format if
- * this is a normal (user-submitted) query, not a table we're
- * printing for a slash command.
+ * We only want to display the results in "expanded" format if this is a
+ * normal (user-submitted) query, not a table we're printing for a slash
+ * command.
*/
if (opt->expanded && opt->normal_query)
use_expanded = true;
int col_count = 0,
row_count = 0,
lines;
- const char *const *ptr;
+ const char *const * ptr;
/* rough estimate of columns and rows */
if (headers)
if (use_expanded)
print_unaligned_vertical(title, headers, cells, footers, align,
opt->fieldSep, opt->recordSep,
- opt->tuples_only, opt->numericLocale, output);
+ opt->tuples_only, opt->numericLocale, output);
else
print_unaligned_text(title, headers, cells, footers, align,
opt->fieldSep, opt->recordSep,
- opt->tuples_only, opt->numericLocale, output);
+ opt->tuples_only, opt->numericLocale, output);
break;
case PRINT_ALIGNED:
if (use_expanded)
print_aligned_vertical(title, headers, cells, footers, align,
- opt->tuples_only, opt->numericLocale, border,
+ opt->tuples_only, opt->numericLocale, border,
opt->encoding, output);
else
print_aligned_text(title, headers, cells, footers, align,
border, opt->tableAttr, output);
else
print_html_text(title, headers, cells, footers,
- align, opt->tuples_only, opt->numericLocale, border,
+ align, opt->tuples_only, opt->numericLocale, border,
opt->tableAttr, output);
break;
case PRINT_LATEX:
if (*extlconv->grouping && atoi(extlconv->grouping) > 0)
grouping = strdup(extlconv->grouping);
else
- grouping = "3"; /* most common */
+ grouping = "3"; /* most common */
if (*extlconv->thousands_sep)
thousands_sep = strdup(extlconv->thousands_sep);
else if (*decimal_point != ',')
else
thousands_sep = ".";
}
-
-
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/print.h,v 1.28 2005/07/18 20:57:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/print.h,v 1.29 2005/10/15 02:49:40 momjian Exp $
*/
#ifndef PRINT_H
#define PRINT_H
typedef struct _printTableOpt
{
enum printFormat format; /* one of the above */
- bool expanded; /* expanded/vertical output (if supported
- * by output format) */
+ bool expanded; /* expanded/vertical output (if supported by
+ * output format) */
unsigned short int pager; /* use pager for output (if to stdout and
* stdout is a tty) 0=off 1=on 2=always */
bool tuples_only; /* don't output headers, row counts, etc. */
- unsigned short int border; /* Print a border around the table.
- * 0=none, 1=dividing lines, 2=full */
+ unsigned short int border; /* Print a border around the table. 0=none,
+ * 1=dividing lines, 2=full */
char *fieldSep; /* field separator for unaligned text mode */
- char *recordSep; /* record separator for unaligned text
- * mode */
+ char *recordSep; /* record separator for unaligned text mode */
bool numericLocale; /* locale-aware numeric units separator and
- * decimal marker */
+ * decimal marker */
char *tableAttr; /* attributes for HTML */
int encoding; /* character encoding */
- bool normal_query; /* are we presenting the results of a
- * "normal" query, or a slash
- * command? */
+ bool normal_query; /* are we presenting the results of a "normal"
+ * query, or a slash command? */
} printTableOpt;
char *nullPrint; /* how to print null entities */
bool quote; /* quote all values as much as possible */
char *title; /* override title */
- char **footers; /* override footer (default is "(xx
- * rows)") */
+ char **footers; /* override footer (default is "(xx rows)") */
bool default_footer; /* print default footer if footers==NULL */
} printQueryOpt;
*
* It calls the printTable above with all the things set straight.
*/
-void printQuery(const PGresult *result, const printQueryOpt *opt,
- FILE *fout, FILE *flog);
+void printQuery(const PGresult *result, const printQueryOpt *opt,
+ FILE *fout, FILE *flog);
-void setDecimalLocale(void);
+void setDecimalLocale(void);
#ifndef __CYGWIN__
#define DEFAULT_PAGER "more"
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/prompt.c,v 1.39 2005/05/30 18:28:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/prompt.c,v 1.40 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "prompt.h"
case '5':
case '6':
case '7':
- *buf = (char) strtol(p, (char **)&p, 8);
+ *buf = (char) strtol(p, (char **) &p, 8);
--p;
break;
case 'R':
/*
* readline >=4.0 undocumented feature: non-printing
- * characters in prompt strings must be marked as
- * such, in order to properly display the line during
- * editing.
+ * characters in prompt strings must be marked as such, in
+ * order to properly display the line during editing.
*/
buf[0] = '\001';
buf[1] = (*p == '[') ? RL_PROMPT_START_IGNORE : RL_PROMPT_END_IGNORE;
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/settings.h,v 1.25 2005/06/14 02:57:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/settings.h,v 1.26 2005/10/15 02:49:40 momjian Exp $
*/
#ifndef SETTINGS_H
#define SETTINGS_H
char *gfname; /* one-shot file output argument for \g */
- bool notty; /* stdin or stdout is not a tty (as
- * determined on startup) */
- bool getPassword; /* prompt the user for a username and
- * password */
+ bool notty; /* stdin or stdout is not a tty (as determined
+ * on startup) */
+ bool getPassword; /* prompt the user for a username and password */
FILE *cur_cmd_source; /* describe the status of the current main
* loop */
bool cur_cmd_interactive;
bool timing; /* enable timing of all queries */
PGVerbosity verbosity; /* current error verbosity level */
- FILE *logfile; /* session log file handle */
+ FILE *logfile; /* session log file handle */
} PsqlSettings;
extern PsqlSettings pset;
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.124 2005/10/04 19:01:18 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.125 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
#include "getopt_long.h"
#ifndef HAVE_INT_OPTRESET
-int optreset;
+int optreset;
#endif
#include
char *username = NULL;
char *password = NULL;
- char *password_prompt = NULL;
+ char *password_prompt = NULL;
bool need_pass;
set_pglocale_pgservice(argv[0], "psql");
if (options.username)
{
/*
- * The \001 is a hack to support the deprecated -u option which
- * issues a username prompt. The recommended option is -U followed
- * by the name on the command line.
+ * The \001 is a hack to support the deprecated -u option which issues
+ * a username prompt. The recommended option is -U followed by the
+ * name on the command line.
*/
if (strcmp(options.username, "\001") == 0)
username = simple_prompt("User name: ", 100, true);
{
need_pass = false;
pset.db = PQsetdbLogin(options.host, options.port, NULL, NULL,
- options.action == ACT_LIST_DB ? "postgres" : options.dbname,
+ options.action == ACT_LIST_DB ? "postgres" : options.dbname,
username, password);
if (PQstatus(pset.db) == CONNECTION_BAD &&
if (!QUIET() && !pset.notty)
{
- int client_ver = parse_version(PG_VERSION);
+ int client_ver = parse_version(PG_VERSION);
if (pset.sversion != client_ver)
{
const char *server_version;
- char server_ver_str[16];
+ char server_ver_str[16];
/* Try to get full text form, might include "devel" etc */
server_version = PQparameterStatus(pset.db, "server_version");
pset.progname, PG_VERSION);
printf(_("Type: \\copyright for distribution terms\n"
- " \\h for help with SQL commands\n"
- " \\? for help with psql commands\n"
- " \\g or terminate with semicolon to execute query\n"
- " \\q to quit\n\n"));
+ " \\h for help with SQL commands\n"
+ " \\? for help with psql commands\n"
+ " \\g or terminate with semicolon to execute query\n"
+ " \\q to quit\n\n"));
if (pset.sversion / 100 != client_ver / 100)
printf(_("WARNING: You are connected to a server with major version %d.%d,\n"
"but your %s client is major version %d.%d. Some backslash commands,\n"
"such as \\d, might not work properly.\n\n"),
- pset.sversion / 10000, (pset.sversion / 100) % 100,
- pset.progname,
- client_ver / 10000, (client_ver / 100) % 100);
+ pset.sversion / 10000, (pset.sversion / 100) % 100,
+ pset.progname,
+ client_ver / 10000, (client_ver / 100) % 100);
#ifdef USE_SSL
printSSLInfo();
break;
case 'u':
pset.getPassword = true;
- options->username = "\001"; /* hopefully nobody has
- * that username */
+ options->username = "\001"; /* hopefully nobody has that
+ * username */
/* this option is out */
used_old_u_option = true;
break;
}
/*
- * if we still have arguments, use it as the database name and
- * username
+ * if we still have arguments, use it as the database name and username
*/
while (argc - optind >= 1)
{
sprintf(psqlrc, "%s-%s", filename, PG_VERSION);
if (access(psqlrc, R_OK) == 0)
- (void)process_file(psqlrc);
+ (void) process_file(psqlrc);
else if (access(filename, R_OK) == 0)
- (void)process_file(filename);
+ (void) process_file(filename);
free(psqlrc);
}
concp = GetConsoleCP();
if (wincp != concp)
{
- printf(_("Warning: Console code page (%u) differs from Windows code page (%u)\n"
- " 8-bit characters may not work correctly. See psql reference\n"
- " page \"Notes for Windows users\" for details.\n\n"),
+ printf(_("Warning: Console code page (%u) differs from Windows code page (%u)\n"
+ " 8-bit characters may not work correctly. See psql reference\n"
+ " page \"Notes for Windows users\" for details.\n\n"),
concp, wincp);
}
}
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/stringutils.c,v 1.40 2005/01/01 05:43:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/stringutils.c,v 1.41 2005/10/15 02:49:40 momjian Exp $
*/
#include "postgres_fe.h"
bool del_quotes,
int encoding)
{
- static char *storage = NULL;/* store the local copy of the users
- * string here */
- static char *string = NULL; /* pointer into storage where to continue
- * on next call */
+ static char *storage = NULL;/* store the local copy of the users string
+ * here */
+ static char *string = NULL; /* pointer into storage where to continue on
+ * next call */
/* variously abused variables: */
unsigned int offset;
/*
* We may need extra space to insert delimiter nulls for adjacent
- * tokens. 2X the space is a gross overestimate, but it's
- * unlikely that this code will be used on huge strings anyway.
+ * tokens. 2X the space is a gross overestimate, but it's unlikely
+ * that this code will be used on huge strings anyway.
*/
storage = pg_malloc(2 * strlen(s) + 1);
strcpy(storage, s);
if (delim && strchr(delim, *start))
{
/*
- * If not at end of string, we need to insert a null to terminate
- * the returned token. We can just overwrite the next character
- * if it happens to be in the whitespace set ... otherwise move
- * over the rest of the string to make room. (This is why we
- * allocated extra space above).
+ * If not at end of string, we need to insert a null to terminate the
+ * returned token. We can just overwrite the next character if it
+ * happens to be in the whitespace set ... otherwise move over the
+ * rest of the string to make room. (This is why we allocated extra
+ * space above).
*/
p = start + 1;
if (*p != '\0')
}
/*
- * If not at end of string, we need to insert a null to terminate
- * the returned token. See notes above.
+ * If not at end of string, we need to insert a null to terminate the
+ * returned token. See notes above.
*/
if (*p != '\0')
{
}
/*
- * Otherwise no quoting character. Scan till next whitespace,
- * delimiter or quote. NB: at this point, *start is known not to be
- * '\0', whitespace, delim, or quote, so we will consume at least one
- * character.
+ * Otherwise no quoting character. Scan till next whitespace, delimiter
+ * or quote. NB: at this point, *start is known not to be '\0',
+ * whitespace, delim, or quote, so we will consume at least one character.
*/
offset = strcspn(start, whitespace);
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.137 2005/08/14 18:49:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.138 2005/10/15 02:49:40 momjian Exp $
*/
/*----------------------------------------------------------------------
const char *catname;
/*
- * Selection condition --- only rows meeting this condition are
- * candidates to display. If catname mentions multiple tables,
- * include the necessary join condition here. For example, "c.relkind
- * = 'r'". Write NULL (not an empty string) if not needed.
+ * Selection condition --- only rows meeting this condition are candidates
+ * to display. If catname mentions multiple tables, include the necessary
+ * join condition here. For example, "c.relkind = 'r'". Write NULL (not
+ * an empty string) if not needed.
*/
const char *selcondition;
/*
* Visibility condition --- which rows are visible without schema
- * qualification? For example,
- * "pg_catalog.pg_table_is_visible(c.oid)".
+ * qualification? For example, "pg_catalog.pg_table_is_visible(c.oid)".
*/
const char *viscondition;
/*
- * Namespace --- name of field to join to pg_namespace.oid. For
- * example, "c.relnamespace".
+ * Namespace --- name of field to join to pg_namespace.oid. For example,
+ * "c.relnamespace".
*/
const char *namespace;
/*
- * Result --- the appropriately-quoted name to return, in the case of
- * an unqualified name. For example,
- * "pg_catalog.quote_ident(c.relname)".
+ * Result --- the appropriately-quoted name to return, in the case of an
+ * unqualified name. For example, "pg_catalog.quote_ident(c.relname)".
*/
const char *result;
* the completion callback functions. Ugly but there is no better way.
*/
static const char *completion_charp; /* to pass a string */
-static const char *const * completion_charpp; /* to pass a list of
- * strings */
+static const char *const * completion_charpp; /* to pass a list of strings */
static const char *completion_info_charp; /* to pass a second string */
static const SchemaQuery *completion_squery; /* to pass a SchemaQuery */
static const pgsql_thing_t words_after_create[] = {
{"AGGREGATE", NULL, &Query_for_list_of_aggregates},
- {"CAST", NULL, NULL}, /* Casts have complex structures for
- * names, so skip it */
- /* CREATE CONSTRAINT TRIGGER is not supported here because it is designed to be used only by pg_dump. */
+ {"CAST", NULL, NULL}, /* Casts have complex structures for names, so
+ * skip it */
+
+ /*
+ * CREATE CONSTRAINT TRIGGER is not supported here because it is designed
+ * to be used only by pg_dump.
+ */
{"CONVERSION", "SELECT pg_catalog.quote_ident(conname) FROM pg_catalog.pg_conversion WHERE substring(pg_catalog.quote_ident(conname),1,%d)='%s'"},
{"DATABASE", Query_for_list_of_databases},
{"DOMAIN", NULL, &Query_for_list_of_domains},
{"GROUP", Query_for_list_of_roles},
{"LANGUAGE", Query_for_list_of_languages},
{"INDEX", NULL, &Query_for_list_of_indexes},
- {"OPERATOR", NULL, NULL}, /* Querying for this is probably not such
- * a good idea. */
+ {"OPERATOR", NULL, NULL}, /* Querying for this is probably not such a
+ * good idea. */
{"ROLE", Query_for_list_of_roles},
{"RULE", "SELECT pg_catalog.quote_ident(rulename) FROM pg_catalog.pg_rules WHERE substring(pg_catalog.quote_ident(rulename),1,%d)='%s'"},
{"SCHEMA", Query_for_list_of_schemas},
completion_max_records = 1000;
/*
- * There is a variable rl_completion_query_items for this but
- * apparently it's not defined everywhere.
+ * There is a variable rl_completion_query_items for this but apparently
+ * it's not defined everywhere.
*/
}
/*
* Scan the input line before our current position for the last four
- * words. According to those we'll make some smart decisions on what
- * the user is probably intending to type. TODO: Use strtokx() to do
- * this.
+ * words. According to those we'll make some smart decisions on what the
+ * user is probably intending to type. TODO: Use strtokx() to do this.
*/
prev_wd = previous_word(start, 0);
prev2_wd = previous_word(start, 1);
/* ALTER */
/*
- * complete with what you can alter (TABLE, GROUP, USER, ...) unless
- * we're in ALTER TABLE sth ALTER
+ * complete with what you can alter (TABLE, GROUP, USER, ...) unless we're
+ * in ALTER TABLE sth ALTER
*/
else if (pg_strcasecmp(prev_wd, "ALTER") == 0 &&
pg_strcasecmp(prev3_wd, "TABLE") != 0)
}
/* ALTER AGGREGATE,FUNCTION */
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- (pg_strcasecmp(prev2_wd, "AGGREGATE") == 0 ||
+ (pg_strcasecmp(prev2_wd, "AGGREGATE") == 0 ||
pg_strcasecmp(prev2_wd, "FUNCTION") == 0))
{
static const char *const list_ALTERAGG[] =
- {"OWNER TO", "RENAME TO","SET SCHEMA", NULL};
+ {"OWNER TO", "RENAME TO", "SET SCHEMA", NULL};
- COMPLETE_WITH_LIST(list_ALTERAGG);
+ COMPLETE_WITH_LIST(list_ALTERAGG);
}
/* ALTER CONVERSION,SCHEMA */
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
- (pg_strcasecmp(prev2_wd, "CONVERSION") == 0 ||
+ (pg_strcasecmp(prev2_wd, "CONVERSION") == 0 ||
pg_strcasecmp(prev2_wd, "SCHEMA") == 0))
{
static const char *const list_ALTERGEN[] =
/* ALTER USER,ROLE */
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
(pg_strcasecmp(prev2_wd, "USER") == 0 ||
- pg_strcasecmp(prev2_wd, "ROLE") == 0))
+ pg_strcasecmp(prev2_wd, "ROLE") == 0))
{
static const char *const list_ALTERUSER[] =
{"ENCRYPTED", "UNENCRYPTED", "CREATEDB", "NOCREATEDB", "CREATEUSER",
- "NOCREATEUSER","CREATEROLE","NOCREATEROLE","INHERIT","NOINHERIT",
- "LOGIN","NOLOGIN","CONNECTION LIMIT", "VALID UNTIL", "RENAME TO",
- "SUPERUSER","NOSUPERUSER", "SET", "RESET", NULL};
+ "NOCREATEUSER", "CREATEROLE", "NOCREATEROLE", "INHERIT", "NOINHERIT",
+ "LOGIN", "NOLOGIN", "CONNECTION LIMIT", "VALID UNTIL", "RENAME TO",
+ "SUPERUSER", "NOSUPERUSER", "SET", "RESET", NULL};
COMPLETE_WITH_LIST(list_ALTERUSER);
}
/* complete ALTER USER,ROLE ENCRYPTED,UNENCRYPTED with PASSWORD */
else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
- (pg_strcasecmp(prev3_wd, "ROLE") == 0 || pg_strcasecmp(prev3_wd, "USER") == 0) &&
- (pg_strcasecmp(prev_wd, "ENCRYPTED") == 0 || pg_strcasecmp(prev_wd, "UNENCRYPTED") == 0))
+ (pg_strcasecmp(prev3_wd, "ROLE") == 0 || pg_strcasecmp(prev3_wd, "USER") == 0) &&
+ (pg_strcasecmp(prev_wd, "ENCRYPTED") == 0 || pg_strcasecmp(prev_wd, "UNENCRYPTED") == 0))
{
- COMPLETE_WITH_CONST("PASSWORD");
+ COMPLETE_WITH_CONST("PASSWORD");
}
/* ALTER DOMAIN */
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
pg_strcasecmp(prev2_wd, "SEQUENCE") == 0)
{
- static const char *const list_ALTERSEQUENCE[] =
- {"INCREMENT", "MINVALUE", "MAXVALUE", "RESTART", "NO", "CACHE", "CYCLE",
- "SET SCHEMA", NULL};
+ static const char *const list_ALTERSEQUENCE[] =
+ {"INCREMENT", "MINVALUE", "MAXVALUE", "RESTART", "NO", "CACHE", "CYCLE",
+ "SET SCHEMA", NULL};
- COMPLETE_WITH_LIST(list_ALTERSEQUENCE);
+ COMPLETE_WITH_LIST(list_ALTERSEQUENCE);
}
/* ALTER SEQUENCE NO */
else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
pg_strcasecmp(prev3_wd, "SEQUENCE") == 0 &&
pg_strcasecmp(prev_wd, "NO") == 0)
{
- static const char *const list_ALTERSEQUENCE2[] =
- {"MINVALUE", "MAXVALUE", "CYCLE", NULL};
+ static const char *const list_ALTERSEQUENCE2[] =
+ {"MINVALUE", "MAXVALUE", "CYCLE", NULL};
- COMPLETE_WITH_LIST(list_ALTERSEQUENCE2);
+ COMPLETE_WITH_LIST(list_ALTERSEQUENCE2);
}
/* ALTER TRIGGER , add ON */
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
pg_strcasecmp(prev2_wd, "ALTER") == 0))
{
/* DROP ... does not work well yet */
- static const char *const list_COLUMNALTER[] =
+ static const char *const list_COLUMNALTER[] =
{"TYPE", "SET DEFAULT", "DROP DEFAULT", "SET NOT NULL",
- "DROP NOT NULL", "SET STATISTICS", "SET STORAGE", NULL};
+ "DROP NOT NULL", "SET STATISTICS", "SET STORAGE", NULL};
COMPLETE_WITH_LIST(list_COLUMNALTER);
}
pg_strcasecmp(prev_wd, "SET") == 0)
{
static const char *const list_TABLESET[] =
- {"WITHOUT", "TABLESPACE","SCHEMA", NULL};
+ {"WITHOUT", "TABLESPACE", "SCHEMA", NULL};
COMPLETE_WITH_LIST(list_TABLESET);
}
{
static const char *const list_ALTERTYPE[] =
{"OWNER TO", "SET SCHEMA", NULL};
+
COMPLETE_WITH_LIST(list_ALTERTYPE);
}
/* complete ALTER GROUP */
{"WORK", "TRANSACTION", NULL};
COMPLETE_WITH_LIST(list_TRANS);
- }
+ }
/* COMMIT */
- else if(pg_strcasecmp(prev_wd, "COMMIT") == 0)
+ else if (pg_strcasecmp(prev_wd, "COMMIT") == 0)
{
static const char *const list_COMMIT[] =
{"WORK", "TRANSACTION", "PREPARED", NULL};
COMPLETE_WITH_CONST("ON");
/*
- * If we have CLUSTER ON, then add the correct tablename as
- * well.
+ * If we have CLUSTER ON, then add the correct tablename as well.
*/
else if (pg_strcasecmp(prev3_wd, "CLUSTER") == 0 &&
pg_strcasecmp(prev_wd, "ON") == 0)
static const char *const list_COMMENT[] =
{"CAST", "CONVERSION", "DATABASE", "INDEX", "LANGUAGE", "RULE", "SCHEMA",
"SEQUENCE", "TABLE", "TYPE", "VIEW", "COLUMN", "AGGREGATE", "FUNCTION",
- "OPERATOR", "TRIGGER", "CONSTRAINT", "DOMAIN","LARGE OBJECT", NULL};
+ "OPERATOR", "TRIGGER", "CONSTRAINT", "DOMAIN", "LARGE OBJECT", NULL};
COMPLETE_WITH_LIST(list_COMMENT);
}
else if (pg_strcasecmp(prev2_wd, "COPY") == 0 ||
pg_strcasecmp(prev2_wd, "\\copy") == 0 ||
pg_strcasecmp(prev2_wd, "BINARY") == 0)
- {
- static const char *const list_FROMTO[] =
- {"FROM", "TO", NULL};
+ {
+ static const char *const list_FROMTO[] =
+ {"FROM", "TO", NULL};
- COMPLETE_WITH_LIST(list_FROMTO);
- }
+ COMPLETE_WITH_LIST(list_FROMTO);
+ }
/* If we have COPY|BINARY FROM|TO, complete with filename */
else if ((pg_strcasecmp(prev3_wd, "COPY") == 0 ||
pg_strcasecmp(prev3_wd, "\\copy") == 0 ||
pg_strcasecmp(prev4_wd, "BINARY") == 0) &&
(pg_strcasecmp(prev2_wd, "FROM") == 0 ||
pg_strcasecmp(prev2_wd, "TO") == 0))
- {
- static const char *const list_COPY[] =
- {"BINARY", "OIDS", "DELIMITER", "NULL", "CSV", NULL};
+ {
+ static const char *const list_COPY[] =
+ {"BINARY", "OIDS", "DELIMITER", "NULL", "CSV", NULL};
- COMPLETE_WITH_LIST(list_COPY);
- }
+ COMPLETE_WITH_LIST(list_COPY);
+ }
/* Handle COPY|BINARY FROM|TO filename CSV */
- else if (pg_strcasecmp(prev_wd, "CSV") == 0 &&
+ else if (pg_strcasecmp(prev_wd, "CSV") == 0 &&
(pg_strcasecmp(prev3_wd, "FROM") == 0 ||
pg_strcasecmp(prev3_wd, "TO") == 0))
{
- static const char *const list_CSV[] =
- {"HEADER", "QUOTE", "ESCAPE", "FORCE QUOTE", NULL};
+ static const char *const list_CSV[] =
+ {"HEADER", "QUOTE", "ESCAPE", "FORCE QUOTE", NULL};
- COMPLETE_WITH_LIST(list_CSV);
+ COMPLETE_WITH_LIST(list_CSV);
}
/* CREATE DATABASE */
{
static const char *const list_DATABASE[] =
{"OWNER", "TEMPLATE", "ENCODING", "TABLESPACE", "CONNECTION LIMIT",
- NULL};
+ NULL};
COMPLETE_WITH_LIST(list_DATABASE);
}
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
/*
- * Complete INDEX ON with a list of table columns
- * (which should really be in parens)
+ * Complete INDEX ON with a list of table columns (which
+ * should really be in parens)
*/
else if (pg_strcasecmp(prev4_wd, "INDEX") == 0 &&
pg_strcasecmp(prev2_wd, "ON") == 0)
/* CREATE TRIGGER */
/* complete CREATE TRIGGER with BEFORE,AFTER */
else if (pg_strcasecmp(prev3_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev2_wd, "TRIGGER") == 0)
+ pg_strcasecmp(prev2_wd, "TRIGGER") == 0)
{
static const char *const list_CREATETRIGGER[] =
- {"BEFORE", "AFTER", NULL};
- COMPLETE_WITH_LIST(list_CREATETRIGGER);
+ {"BEFORE", "AFTER", NULL};
+
+ COMPLETE_WITH_LIST(list_CREATETRIGGER);
}
/* complete CREATE TRIGGER BEFORE,AFTER sth with OR,ON */
else if (pg_strcasecmp(prev5_wd, "CREATE") == 0 &&
- pg_strcasecmp(prev4_wd, "TRIGGER") == 0 &&
- (pg_strcasecmp(prev2_wd, "BEFORE") == 0 ||
- pg_strcasecmp(prev2_wd, "AFTER") == 0))
+ pg_strcasecmp(prev4_wd, "TRIGGER") == 0 &&
+ (pg_strcasecmp(prev2_wd, "BEFORE") == 0 ||
+ pg_strcasecmp(prev2_wd, "AFTER") == 0))
{
static const char *const list_CREATETRIGGER2[] =
- {"ON","OR",NULL};
+ {"ON", "OR", NULL};
+
COMPLETE_WITH_LIST(list_CREATETRIGGER2);
}
pg_strcasecmp(prev2_wd, "GROUP") == 0 || pg_strcasecmp(prev2_wd, "USER") == 0))
{
static const char *const list_CREATEROLE[] =
- {"ADMIN","CONNECTION LIMIT","CREATEDB","CREATEROLE","CREATEUSER",
- "ENCRYPTED", "IN", "INHERIT", "LOGIN", "NOINHERIT", "NOLOGIN", "NOCREATEDB",
- "NOCREATEROLE", "NOCREATEUSER", "NOSUPERUSER", "ROLE", "SUPERUSER", "SYSID",
- "UNENCRYPTED",NULL};
- COMPLETE_WITH_LIST(list_CREATEROLE);
+ {"ADMIN", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE", "CREATEUSER",
+ "ENCRYPTED", "IN", "INHERIT", "LOGIN", "NOINHERIT", "NOLOGIN", "NOCREATEDB",
+ "NOCREATEROLE", "NOCREATEUSER", "NOSUPERUSER", "ROLE", "SUPERUSER", "SYSID",
+ "UNENCRYPTED", NULL};
+
+ COMPLETE_WITH_LIST(list_CREATEROLE);
}
- /* complete CREATE ROLE,USER,GROUP ENCRYPTED,UNENCRYPTED with PASSWORD */
+
+ /*
+ * complete CREATE ROLE,USER,GROUP ENCRYPTED,UNENCRYPTED with
+ * PASSWORD
+ */
else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
- (pg_strcasecmp(prev3_wd, "ROLE") == 0 ||
- pg_strcasecmp(prev3_wd, "GROUP") == 0 || pg_strcasecmp(prev3_wd, "USER") == 0) &&
- (pg_strcasecmp(prev_wd, "ENCRYPTED") == 0 || pg_strcasecmp(prev_wd, "UNENCRYPTED") == 0))
+ (pg_strcasecmp(prev3_wd, "ROLE") == 0 ||
+ pg_strcasecmp(prev3_wd, "GROUP") == 0 || pg_strcasecmp(prev3_wd, "USER") == 0) &&
+ (pg_strcasecmp(prev_wd, "ENCRYPTED") == 0 || pg_strcasecmp(prev_wd, "UNENCRYPTED") == 0))
{
- COMPLETE_WITH_CONST("PASSWORD");
+ COMPLETE_WITH_CONST("PASSWORD");
}
/* complete CREATE ROLE,USER,GROUP IN with ROLE,GROUP */
else if (pg_strcasecmp(prev4_wd, "CREATE") == 0 &&
- (pg_strcasecmp(prev3_wd, "ROLE") == 0 ||
- pg_strcasecmp(prev3_wd, "GROUP") == 0 || pg_strcasecmp(prev3_wd, "USER") == 0) &&
- pg_strcasecmp(prev_wd, "IN") == 0)
+ (pg_strcasecmp(prev3_wd, "ROLE") == 0 ||
+ pg_strcasecmp(prev3_wd, "GROUP") == 0 || pg_strcasecmp(prev3_wd, "USER") == 0) &&
+ pg_strcasecmp(prev_wd, "IN") == 0)
{
static const char *const list_CREATEROLE3[] =
- {"GROUP","ROLE",NULL};
- COMPLETE_WITH_LIST(list_CREATEROLE3);
+ {"GROUP", "ROLE", NULL};
+
+ COMPLETE_WITH_LIST(list_CREATEROLE3);
}
/* CREATE VIEW */
/* DECLARE */
else if (pg_strcasecmp(prev2_wd, "DECLARE") == 0)
{
- static const char *const list_DECLARE[] =
- {"BINARY", "INSENSITIVE", "SCROLL", "NO SCROLL", "CURSOR", NULL};
- COMPLETE_WITH_LIST(list_DECLARE);
+ static const char *const list_DECLARE[] =
+ {"BINARY", "INSENSITIVE", "SCROLL", "NO SCROLL", "CURSOR", NULL};
+
+ COMPLETE_WITH_LIST(list_DECLARE);
}
else if (pg_strcasecmp(prev_wd, "CURSOR") == 0)
{
- static const char *const list_DECLARECURSOR[] =
- {"WITH HOLD", "WITHOUT HOLD", "FOR", NULL};
- COMPLETE_WITH_LIST(list_DECLARECURSOR);
+ static const char *const list_DECLARECURSOR[] =
+ {"WITH HOLD", "WITHOUT HOLD", "FOR", NULL};
+
+ COMPLETE_WITH_LIST(list_DECLARECURSOR);
}
/* DELETE */
+
/*
* Complete DELETE with FROM (only if the word before that is not "ON"
* (cf. rules) or "BEFORE" or "AFTER" (cf. triggers) or GRANT)
pg_strcasecmp(prev3_wd, "AGGREGATE") == 0 &&
prev_wd[strlen(prev_wd) - 1] == ')'))
{
- static const char *const list_DROPCR[] =
- {"CASCADE", "RESTRICT", NULL};
- COMPLETE_WITH_LIST(list_DROPCR);
+ static const char *const list_DROPCR[] =
+ {"CASCADE", "RESTRICT", NULL};
+
+ COMPLETE_WITH_LIST(list_DROPCR);
}
/* EXPLAIN */
+
/*
* Complete EXPLAIN [ANALYZE] [VERBOSE] with list of EXPLAIN-able commands
*/
else if (pg_strcasecmp(prev_wd, "EXPLAIN") == 0)
{
- static const char *const list_EXPLAIN[] =
- {"SELECT","INSERT","DELETE","UPDATE","DECLARE","ANALYZE","VERBOSE",NULL};
- COMPLETE_WITH_LIST(list_EXPLAIN);
+ static const char *const list_EXPLAIN[] =
+ {"SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE", "ANALYZE", "VERBOSE", NULL};
+
+ COMPLETE_WITH_LIST(list_EXPLAIN);
}
else if (pg_strcasecmp(prev2_wd, "EXPLAIN") == 0 &&
- pg_strcasecmp(prev_wd, "ANALYZE") == 0)
+ pg_strcasecmp(prev_wd, "ANALYZE") == 0)
{
- static const char *const list_EXPLAIN[] =
- {"SELECT","INSERT","DELETE","UPDATE","DECLARE","VERBOSE",NULL};
- COMPLETE_WITH_LIST(list_EXPLAIN);
+ static const char *const list_EXPLAIN[] =
+ {"SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE", "VERBOSE", NULL};
+
+ COMPLETE_WITH_LIST(list_EXPLAIN);
}
else if (pg_strcasecmp(prev_wd, "VERBOSE") == 0 &&
pg_strcasecmp(prev3_wd, "VACUUM") != 0 &&
(pg_strcasecmp(prev2_wd, "ANALYZE") == 0 ||
pg_strcasecmp(prev2_wd, "EXPLAIN") == 0))
{
- static const char *const list_EXPLAIN[] =
- {"SELECT","INSERT","DELETE","UPDATE","DECLARE",NULL};
- COMPLETE_WITH_LIST(list_EXPLAIN);
+ static const char *const list_EXPLAIN[] =
+ {"SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE", NULL};
+
+ COMPLETE_WITH_LIST(list_EXPLAIN);
}
/* FETCH && MOVE */
}
/*
- * Complete FETCH with "FROM" or "IN". These are
- * equivalent, but we may as well tab-complete both: perhaps some
- * users prefer one variant or the other.
+ * Complete FETCH with "FROM" or "IN". These are equivalent,
+ * but we may as well tab-complete both: perhaps some users prefer one
+ * variant or the other.
*/
else if (pg_strcasecmp(prev3_wd, "FETCH") == 0 ||
pg_strcasecmp(prev3_wd, "MOVE") == 0)
COMPLETE_WITH_CONST("ON");
/*
- * Complete GRANT/REVOKE ON with a list of tables, views,
- * sequences, and indexes
+ * Complete GRANT/REVOKE ON with a list of tables, views, sequences,
+ * and indexes
*
- * keywords DATABASE, FUNCTION, LANGUAGE, SCHEMA added to query result
- * via UNION; seems to work intuitively
+ * keywords DATABASE, FUNCTION, LANGUAGE, SCHEMA added to query result via
+ * UNION; seems to work intuitively
*
- * Note: GRANT/REVOKE can get quite complex; tab-completion as
- * implemented here will only work if the privilege list contains
- * exactly one privilege
+ * Note: GRANT/REVOKE can get quite complex; tab-completion as implemented
+ * here will only work if the privilege list contains exactly one
+ * privilege
*/
else if ((pg_strcasecmp(prev3_wd, "GRANT") == 0 ||
pg_strcasecmp(prev3_wd, "REVOKE") == 0) &&
COMPLETE_WITH_LIST(constraint_list);
}
/* Complete SET ROLE */
- else if (pg_strcasecmp(prev2_wd, "SET") == 0 &&
+ else if (pg_strcasecmp(prev2_wd, "SET") == 0 &&
pg_strcasecmp(prev_wd, "ROLE") == 0)
COMPLETE_WITH_QUERY(Query_for_list_of_roles);
/* Complete SET SESSION with AUTHORIZATION or CHARACTERISTICS... */
COMPLETE_WITH_CONST("TO");
/* Suggest possible variable values */
else if (pg_strcasecmp(prev3_wd, "SET") == 0 &&
- (pg_strcasecmp(prev_wd, "TO") == 0 || strcmp(prev_wd, "=") == 0))
+ (pg_strcasecmp(prev_wd, "TO") == 0 || strcmp(prev_wd, "=") == 0))
{
if (pg_strcasecmp(prev2_wd, "DateStyle") == 0)
{
static const char *const my_list[] =
{"ISO", "SQL", "Postgres", "German",
- "YMD", "DMY", "MDY",
- "US", "European", "NonEuropean",
+ "YMD", "DMY", "MDY",
+ "US", "European", "NonEuropean",
"DEFAULT", NULL};
COMPLETE_WITH_LIST(my_list);
COMPLETE_WITH_CONST("SET");
/*
- * If the previous word is SET (and it wasn't caught above as the
- * _first_ word) the word before it was (hopefully) a table name and
- * we'll now make a list of attributes.
+ * If the previous word is SET (and it wasn't caught above as the _first_
+ * word) the word before it was (hopefully) a table name and we'll now
+ * make a list of attributes.
*/
else if (pg_strcasecmp(prev_wd, "SET") == 0)
COMPLETE_WITH_ATTR(prev2_wd);
/* UPDATE xx SET yy = */
else if (pg_strcasecmp(prev2_wd, "SET") == 0 &&
- pg_strcasecmp(prev4_wd, "UPDATE") == 0)
+ pg_strcasecmp(prev4_wd, "UPDATE") == 0)
COMPLETE_WITH_CONST("=");
/*
COMPLETE_WITH_LIST(my_list);
}
else if (strcmp(prev_wd, "\\cd") == 0 ||
- strcmp(prev_wd, "\\e") == 0 || strcmp(prev_wd, "\\edit") == 0 ||
+ strcmp(prev_wd, "\\e") == 0 || strcmp(prev_wd, "\\edit") == 0 ||
strcmp(prev_wd, "\\g") == 0 ||
- strcmp(prev_wd, "\\i") == 0 || strcmp(prev_wd, "\\include") == 0 ||
- strcmp(prev_wd, "\\o") == 0 || strcmp(prev_wd, "\\out") == 0 ||
+ strcmp(prev_wd, "\\i") == 0 || strcmp(prev_wd, "\\include") == 0 ||
+ strcmp(prev_wd, "\\o") == 0 || strcmp(prev_wd, "\\out") == 0 ||
strcmp(prev_wd, "\\s") == 0 ||
- strcmp(prev_wd, "\\w") == 0 || strcmp(prev_wd, "\\write") == 0
+ strcmp(prev_wd, "\\w") == 0 || strcmp(prev_wd, "\\write") == 0
)
matches = completion_matches(text, filename_completion_function);
- /*
- * Finally, we look through the list of "things", such as TABLE, INDEX
- * and check if that was the previous word. If so, execute the query
- * to get a list of them.
- */
+ /*
+ * Finally, we look through the list of "things", such as TABLE, INDEX and
+ * check if that was the previous word. If so, execute the query to get a
+ * list of them.
+ */
else
{
int i;
}
}
- /*
- * If we still don't have anything to match we have to fabricate some
- * sort of default list. If we were to just return NULL, readline
- * automatically attempts filename completion, and that's usually no
- * good.
- */
- if (matches == NULL)
- {
- COMPLETE_WITH_CONST("");
+ /*
+ * If we still don't have anything to match we have to fabricate some sort
+ * of default list. If we were to just return NULL, readline automatically
+ * attempts filename completion, and that's usually no good.
+ */
+ if (matches == NULL)
+ {
+ COMPLETE_WITH_CONST("");
#ifdef HAVE_RL_COMPLETION_APPEND_CHARACTER
- rl_completion_append_character = '\0';
+ rl_completion_append_character = '\0';
#endif
- }
-
- /* free storage */
- free(prev_wd);
- free(prev2_wd);
- free(prev3_wd);
- free(prev4_wd);
- free(prev5_wd);
-
- /* Return our Grand List O' Matches */
- return matches;
-}
+ }
+
+ /* free storage */
+ free(prev_wd);
+ free(prev2_wd);
+ free(prev3_wd);
+ free(prev4_wd);
+ free(prev5_wd);
+
+ /* Return our Grand List O' Matches */
+ return matches;
+ }
/* This one gives you one from a list of things you can put after CREATE or DROP
as defined above.
*/
-static char *
-create_command_generator(const char *text, int state)
-{
- static int list_index,
- string_length;
- const char *name;
-
- /* If this is the first time for this completion, init some values */
- if (state == 0)
- {
- list_index = 0;
- string_length = strlen(text);
- }
-
- /* find something that matches */
- while ((name = words_after_create[list_index++].name))
- if (pg_strncasecmp(name, text, string_length) == 0)
- return pg_strdup(name);
-
- /* if nothing matches, return NULL */
- return NULL;
-}
+ static char *
+ create_command_generator(const char *text, int state)
+ {
+ static int list_index,
+ string_length;
+ const char *name;
+
+ /* If this is the first time for this completion, init some values */
+ if (state == 0)
+ {
+ list_index = 0;
+ string_length = strlen(text);
+ }
+
+ /* find something that matches */
+ while ((name = words_after_create[list_index++].name))
+ if (pg_strncasecmp(name, text, string_length) == 0)
+ return pg_strdup(name);
+
+ /* if nothing matches, return NULL */
+ return NULL;
+ }
/* The following two functions are wrappers for _complete_from_query */
-static char *
-complete_from_query(const char *text, int state)
-{
- return _complete_from_query(0, text, state);
-}
+ static char *
+ complete_from_query(const char *text, int state)
+ {
+ return _complete_from_query(0, text, state);
+ }
-static char *
-complete_from_schema_query(const char *text, int state)
-{
- return _complete_from_query(1, text, state);
-}
+ static char *
+ complete_from_schema_query(const char *text, int state)
+ {
+ return _complete_from_query(1, text, state);
+ }
/* This creates a list of matching things, according to a query pointed to
See top of file for examples of both kinds of query.
*/
-static char *
-_complete_from_query(int is_schema_query, const char *text, int state)
-{
- static int list_index,
- string_length;
- static PGresult *result = NULL;
-
- /*
- * If this is the first time for this completion, we fetch a list of
- * our "things" from the backend.
- */
- if (state == 0)
- {
- PQExpBufferData query_buffer;
- char *e_text;
- char *e_info_charp;
-
- list_index = 0;
- string_length = strlen(text);
-
- /* Free any prior result */
- PQclear(result);
- result = NULL;
-
- /* Set up suitably-escaped copies of textual inputs */
- e_text = pg_malloc(string_length * 2 + 1);
- PQescapeString(e_text, text, string_length);
-
- if (completion_info_charp)
- {
- size_t charp_len;
-
- charp_len = strlen(completion_info_charp);
- e_info_charp = pg_malloc(charp_len * 2 + 1);
- PQescapeString(e_info_charp, completion_info_charp,
- charp_len);
- }
- else
- e_info_charp = NULL;
-
- initPQExpBuffer(&query_buffer);
-
- if (is_schema_query)
- {
- /* completion_squery gives us the pieces to assemble */
- const char *qualresult = completion_squery->qualresult;
-
- if (qualresult == NULL)
- qualresult = completion_squery->result;
-
- /* Get unqualified names matching the input-so-far */
- appendPQExpBuffer(&query_buffer, "SELECT %s FROM %s WHERE ",
- completion_squery->result,
- completion_squery->catname);
- if (completion_squery->selcondition)
- appendPQExpBuffer(&query_buffer, "%s AND ",
- completion_squery->selcondition);
- appendPQExpBuffer(&query_buffer, "substring(%s,1,%d)='%s'",
- completion_squery->result,
- string_length, e_text);
- appendPQExpBuffer(&query_buffer, " AND %s",
- completion_squery->viscondition);
-
- /*
- * When fetching relation names, suppress system catalogs
- * unless the input-so-far begins with "pg_". This is a
- * compromise between not offering system catalogs for
- * completion at all, and having them swamp the result when
- * the input is just "p".
- */
- if (strcmp(completion_squery->catname,
- "pg_catalog.pg_class c") == 0 &&
- strncmp(text, "pg_", 3) !=0)
- {
- appendPQExpBuffer(&query_buffer,
- " AND c.relnamespace <> (SELECT oid FROM"
- " pg_catalog.pg_namespace WHERE nspname = 'pg_catalog')");
- }
-
- /*
- * Add in matching schema names, but only if there is more
- * than one potential match among schema names.
- */
- appendPQExpBuffer(&query_buffer, "\nUNION\n"
- "SELECT pg_catalog.quote_ident(n.nspname) || '.' "
- "FROM pg_catalog.pg_namespace n "
- "WHERE substring(pg_catalog.quote_ident(n.nspname) || '.',1,%d)='%s'",
- string_length, e_text);
- appendPQExpBuffer(&query_buffer,
- " AND (SELECT pg_catalog.count(*)"
- " FROM pg_catalog.pg_namespace"
- " WHERE substring(pg_catalog.quote_ident(nspname) || '.',1,%d) ="
- " substring('%s',1,pg_catalog.length(pg_catalog.quote_ident(nspname))+1)) > 1",
- string_length, e_text);
-
- /*
- * Add in matching qualified names, but only if there is
- * exactly one schema matching the input-so-far.
- */
- appendPQExpBuffer(&query_buffer, "\nUNION\n"
- "SELECT pg_catalog.quote_ident(n.nspname) || '.' || %s "
- "FROM %s, pg_catalog.pg_namespace n "
- "WHERE %s = n.oid AND ",
- qualresult,
- completion_squery->catname,
- completion_squery->namespace);
- if (completion_squery->selcondition)
- appendPQExpBuffer(&query_buffer, "%s AND ",
- completion_squery->selcondition);
- appendPQExpBuffer(&query_buffer, "substring(pg_catalog.quote_ident(n.nspname) || '.' || %s,1,%d)='%s'",
- qualresult,
- string_length, e_text);
-
- /*
- * This condition exploits the single-matching-schema rule to
- * speed up the query
- */
- appendPQExpBuffer(&query_buffer,
- " AND substring(pg_catalog.quote_ident(n.nspname) || '.',1,%d) ="
- " substring('%s',1,pg_catalog.length(pg_catalog.quote_ident(n.nspname))+1)",
- string_length, e_text);
- appendPQExpBuffer(&query_buffer,
- " AND (SELECT pg_catalog.count(*)"
- " FROM pg_catalog.pg_namespace"
- " WHERE substring(pg_catalog.quote_ident(nspname) || '.',1,%d) ="
- " substring('%s',1,pg_catalog.length(pg_catalog.quote_ident(nspname))+1)) = 1",
- string_length, e_text);
-
- /* If an addon query was provided, use it */
- if (completion_charp)
- appendPQExpBuffer(&query_buffer, "\n%s", completion_charp);
- }
- else
- {
- /* completion_charp is an sprintf-style format string */
- appendPQExpBuffer(&query_buffer, completion_charp,
- string_length, e_text, e_info_charp);
- }
-
- /* Limit the number of records in the result */
- appendPQExpBuffer(&query_buffer, "\nLIMIT %d",
- completion_max_records);
-
- result = exec_query(query_buffer.data);
-
- termPQExpBuffer(&query_buffer);
- free(e_text);
- if (e_info_charp)
- free(e_info_charp);
- }
-
- /* Find something that matches */
- if (result && PQresultStatus(result) == PGRES_TUPLES_OK)
- {
- const char *item;
-
- while (list_index < PQntuples(result) &&
- (item = PQgetvalue(result, list_index++, 0)))
- if (pg_strncasecmp(text, item, string_length) == 0)
- return pg_strdup(item);
- }
-
- /* If nothing matches, free the db structure and return null */
- PQclear(result);
- result = NULL;
- return NULL;
-}
+ static char *
+ _complete_from_query(int is_schema_query, const char *text, int state)
+ {
+ static int list_index,
+ string_length;
+ static PGresult *result = NULL;
+
+ /*
+ * If this is the first time for this completion, we fetch a list of our
+ * "things" from the backend.
+ */
+ if (state == 0)
+ {
+ PQExpBufferData query_buffer;
+ char *e_text;
+ char *e_info_charp;
+
+ list_index = 0;
+ string_length = strlen(text);
+
+ /* Free any prior result */
+ PQclear(result);
+ result = NULL;
+
+ /* Set up suitably-escaped copies of textual inputs */
+ e_text = pg_malloc(string_length * 2 + 1);
+ PQescapeString(e_text, text, string_length);
+
+ if (completion_info_charp)
+ {
+ size_t charp_len;
+
+ charp_len = strlen(completion_info_charp);
+ e_info_charp = pg_malloc(charp_len * 2 + 1);
+ PQescapeString(e_info_charp, completion_info_charp,
+ charp_len);
+ }
+ else
+ e_info_charp = NULL;
+
+ initPQExpBuffer(&query_buffer);
+
+ if (is_schema_query)
+ {
+ /* completion_squery gives us the pieces to assemble */
+ const char *qualresult = completion_squery->qualresult;
+
+ if (qualresult == NULL)
+ qualresult = completion_squery->result;
+
+ /* Get unqualified names matching the input-so-far */
+ appendPQExpBuffer(&query_buffer, "SELECT %s FROM %s WHERE ",
+ completion_squery->result,
+ completion_squery->catname);
+ if (completion_squery->selcondition)
+ appendPQExpBuffer(&query_buffer, "%s AND ",
+ completion_squery->selcondition);
+ appendPQExpBuffer(&query_buffer, "substring(%s,1,%d)='%s'",
+ completion_squery->result,
+ string_length, e_text);
+ appendPQExpBuffer(&query_buffer, " AND %s",
+ completion_squery->viscondition);
+
+ /*
+ * When fetching relation names, suppress system catalogs unless the
+ * input-so-far begins with "pg_". This is a compromise between not
+ * offering system catalogs for completion at all, and having them swamp
+ * the result when the input is just "p".
+ */
+ if (strcmp(completion_squery->catname,
+ "pg_catalog.pg_class c") == 0 &&
+ strncmp(text, "pg_", 3) !=0)
+ {
+ appendPQExpBuffer(&query_buffer,
+ " AND c.relnamespace <> (SELECT oid FROM"
+ " pg_catalog.pg_namespace WHERE nspname = 'pg_catalog')");
+ }
+
+ /*
+ * Add in matching schema names, but only if there is more than one
+ * potential match among schema names.
+ */
+ appendPQExpBuffer(&query_buffer, "\nUNION\n"
+ "SELECT pg_catalog.quote_ident(n.nspname) || '.' "
+ "FROM pg_catalog.pg_namespace n "
+ "WHERE substring(pg_catalog.quote_ident(n.nspname) || '.',1,%d)='%s'",
+ string_length, e_text);
+ appendPQExpBuffer(&query_buffer,
+ " AND (SELECT pg_catalog.count(*)"
+ " FROM pg_catalog.pg_namespace"
+ " WHERE substring(pg_catalog.quote_ident(nspname) || '.',1,%d) ="
+ " substring('%s',1,pg_catalog.length(pg_catalog.quote_ident(nspname))+1)) > 1",
+ string_length, e_text);
+
+ /*
+ * Add in matching qualified names, but only if there is exactly one
+ * schema matching the input-so-far.
+ */
+ appendPQExpBuffer(&query_buffer, "\nUNION\n"
+ "SELECT pg_catalog.quote_ident(n.nspname) || '.' || %s "
+ "FROM %s, pg_catalog.pg_namespace n "
+ "WHERE %s = n.oid AND ",
+ qualresult,
+ completion_squery->catname,
+ completion_squery->namespace);
+ if (completion_squery->selcondition)
+ appendPQExpBuffer(&query_buffer, "%s AND ",
+ completion_squery->selcondition);
+ appendPQExpBuffer(&query_buffer, "substring(pg_catalog.quote_ident(n.nspname) || '.' || %s,1,%d)='%s'",
+ qualresult,
+ string_length, e_text);
+
+ /*
+ * This condition exploits the single-matching-schema rule to speed up
+ * the query
+ */
+ appendPQExpBuffer(&query_buffer,
+ " AND substring(pg_catalog.quote_ident(n.nspname) || '.',1,%d) ="
+ " substring('%s',1,pg_catalog.length(pg_catalog.quote_ident(n.nspname))+1)",
+ string_length, e_text);
+ appendPQExpBuffer(&query_buffer,
+ " AND (SELECT pg_catalog.count(*)"
+ " FROM pg_catalog.pg_namespace"
+ " WHERE substring(pg_catalog.quote_ident(nspname) || '.',1,%d) ="
+ " substring('%s',1,pg_catalog.length(pg_catalog.quote_ident(nspname))+1)) = 1",
+ string_length, e_text);
+
+ /* If an addon query was provided, use it */
+ if (completion_charp)
+ appendPQExpBuffer(&query_buffer, "\n%s", completion_charp);
+ }
+ else
+ {
+ /* completion_charp is an sprintf-style format string */
+ appendPQExpBuffer(&query_buffer, completion_charp,
+ string_length, e_text, e_info_charp);
+ }
+
+ /* Limit the number of records in the result */
+ appendPQExpBuffer(&query_buffer, "\nLIMIT %d",
+ completion_max_records);
+
+ result = exec_query(query_buffer.data);
+
+ termPQExpBuffer(&query_buffer);
+ free(e_text);
+ if (e_info_charp)
+ free(e_info_charp);
+ }
+
+ /* Find something that matches */
+ if (result && PQresultStatus(result) == PGRES_TUPLES_OK)
+ {
+ const char *item;
+
+ while (list_index < PQntuples(result) &&
+ (item = PQgetvalue(result, list_index++, 0)))
+ if (pg_strncasecmp(text, item, string_length) == 0)
+ return pg_strdup(item);
+ }
+
+ /* If nothing matches, free the db structure and return null */
+ PQclear(result);
+ result = NULL;
+ return NULL;
+ }
/* This function returns in order one of a fixed, NULL pointer terminated list
of strings (if matching). This can be used if there are only a fixed number
SQL words that can appear at certain spot.
*/
-static char *
-complete_from_list(const char *text, int state)
-{
- static int string_length,
- list_index,
- matches;
- static bool casesensitive;
- const char *item;
-
- /* need to have a list */
- psql_assert(completion_charpp);
-
- /* Initialization */
- if (state == 0)
- {
- list_index = 0;
- string_length = strlen(text);
- casesensitive = true;
- matches = 0;
- }
-
- while ((item = completion_charpp[list_index++]))
- {
- /* First pass is case sensitive */
- if (casesensitive && strncmp(text, item, string_length) == 0)
- {
- matches++;
- return pg_strdup(item);
- }
-
- /* Second pass is case insensitive, don't bother counting matches */
- if (!casesensitive && pg_strncasecmp(text, item, string_length) == 0)
- return pg_strdup(item);
- }
-
- /*
- * No matches found. If we're not case insensitive already, lets
- * switch to being case insensitive and try again
- */
- if (casesensitive && matches == 0)
- {
- casesensitive = false;
- list_index = 0;
- state++;
- return (complete_from_list(text, state));
- }
-
- /* If no more matches, return null. */
- return NULL;
-}
+ static char *
+ complete_from_list(const char *text, int state)
+ {
+ static int string_length,
+ list_index,
+ matches;
+ static bool casesensitive;
+ const char *item;
+
+ /* need to have a list */
+ psql_assert(completion_charpp);
+
+ /* Initialization */
+ if (state == 0)
+ {
+ list_index = 0;
+ string_length = strlen(text);
+ casesensitive = true;
+ matches = 0;
+ }
+
+ while ((item = completion_charpp[list_index++]))
+ {
+ /* First pass is case sensitive */
+ if (casesensitive && strncmp(text, item, string_length) == 0)
+ {
+ matches++;
+ return pg_strdup(item);
+ }
+
+ /* Second pass is case insensitive, don't bother counting matches */
+ if (!casesensitive && pg_strncasecmp(text, item, string_length) == 0)
+ return pg_strdup(item);
+ }
+
+ /*
+ * No matches found. If we're not case insensitive already, lets switch
+ * to being case insensitive and try again
+ */
+ if (casesensitive && matches == 0)
+ {
+ casesensitive = false;
+ list_index = 0;
+ state++;
+ return (complete_from_list(text, state));
+ }
+
+ /* If no more matches, return null. */
+ return NULL;
+ }
/* This function returns one fixed string the first time even if it doesn't
will be overwritten.
The string to be passed must be in completion_charp.
*/
-static char *
-complete_from_const(const char *text, int state)
-{
- (void) text; /* We don't care about what was entered
- * already. */
+ static char *
+ complete_from_const(const char *text, int state)
+ {
+ (void) text; /* We don't care about
+ * what was entered
+ * already. */
- psql_assert(completion_charp);
- if (state == 0)
- return pg_strdup(completion_charp);
- else
- return NULL;
-}
+ psql_assert(completion_charp);
+ if (state == 0)
+ return pg_strdup(completion_charp);
+ else
+ return NULL;
+ }
* Execute a query and report any errors. This should be the preferred way of
* talking to the database in this file.
*/
-static PGresult *
-exec_query(const char *query)
-{
- PGresult *result;
+ static PGresult *
+ exec_query(const char *query)
+ {
+ PGresult *result;
- if (query == NULL || !pset.db || PQstatus(pset.db) != CONNECTION_OK)
- return NULL;
+ if (query == NULL || !pset.db || PQstatus(pset.db) != CONNECTION_OK)
+ return NULL;
- result = PQexec(pset.db, query);
+ result = PQexec(pset.db, query);
- if (result != NULL && PQresultStatus(result) != PGRES_TUPLES_OK)
- {
+ if (result != NULL && PQresultStatus(result) != PGRES_TUPLES_OK)
+ {
#if 0
- psql_error("tab completion: %s failed - %s\n",
- query, PQresStatus(PQresultStatus(result)));
+ psql_error("tab completion: %s failed - %s\n",
+ query, PQresStatus(PQresultStatus(result)));
#endif
- PQclear(result);
- result = NULL;
- }
+ PQclear(result);
+ result = NULL;
+ }
- return result;
-}
+ return result;
+ }
* skip that many words; e.g. skip=1 finds the word before the
* previous one. Return value is NULL or a malloc'ed string.
*/
-static char *
-previous_word(int point, int skip)
-{
- int i,
- start = 0,
- end = -1,
- inquotes = 0;
- char *s;
-
- while (skip-- >= 0)
- {
- /* first we look for a space before the current word */
- for (i = point; i >= 0; i--)
- if (rl_line_buffer[i] == ' ')
- break;
-
- /* now find the first non-space which then constitutes the end */
- for (; i >= 0; i--)
- if (rl_line_buffer[i] != ' ')
- {
- end = i;
- break;
- }
-
- /*
- * If no end found we return null, because there is no word before
- * the point
- */
- if (end == -1)
- return NULL;
-
- /*
- * Otherwise we now look for the start. The start is either the
- * last character before any space going backwards from the end,
- * or it's simply character 0
- */
- for (start = end; start > 0; start--)
- {
- if (rl_line_buffer[start] == '"')
- inquotes = !inquotes;
- if ((rl_line_buffer[start - 1] == ' ') && inquotes == 0)
- break;
- }
-
- point = start;
- }
-
- /* make a copy */
- s = pg_malloc(end - start + 2);
-
- strncpy(s, &rl_line_buffer[start], end - start + 1);
- s[end - start + 1] = '\0';
-
- return s;
-}
+ static char *
+ previous_word(int point, int skip)
+ {
+ int i,
+ start = 0,
+ end = -1,
+ inquotes = 0;
+ char *s;
+
+ while (skip-- >= 0)
+ {
+ /* first we look for a space before the current word */
+ for (i = point; i >= 0; i--)
+ if (rl_line_buffer[i] == ' ')
+ break;
+
+ /* now find the first non-space which then constitutes the end */
+ for (; i >= 0; i--)
+ if (rl_line_buffer[i] != ' ')
+ {
+ end = i;
+ break;
+ }
+
+ /*
+ * If no end found we return null, because there is no word before the
+ * point
+ */
+ if (end == -1)
+ return NULL;
+
+ /*
+ * Otherwise we now look for the start. The start is either the last
+ * character before any space going backwards from the end, or it's
+ * simply character 0
+ */
+ for (start = end; start > 0; start--)
+ {
+ if (rl_line_buffer[start] == '"')
+ inquotes = !inquotes;
+ if ((rl_line_buffer[start - 1] == ' ') && inquotes == 0)
+ break;
+ }
+
+ point = start;
+ }
+
+ /* make a copy */
+ s = pg_malloc(end - start + 2);
+
+ strncpy(s, &rl_line_buffer[start], end - start + 1);
+ s[end - start + 1] = '\0';
+
+ return s;
+ }
* psql internal. Currently disabled because it is reported not to
* cooperate with certain versions of readline.
*/
-static char *
-quote_file_name(char *text, int match_type, char *quote_pointer)
-{
- char *s;
- size_t length;
-
- (void) quote_pointer; /* not used */
-
- length = strlen(text) +(match_type == SINGLE_MATCH ? 3 : 2);
- s = pg_malloc(length);
- s[0] = '\'';
- strcpy(s + 1, text);
- if (match_type == SINGLE_MATCH)
- s[length - 2] = '\'';
- s[length - 1] = '\0';
- return s;
-}
-
-
-
-static char *
-dequote_file_name(char *text, char quote_char)
-{
- char *s;
- size_t length;
-
- if (!quote_char)
- return pg_strdup(text);
-
- length = strlen(text);
- s = pg_malloc(length - 2 + 1);
- strncpy(s, text +1, length - 2);
- s[length] = '\0';
-
- return s;
-}
+ static char *
+ quote_file_name(char *text, int match_type, char *quote_pointer)
+ {
+ char *s;
+ size_t length;
+
+ (void) quote_pointer; /* not used */
+
+ length = strlen(text) +(match_type == SINGLE_MATCH ? 3 : 2);
+ s = pg_malloc(length);
+ s[0] = '\'';
+ strcpy(s + 1, text);
+ if (match_type == SINGLE_MATCH)
+ s[length - 2] = '\'';
+ s[length - 1] = '\0';
+ return s;
+ }
+
+
+
+ static char *
+ dequote_file_name(char *text, char quote_char)
+ {
+ char *s;
+ size_t length;
+
+ if (!quote_char)
+ return pg_strdup(text);
+
+ length = strlen(text);
+ s = pg_malloc(length - 2 + 1);
+ strncpy(s, text +1, length - 2);
+ s[length] = '\0';
+
+ return s;
+ }
#endif /* 0 */
#endif /* USE_READLINE */
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/variables.c,v 1.21 2005/01/01 05:43:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/variables.c,v 1.22 2005/10/15 02:49:41 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
return false; /* accept "off" or "OFF" as true */
/*
- * for backwards compatibility, anything except "off" or "OFF" is
- * taken as "true"
+ * for backwards compatibility, anything except "off" or "OFF" is taken as
+ * "true"
*/
return true;
}
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.18 2005/08/15 21:02:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.19 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "common.h"
#ifndef HAVE_INT_OPTRESET
-int optreset;
+int optreset;
#endif
password = simple_prompt("Password: ", 100, false);
/*
- * Start the connection. Loop until we have a password if requested
- * by backend.
+ * Start the connection. Loop until we have a password if requested by
+ * backend.
*/
do
{
*
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/scripts/common.h,v 1.11 2005/08/15 21:02:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/common.h,v 1.12 2005/10/15 02:49:41 momjian Exp $
*/
#ifndef COMMON_H
#define COMMON_H
#include "getopt_long.h"
#ifndef HAVE_INT_OPTRESET
-extern int optreset;
+extern int optreset;
#endif
typedef void (*help_handler) (const char *progname);
extern const char *get_user_name(const char *progname);
extern void handle_help_version_opts(int argc, char *argv[],
- const char *fixed_progname,
- help_handler hlp);
+ const char *fixed_progname,
+ help_handler hlp);
extern PGconn *connectDatabase(const char *dbname, const char *pghost,
- const char *pgport, const char *pguser,
- bool require_password, const char *progname);
+ const char *pgport, const char *pguser,
+ bool require_password, const char *progname);
extern PGresult *executeQuery(PGconn *conn, const char *query,
- const char *progname, bool echo);
+ const char *progname, bool echo);
extern void executeCommand(PGconn *conn, const char *query,
- const char *progname, bool echo);
+ const char *progname, bool echo);
extern int check_yesno_response(const char *string);
-#endif /* COMMON_H */
+#endif /* COMMON_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/createlang.c,v 1.20 2005/09/05 23:50:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/createlang.c,v 1.21 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
progname);
printfPQExpBuffer(&sql, "SELECT lanname as \"%s\", "
- "(CASE WHEN lanpltrusted THEN '%s' ELSE '%s' END) as \"%s\" "
- "FROM pg_catalog.pg_language WHERE lanispl;",
+ "(CASE WHEN lanpltrusted THEN '%s' ELSE '%s' END) as \"%s\" "
+ "FROM pg_catalog.pg_language WHERE lanispl;",
_("Name"), _("yes"), _("no"), _("Trusted?"));
result = executeQuery(conn, sql.data, progname, echo);
/*
* Make sure the language isn't already installed
*/
- printfPQExpBuffer(&sql,
- "SELECT oid FROM pg_catalog.pg_language WHERE lanname = '%s';",
+ printfPQExpBuffer(&sql,
+ "SELECT oid FROM pg_catalog.pg_language WHERE lanname = '%s';",
langname);
result = executeQuery(conn, sql.data, progname, echo);
if (PQntuples(result) > 0)
{
PQfinish(conn);
fprintf(stderr,
- _("%s: language \"%s\" is already installed in database \"%s\"\n"),
+ _("%s: language \"%s\" is already installed in database \"%s\"\n"),
progname, langname, dbname);
/* separate exit status for "already installed" */
exit(2);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/createuser.c,v 1.20 2005/09/30 07:58:01 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/createuser.c,v 1.21 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
printf(_(" -l, --login role can login (default)\n"));
printf(_(" -L, --no-login role cannot login\n"));
printf(_(" -i, --inherit role inherits privileges of roles it is a\n"
- " member of (default)\n"));
+ " member of (default)\n"));
printf(_(" -I, --no-inherit role does not inherit privileges\n"));
printf(_(" -c, --connection-limit=N connection limit for role (default: no limit)\n"));
printf(_(" -P, --pwprompt assign a password to new role\n"));
printf(_(" -U, --username=USERNAME user name to connect as (not the one to create)\n"));
printf(_(" -W, --password prompt for password to connect\n"));
printf(_("\nIf one of -s, -S, -d, -D, -r, -R and ROLENAME is not specified, you will\n"
- "be prompted interactively.\n"));
+ "be prompted interactively.\n"));
printf(_("\nReport bugs to .\n"));
}
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/droplang.c,v 1.17 2005/08/15 21:02:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/droplang.c,v 1.18 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
printQueryOpt popt;
- conn = connectDatabase(dbname, host, port, username, password,
+ conn = connectDatabase(dbname, host, port, username, password,
progname);
printfPQExpBuffer(&sql, "SELECT lanname as \"%s\", "
- "(CASE WHEN lanpltrusted THEN '%s' ELSE '%s' END) as \"%s\" "
- "FROM pg_catalog.pg_language WHERE lanispl;",
+ "(CASE WHEN lanpltrusted THEN '%s' ELSE '%s' END) as \"%s\" "
+ "FROM pg_catalog.pg_language WHERE lanispl;",
_("Name"), _("yes"), _("no"), _("Trusted?"));
result = executeQuery(conn, sql.data, progname, echo);
if (langname == NULL)
{
- fprintf(stderr, _("%s: missing required argument language name\n"),
+ fprintf(stderr, _("%s: missing required argument language name\n"),
progname);
- fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
+ fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
progname);
exit(1);
}
conn = connectDatabase(dbname, host, port, username, password, progname);
/*
- * Force schema search path to be just pg_catalog, so that we don't
- * have to be paranoid about search paths below.
+ * Force schema search path to be just pg_catalog, so that we don't have
+ * to be paranoid about search paths below.
*/
executeCommand(conn, "SET search_path = pg_catalog;",
progname, echo);
/*
- * Make sure the language is installed and find the OIDs of the
- * handler and validator functions
+ * Make sure the language is installed and find the OIDs of the handler
+ * and validator functions
*/
printfPQExpBuffer(&sql, "SELECT lanplcallfoid, lanvalidator "
- "FROM pg_language WHERE lanname = '%s' AND lanispl;",
+ "FROM pg_language WHERE lanname = '%s' AND lanispl;",
langname);
result = executeQuery(conn, sql.data, progname, echo);
if (PQntuples(result) == 0)
* Check that the handler function isn't used by some other language
*/
printfPQExpBuffer(&sql, "SELECT count(*) FROM pg_language "
- "WHERE lanplcallfoid = %u AND lanname <> '%s';",
+ "WHERE lanplcallfoid = %u AND lanname <> '%s';",
lanplcallfoid, langname);
result = executeQuery(conn, sql.data, progname, echo);
if (strcmp(PQgetvalue(result, 0, 0), "0") == 0)
{
printfPQExpBuffer(&sql, "SELECT proname, (SELECT nspname "
"FROM pg_namespace ns WHERE ns.oid = pronamespace) "
- "AS prons FROM pg_proc WHERE oid = %u;",
+ "AS prons FROM pg_proc WHERE oid = %u;",
lanplcallfoid);
result = executeQuery(conn, sql.data, progname, echo);
handler = strdup(PQgetvalue(result, 0, 0));
if (OidIsValid(lanvalidator))
{
printfPQExpBuffer(&sql, "SELECT count(*) FROM pg_language "
- "WHERE lanvalidator = %u AND lanname <> '%s';",
+ "WHERE lanvalidator = %u AND lanname <> '%s';",
lanvalidator, langname);
result = executeQuery(conn, sql.data, progname, echo);
if (strcmp(PQgetvalue(result, 0, 0), "0") == 0)
{
printfPQExpBuffer(&sql, "SELECT proname, (SELECT nspname "
"FROM pg_namespace ns WHERE ns.oid = pronamespace) "
- "AS prons FROM pg_proc WHERE oid = %u;",
+ "AS prons FROM pg_proc WHERE oid = %u;",
lanvalidator);
result = executeQuery(conn, sql.data, progname, echo);
validator = strdup(PQgetvalue(result, 0, 0));
*/
printfPQExpBuffer(&sql, "DROP LANGUAGE \"%s\";\n", langname);
if (!keephandler)
- appendPQExpBuffer(&sql, "DROP FUNCTION \"%s\".\"%s\" ();\n",
+ appendPQExpBuffer(&sql, "DROP FUNCTION \"%s\".\"%s\" ();\n",
handler_ns, handler);
if (!keepvalidator)
- appendPQExpBuffer(&sql, "DROP FUNCTION \"%s\".\"%s\" (oid);\n",
+ appendPQExpBuffer(&sql, "DROP FUNCTION \"%s\".\"%s\" (oid);\n",
validator_ns, validator);
if (echo)
printf("%s", sql.data);
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/scripts/reindexdb.c,v 1.3 2005/09/30 09:56:26 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/reindexdb.c,v 1.4 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void reindex_one_database(const char *name, const char *dbname,
- const char *type, const char *host,
- const char *port, const char *username,
- bool password, const char *progname,
- bool echo, bool quiet);
+ const char *type, const char *host,
+ const char *port, const char *username,
+ bool password, const char *progname,
+ bool echo, bool quiet);
static void reindex_all_databases(const char *host, const char *port,
- const char *username, bool password,
- const char *progname, bool echo,
- bool quiet);
+ const char *username, bool password,
+ const char *progname, bool echo,
+ bool quiet);
static void reindex_system_catalogs(const char *dbname,
- const char *host, const char *port,
- const char *username, bool password,
- const char *progname, bool echo,
- bool quiet);
+ const char *host, const char *port,
+ const char *username, bool password,
+ const char *progname, bool echo,
+ bool quiet);
static void help(const char *progname);
int
{NULL, 0, NULL, 0}
};
- const char *progname;
- int optindex;
- int c;
+ const char *progname;
+ int optindex;
+ int c;
- const char *dbname = NULL;
- const char *host = NULL;
- const char *port = NULL;
- const char *username = NULL;
+ const char *dbname = NULL;
+ const char *host = NULL;
+ const char *port = NULL;
+ const char *username = NULL;
bool password = false;
bool syscatalog = false;
bool alldb = false;
bool echo = false;
bool quiet = false;
- const char *table = NULL;
- const char *index = NULL;
-
+ const char *table = NULL;
+ const char *index = NULL;
+
progname = get_progname(argv[0]);
set_pglocale_pgservice(argv[0], "pgscripts");
}
reindex_all_databases(host, port, username, password,
- progname, echo, quiet);
+ progname, echo, quiet);
}
else if (syscatalog)
{
}
reindex_system_catalogs(dbname, host, port, username, password,
- progname, echo, quiet);
+ progname, echo, quiet);
}
else
{
if (index)
reindex_one_database(index, dbname, "INDEX", host, port,
- username, password, progname, echo, quiet);
+ username, password, progname, echo, quiet);
if (table)
reindex_one_database(table, dbname, "TABLE", host, port,
- username, password, progname, echo, quiet);
+ username, password, progname, echo, quiet);
/* reindex database only if index or table is not specified */
if (index == NULL && table == NULL)
reindex_one_database(dbname, dbname, "DATABASE", host, port,
- username, password, progname, echo, quiet);
+ username, password, progname, echo, quiet);
}
exit(0);
static void
reindex_one_database(const char *name, const char *dbname, const char *type,
- const char *host, const char *port, const char *username,
- bool password, const char *progname, bool echo,
- bool quiet)
+ const char *host, const char *port, const char *username,
+ bool password, const char *progname, bool echo,
+ bool quiet)
{
- PQExpBufferData sql;
+ PQExpBufferData sql;
- PGconn *conn;
- PGresult *result;
+ PGconn *conn;
+ PGresult *result;
initPQExpBuffer(&sql);
appendPQExpBuffer(&sql, ";\n");
conn = connectDatabase(dbname, host, port, username, password, progname);
-
+
if (echo)
printf("%s", sql.data);
result = PQexec(conn, sql.data);
static void
reindex_all_databases(const char *host, const char *port,
- const char *username, bool password,
- const char *progname, bool echo, bool quiet)
+ const char *username, bool password,
+ const char *progname, bool echo, bool quiet)
{
- PGconn *conn;
- PGresult *result;
- int i;
+ PGconn *conn;
+ PGresult *result;
+ int i;
conn = connectDatabase("postgres", host, port, username, password, progname);
result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn;", progname, echo);
for (i = 0; i < PQntuples(result); i++)
{
- char *dbname = PQgetvalue(result, i, 0);
+ char *dbname = PQgetvalue(result, i, 0);
if (!quiet)
fprintf(stderr, _("%s: reindexing database \"%s\"\n"), progname, dbname);
reindex_one_database(dbname, dbname, "DATABASE", host, port, username,
- password, progname, echo, quiet);
+ password, progname, echo, quiet);
}
PQclear(result);
static void
reindex_system_catalogs(const char *dbname, const char *host, const char *port,
- const char *username, bool password,
- const char *progname, bool echo, bool quiet)
+ const char *username, bool password,
+ const char *progname, bool echo, bool quiet)
{
- PQExpBufferData sql;
+ PQExpBufferData sql;
- PGconn *conn;
- PGresult *result;
+ PGconn *conn;
+ PGresult *result;
initPQExpBuffer(&sql);
appendPQExpBuffer(&sql, "REINDEX SYSTEM %s;\n", dbname);
conn = connectDatabase(dbname, host, port, username, password, progname);
-
+
if (echo)
printf("%s", sql.data);
result = PQexec(conn, sql.data);
if (PQresultStatus(result) != PGRES_COMMAND_OK)
{
fprintf(stderr, _("%s: reindexing of system catalogs failed: %s"),
- progname, PQerrorMessage(conn));
+ progname, PQerrorMessage(conn));
PQfinish(conn);
exit(1);
}
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/genam.h,v 1.52 2005/06/13 23:14:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/genam.h,v 1.53 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Snapshot snapshot,
int nkeys, ScanKey key);
extern IndexScanDesc index_beginscan_multi(Relation indexRelation,
- Snapshot snapshot,
- int nkeys, ScanKey key);
+ Snapshot snapshot,
+ int nkeys, ScanKey key);
extern void index_rescan(IndexScanDesc scan, ScanKey key);
extern void index_endscan(IndexScanDesc scan);
extern void index_markpos(IndexScanDesc scan);
extern bool index_getnext_indexitem(IndexScanDesc scan,
ScanDirection direction);
extern bool index_getmulti(IndexScanDesc scan,
- ItemPointer tids, int32 max_tids,
- int32 *returned_tids);
+ ItemPointer tids, int32 max_tids,
+ int32 *returned_tids);
extern IndexBulkDeleteResult *index_bulk_delete(Relation indexRelation,
IndexBulkDeleteCallback callback,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/gist.h,v 1.49 2005/06/30 17:52:14 teodor Exp $
+ * $PostgreSQL: pgsql/src/include/access/gist.h,v 1.50 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct GISTPageOpaqueData
{
- uint32 flags; /* 29 bits are unused for now */
- BlockNumber rightlink;
+ uint32 flags; /* 29 bits are unused for now */
+ BlockNumber rightlink;
- /* the only meaning - change this value if
- page split. */
+ /*
+ * the only meaning - change this value if page split.
+ */
GistNSN nsn;
} GISTPageOpaqueData;
bool leafkey;
} GISTENTRY;
-#define GistPageGetOpaque(page) ( (GISTPageOpaque) PageGetSpecialPointer(page) )
+#define GistPageGetOpaque(page) ( (GISTPageOpaque) PageGetSpecialPointer(page) )
#define GistPageIsLeaf(page) ( GistPageGetOpaque(page)->flags & F_LEAF)
#define GIST_LEAF(entry) (GistPageIsLeaf((entry)->page))
#define GistPageSetLeaf(page) ( GistPageGetOpaque(page)->flags |= F_LEAF)
-#define GistPageSetNonLeaf(page) ( GistPageGetOpaque(page)->flags &= ~F_LEAF)
+#define GistPageSetNonLeaf(page) ( GistPageGetOpaque(page)->flags &= ~F_LEAF)
-#define GistPageIsDeleted(page) ( GistPageGetOpaque(page)->flags & F_DELETED)
+#define GistPageIsDeleted(page) ( GistPageGetOpaque(page)->flags & F_DELETED)
#define GistPageSetDeleted(page) ( GistPageGetOpaque(page)->flags |= F_DELETED)
-#define GistPageSetNonDeleted(page) ( GistPageGetOpaque(page)->flags &= ~F_DELETED)
+#define GistPageSetNonDeleted(page) ( GistPageGetOpaque(page)->flags &= ~F_DELETED)
-#define GistTuplesDeleted(page) ( GistPageGetOpaque(page)->flags & F_TUPLES_DELETED)
-#define GistMarkTuplesDeleted(page) ( GistPageGetOpaque(page)->flags |= F_TUPLES_DELETED)
+#define GistTuplesDeleted(page) ( GistPageGetOpaque(page)->flags & F_TUPLES_DELETED)
+#define GistMarkTuplesDeleted(page) ( GistPageGetOpaque(page)->flags |= F_TUPLES_DELETED)
#define GistClearTuplesDeleted(page) ( GistPageGetOpaque(page)->flags &= ~F_TUPLES_DELETED)
/*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/gist_private.h,v 1.7 2005/06/30 17:52:14 teodor Exp $
+ * $PostgreSQL: pgsql/src/include/access/gist_private.h,v 1.8 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "access/xlogdefs.h"
#include "fmgr.h"
-#define GIST_UNLOCK BUFFER_LOCK_UNLOCK
+#define GIST_UNLOCK BUFFER_LOCK_UNLOCK
#define GIST_SHARE BUFFER_LOCK_SHARE
#define GIST_EXCLUSIVE BUFFER_LOCK_EXCLUSIVE
*/
typedef struct GISTScanOpaqueData
{
- GISTSearchStack *stack;
- GISTSearchStack *markstk;
- uint16 flags;
- GISTSTATE *giststate;
- MemoryContext tempCxt;
- Buffer curbuf;
- Buffer markbuf;
+ GISTSearchStack *stack;
+ GISTSearchStack *markstk;
+ uint16 flags;
+ GISTSTATE *giststate;
+ MemoryContext tempCxt;
+ Buffer curbuf;
+ Buffer markbuf;
} GISTScanOpaqueData;
typedef GISTScanOpaqueData *GISTScanOpaque;
/* XLog stuff */
-extern const XLogRecPtr XLogRecPtrForTemp;
+extern const XLogRecPtr XLogRecPtrForTemp;
-#define XLOG_GIST_ENTRY_UPDATE 0x00
-#define XLOG_GIST_ENTRY_DELETE 0x10
+#define XLOG_GIST_ENTRY_UPDATE 0x00
+#define XLOG_GIST_ENTRY_DELETE 0x10
#define XLOG_GIST_NEW_ROOT 0x20
-typedef struct gistxlogEntryUpdate {
- RelFileNode node;
- BlockNumber blkno;
+typedef struct gistxlogEntryUpdate
+{
+ RelFileNode node;
+ BlockNumber blkno;
uint16 ntodelete;
- bool isemptypage;
-
- /*
- * It used to identify completeness of insert.
- * Sets to leaf itup
- */
- ItemPointerData key;
-
- /* follow:
- * 1. todelete OffsetNumbers
- * 2. tuples to insert
- */
+ bool isemptypage;
+
+ /*
+ * It used to identify completeness of insert. Sets to leaf itup
+ */
+ ItemPointerData key;
+
+ /*
+ * follow: 1. todelete OffsetNumbers 2. tuples to insert
+ */
} gistxlogEntryUpdate;
#define XLOG_GIST_PAGE_SPLIT 0x30
-typedef struct gistxlogPageSplit {
- RelFileNode node;
- BlockNumber origblkno; /*splitted page*/
+typedef struct gistxlogPageSplit
+{
+ RelFileNode node;
+ BlockNumber origblkno; /* splitted page */
uint16 npage;
/* see comments on gistxlogEntryUpdate */
- ItemPointerData key;
-
- /* follow:
- * 1. gistxlogPage and array of IndexTupleData per page
- */
+ ItemPointerData key;
+
+ /*
+ * follow: 1. gistxlogPage and array of IndexTupleData per page
+ */
} gistxlogPageSplit;
#define XLOG_GIST_INSERT_COMPLETE 0x40
-typedef struct gistxlogPage {
- BlockNumber blkno;
- int num;
-} gistxlogPage;
+typedef struct gistxlogPage
+{
+ BlockNumber blkno;
+ int num;
+} gistxlogPage;
-#define XLOG_GIST_CREATE_INDEX 0x50
+#define XLOG_GIST_CREATE_INDEX 0x50
-typedef struct gistxlogInsertComplete {
- RelFileNode node;
+typedef struct gistxlogInsertComplete
+{
+ RelFileNode node;
/* follows ItemPointerData key to clean */
} gistxlogInsertComplete;
/* SplitedPageLayout - gistSplit function result */
-typedef struct SplitedPageLayout {
- gistxlogPage block;
- IndexTupleData *list;
- int lenlist;
- Buffer buffer; /* to write after all proceed */
+typedef struct SplitedPageLayout
+{
+ gistxlogPage block;
+ IndexTupleData *list;
+ int lenlist;
+ Buffer buffer; /* to write after all proceed */
- struct SplitedPageLayout *next;
+ struct SplitedPageLayout *next;
} SplitedPageLayout;
/*
* insertion
*/
-typedef struct GISTInsertStack {
+typedef struct GISTInsertStack
+{
/* current page */
- BlockNumber blkno;
+ BlockNumber blkno;
Buffer buffer;
Page page;
- /* log sequence number from page->lsn to
- recognize page update and compare it with page's nsn
- to recognize page split*/
+ /*
+ * log sequence number from page->lsn to recognize page update and
+ * compare it with page's nsn to recognize page split
+ */
GistNSN lsn;
-
+
/* child's offset */
- OffsetNumber childoffnum;
+ OffsetNumber childoffnum;
/* pointer to parent and child */
- struct GISTInsertStack *parent;
- struct GISTInsertStack *child;
+ struct GISTInsertStack *parent;
+ struct GISTInsertStack *child;
/* for gistFindPath */
- struct GISTInsertStack *next;
+ struct GISTInsertStack *next;
} GISTInsertStack;
#define XLogRecPtrIsInvalid( r ) ( (r).xlogid == 0 && (r).xrecoff == 0 )
-typedef struct {
+typedef struct
+{
Relation r;
- IndexTuple *itup; /* in/out, points to compressed entry */
- int ituplen; /* length of itup */
- GISTInsertStack *stack;
- bool needInsertComplete;
+ IndexTuple *itup; /* in/out, points to compressed entry */
+ int ituplen; /* length of itup */
+ GISTInsertStack *stack;
+ bool needInsertComplete;
/* pointer to heap tuple */
- ItemPointerData key;
+ ItemPointerData key;
} GISTInsertState;
/*
/*
* When we update a relation on which we're doing a scan, we need to
* check the scan and fix it if the update affected any of the pages
- * it touches. Otherwise, we can miss records that we should see.
+ * it touches. Otherwise, we can miss records that we should see.
* The only times we need to do this are for deletions and splits. See
* the code in gistscan.c for how the scan is fixed. These two
* constants tell us what sort of operation changed the index.
*/
#define GISTOP_DEL 0
-/* #define GISTOP_SPLIT 1 */
+/* #define GISTOP_SPLIT 1 */
#define ATTSIZE(datum, tupdesc, i, isnull) \
- ( \
- (isnull) ? 0 : \
- att_addlength(0, (tupdesc)->attrs[(i)-1]->attlen, (datum)) \
- )
+ ( \
+ (isnull) ? 0 : \
+ att_addlength(0, (tupdesc)->attrs[(i)-1]->attlen, (datum)) \
+ )
/*
* mark tuples on inner pages during recovery
extern void gistmakedeal(GISTInsertState *state, GISTSTATE *giststate);
extern void gistnewroot(Relation r, Buffer buffer, IndexTuple *itup, int len, ItemPointer key);
-extern IndexTuple * gistSplit(Relation r, Buffer buffer, IndexTuple *itup,
- int *len, SplitedPageLayout **dist, GISTSTATE *giststate);
+extern IndexTuple *gistSplit(Relation r, Buffer buffer, IndexTuple *itup,
+ int *len, SplitedPageLayout **dist, GISTSTATE *giststate);
+
+extern GISTInsertStack *gistFindPath(Relation r, BlockNumber child,
+ Buffer (*myReadBuffer) (Relation, BlockNumber));
-extern GISTInsertStack* gistFindPath( Relation r, BlockNumber child,
- Buffer (*myReadBuffer)(Relation, BlockNumber) );
/* gistxlog.c */
extern void gist_redo(XLogRecPtr lsn, XLogRecord *record);
extern void gist_desc(char *buf, uint8 xl_info, char *rec);
extern void gist_xlog_cleanup(void);
extern IndexTuple gist_form_invalid_tuple(BlockNumber blkno);
-extern XLogRecData* formUpdateRdata(RelFileNode node, BlockNumber blkno,
- OffsetNumber *todelete, int ntodelete, bool emptypage,
- IndexTuple *itup, int ituplen, ItemPointer key);
+extern XLogRecData *formUpdateRdata(RelFileNode node, BlockNumber blkno,
+ OffsetNumber *todelete, int ntodelete, bool emptypage,
+ IndexTuple *itup, int ituplen, ItemPointer key);
-extern XLogRecData* formSplitRdata(RelFileNode node, BlockNumber blkno,
- ItemPointer key, SplitedPageLayout *dist);
+extern XLogRecData *formSplitRdata(RelFileNode node, BlockNumber blkno,
+ ItemPointer key, SplitedPageLayout *dist);
extern XLogRecPtr gistxlogInsertCompletion(RelFileNode node, ItemPointerData *keys, int len);
extern Datum gistgetmulti(PG_FUNCTION_ARGS);
/* gistutil.c */
-extern Buffer gistNewBuffer(Relation r);
+extern Buffer gistNewBuffer(Relation r);
extern OffsetNumber gistfillbuffer(Relation r, Page page, IndexTuple *itup,
- int len, OffsetNumber off);
+ int len, OffsetNumber off);
extern bool gistnospace(Page page, IndexTuple *itvec, int len);
-extern IndexTuple * gistextractbuffer(Buffer buffer, int *len /* out */ );
-extern IndexTuple * gistjoinvector(
- IndexTuple *itvec, int *len,
- IndexTuple *additvec, int addlen);
+extern IndexTuple *gistextractbuffer(Buffer buffer, int *len /* out */ );
+extern IndexTuple *gistjoinvector(
+ IndexTuple *itvec, int *len,
+ IndexTuple *additvec, int addlen);
extern IndexTuple gistunion(Relation r, IndexTuple *itvec,
- int len, GISTSTATE *giststate);
+ int len, GISTSTATE *giststate);
extern IndexTuple gistgetadjusted(Relation r,
- IndexTuple oldtup,
- IndexTuple addtup,
- GISTSTATE *giststate);
+ IndexTuple oldtup,
+ IndexTuple addtup,
+ GISTSTATE *giststate);
extern int gistfindgroup(GISTSTATE *giststate,
- GISTENTRY *valvec, GIST_SPLITVEC *spl);
+ GISTENTRY *valvec, GIST_SPLITVEC *spl);
extern void gistadjsubkey(Relation r,
- IndexTuple *itup, int len,
- GIST_SPLITVEC *v,
- GISTSTATE *giststate);
+ IndexTuple *itup, int len,
+ GIST_SPLITVEC *v,
+ GISTSTATE *giststate);
extern IndexTuple gistFormTuple(GISTSTATE *giststate,
- Relation r, Datum *attdata, int *datumsize, bool *isnull);
+ Relation r, Datum *attdata, int *datumsize, bool *isnull);
extern OffsetNumber gistchoose(Relation r, Page p,
- IndexTuple it,
- GISTSTATE *giststate);
+ IndexTuple it,
+ GISTSTATE *giststate);
extern void gistcentryinit(GISTSTATE *giststate, int nkey,
- GISTENTRY *e, Datum k,
- Relation r, Page pg,
- OffsetNumber o, int b, bool l, bool isNull);
+ GISTENTRY *e, Datum k,
+ Relation r, Page pg,
+ OffsetNumber o, int b, bool l, bool isNull);
extern void gistDeCompressAtt(GISTSTATE *giststate, Relation r,
- IndexTuple tuple, Page p, OffsetNumber o,
- GISTENTRY *attdata, bool *isnull);
-extern void gistunionsubkey(Relation r, GISTSTATE *giststate,
- IndexTuple *itvec, GIST_SPLITVEC *spl, bool isall);
+ IndexTuple tuple, Page p, OffsetNumber o,
+ GISTENTRY *attdata, bool *isnull);
+extern void gistunionsubkey(Relation r, GISTSTATE *giststate,
+ IndexTuple *itvec, GIST_SPLITVEC *spl, bool isall);
extern void GISTInitBuffer(Buffer b, uint32 f);
extern void gistdentryinit(GISTSTATE *giststate, int nkey, GISTENTRY *e,
Datum k, Relation r, Page pg, OffsetNumber o,
int b, bool l, bool isNull);
void gistUserPicksplit(Relation r, GistEntryVector *entryvec, GIST_SPLITVEC *v,
- IndexTuple *itup, int len, GISTSTATE *giststate);
+ IndexTuple *itup, int len, GISTSTATE *giststate);
/* gistvacuum.c */
extern Datum gistbulkdelete(PG_FUNCTION_ARGS);
extern Datum gistvacuumcleanup(PG_FUNCTION_ARGS);
-#endif /* GIST_PRIVATE_H */
+#endif /* GIST_PRIVATE_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/hash.h,v 1.62 2005/06/06 17:01:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/hash.h,v 1.63 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* modeled after Margo Seltzer's hash implementation for unix.
uint16 hasho_filler; /* available for future use */
/*
- * We presently set hasho_filler to HASHO_FILL (0x1234); this is for
- * the convenience of pg_filedump, which otherwise would have a hard
- * time telling HashPageOpaqueData from BTPageOpaqueData. If we ever
- * need that space for some other purpose, pg_filedump will have to
- * find another way.
+ * We presently set hasho_filler to HASHO_FILL (0x1234); this is for the
+ * convenience of pg_filedump, which otherwise would have a hard time
+ * telling HashPageOpaqueData from BTPageOpaqueData. If we ever need that
+ * space for some other purpose, pg_filedump will have to find another
+ * way.
*/
} HashPageOpaqueData;
BlockNumber hashso_bucket_blkno;
/*
- * We also want to remember which buffers we're currently examining in
- * the scan. We keep these buffers pinned (but not locked) across
- * hashgettuple calls, in order to avoid doing a ReadBuffer() for
- * every tuple in the index.
+ * We also want to remember which buffers we're currently examining in the
+ * scan. We keep these buffers pinned (but not locked) across hashgettuple
+ * calls, in order to avoid doing a ReadBuffer() for every tuple in the
+ * index.
*/
Buffer hashso_curbuf;
Buffer hashso_mrkbuf;
double hashm_ntuples; /* number of tuples stored in the table */
uint16 hashm_ffactor; /* target fill factor (tuples/bucket) */
uint16 hashm_bsize; /* index page size (bytes) */
- uint16 hashm_bmsize; /* bitmap array size (bytes) - must be a
- * power of 2 */
+ uint16 hashm_bmsize; /* bitmap array size (bytes) - must be a power
+ * of 2 */
uint16 hashm_bmshift; /* log2(bitmap array size in BITS) */
uint32 hashm_maxbucket; /* ID of maximum bucket in use */
uint32 hashm_highmask; /* mask to modulo into entire table */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.104 2005/08/20 00:39:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.105 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
) \
) \
)
-
#else /* defined(DISABLE_COMPLEX_MACRO) */
extern Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
ItemPointer tid);
extern void setLastTid(const ItemPointer tid);
-extern Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid,
- bool use_wal, bool use_fsm);
+extern Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid,
+ bool use_wal, bool use_fsm);
extern HTSU_Result heap_delete(Relation relation, ItemPointer tid,
- ItemPointer ctid, TransactionId *update_xmax,
- CommandId cid, Snapshot crosscheck, bool wait);
+ ItemPointer ctid, TransactionId *update_xmax,
+ CommandId cid, Snapshot crosscheck, bool wait);
extern HTSU_Result heap_update(Relation relation, ItemPointer otid,
- HeapTuple newtup,
- ItemPointer ctid, TransactionId *update_xmax,
- CommandId cid, Snapshot crosscheck, bool wait);
+ HeapTuple newtup,
+ ItemPointer ctid, TransactionId *update_xmax,
+ CommandId cid, Snapshot crosscheck, bool wait);
extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
- Buffer *buffer, ItemPointer ctid,
- TransactionId *update_xmax, CommandId cid,
- LockTupleMode mode, bool nowait);
+ Buffer *buffer, ItemPointer ctid,
+ TransactionId *update_xmax, CommandId cid,
+ LockTupleMode mode, bool nowait);
extern Oid simple_heap_insert(Relation relation, HeapTuple tup);
extern void simple_heap_delete(Relation relation, ItemPointer tid);
/* in common/heaptuple.c */
extern Size heap_compute_data_size(TupleDesc tupleDesc,
- Datum *values, bool *isnull);
+ Datum *values, bool *isnull);
extern void heap_fill_tuple(TupleDesc tupleDesc,
- Datum *values, bool *isnull,
- char *data, uint16 *infomask, bits8 *bit);
+ Datum *values, bool *isnull,
+ char *data, uint16 *infomask, bits8 *bit);
extern bool heap_attisnull(HeapTuple tup, int attnum);
extern Datum nocachegetattr(HeapTuple tup, int attnum,
TupleDesc att, bool *isnull);
extern HeapTuple heap_copytuple(HeapTuple tuple);
extern void heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest);
extern HeapTuple heap_form_tuple(TupleDesc tupleDescriptor,
- Datum *values, bool *isnull);
+ Datum *values, bool *isnull);
extern HeapTuple heap_formtuple(TupleDesc tupleDescriptor,
Datum *values, char *nulls);
extern HeapTuple heap_modify_tuple(HeapTuple tuple,
- TupleDesc tupleDesc,
- Datum *replValues,
- bool *replIsnull,
- bool *doReplace);
+ TupleDesc tupleDesc,
+ Datum *replValues,
+ bool *replIsnull,
+ bool *doReplace);
extern HeapTuple heap_modifytuple(HeapTuple tuple,
TupleDesc tupleDesc,
Datum *replValues,
char *replNulls,
char *replActions);
extern void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
- Datum *values, bool *isnull);
+ Datum *values, bool *isnull);
extern void heap_deformtuple(HeapTuple tuple, TupleDesc tupleDesc,
Datum *values, char *nulls);
extern void heap_freetuple(HeapTuple tuple);
extern HeapTuple heap_addheader(int natts, bool withoid,
- Size structlen, void *structure);
+ Size structlen, void *structure);
#endif /* HEAPAM_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/hio.h,v 1.28 2005/06/20 18:37:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/hio.h,v 1.29 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void RelationPutHeapTuple(Relation relation, Buffer buffer,
HeapTuple tuple);
extern Buffer RelationGetBufferForTuple(Relation relation, Size len,
- Buffer otherBuffer, bool use_fsm);
+ Buffer otherBuffer, bool use_fsm);
#endif /* HIO_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.77 2005/09/02 19:02:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.78 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* and Cmin simultaneously, so this is no longer possible.
*
* A word about t_ctid: whenever a new tuple is stored on disk, its t_ctid
- * is initialized with its own TID (location). If the tuple is ever updated,
+ * is initialized with its own TID (location). If the tuple is ever updated,
* its t_ctid is changed to point to the replacement version of the tuple.
* Thus, a tuple is the latest version of its row iff XMAX is invalid or
* t_ctid points to itself (in which case, if XMAX is valid, the tuple is
*/
#define HEAP_HASNULL 0x0001 /* has null attribute(s) */
#define HEAP_HASVARWIDTH 0x0002 /* has variable-width attribute(s) */
-#define HEAP_HASEXTERNAL 0x0004 /* has external stored
- * attribute(s) */
-#define HEAP_HASCOMPRESSED 0x0008 /* has compressed stored
- * attribute(s) */
+#define HEAP_HASEXTERNAL 0x0004 /* has external stored attribute(s) */
+#define HEAP_HASCOMPRESSED 0x0008 /* has compressed stored attribute(s) */
#define HEAP_HASEXTENDED 0x000C /* the two above combined */
#define HEAP_HASOID 0x0010 /* has an object-id field */
/* 0x0020 is presently unused */
-#define HEAP_XMAX_EXCL_LOCK 0x0040 /* xmax is exclusive locker */
-#define HEAP_XMAX_SHARED_LOCK 0x0080 /* xmax is shared locker */
+#define HEAP_XMAX_EXCL_LOCK 0x0040 /* xmax is exclusive locker */
+#define HEAP_XMAX_SHARED_LOCK 0x0080 /* xmax is shared locker */
/* if either LOCK bit is set, xmax hasn't deleted the tuple, only locked it */
#define HEAP_IS_LOCKED (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_SHARED_LOCK)
#define HEAP_XMIN_COMMITTED 0x0100 /* t_xmin committed */
#define HEAP_XMAX_INVALID 0x0800 /* t_xmax invalid/aborted */
#define HEAP_XMAX_IS_MULTI 0x1000 /* t_xmax is a MultiXactId */
#define HEAP_UPDATED 0x2000 /* this is UPDATEd version of row */
-#define HEAP_MOVED_OFF 0x4000 /* moved to another place by
- * VACUUM FULL */
-#define HEAP_MOVED_IN 0x8000 /* moved from another place by
- * VACUUM FULL */
+#define HEAP_MOVED_OFF 0x4000 /* moved to another place by VACUUM
+ * FULL */
+#define HEAP_MOVED_IN 0x8000 /* moved from another place by VACUUM
+ * FULL */
#define HEAP_MOVED (HEAP_MOVED_OFF | HEAP_MOVED_IN)
#define HEAP_XACT_MASK 0xFFC0 /* visibility-related bits */
* * Part of a palloc'd tuple: the HeapTupleData itself and the tuple
* form a single palloc'd chunk. t_data points to the memory location
* immediately following the HeapTupleData struct (at offset HEAPTUPLESIZE),
- * and t_datamcxt is the containing context. This is used as the output
+ * and t_datamcxt is the containing context. This is used as the output
* format of heap_form_tuple and related routines.
*
* * Separately allocated tuple: t_data points to a palloc'd chunk that
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/itup.h,v 1.43 2005/03/27 18:38:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/itup.h,v 1.44 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Index tuple header structure
*
- * All index tuples start with IndexTupleData. If the HasNulls bit is set,
+ * All index tuples start with IndexTupleData. If the HasNulls bit is set,
* this is followed by an IndexAttributeBitMapData. The index attribute
* values follow, beginning at a MAXALIGN boundary.
*
/* routines in indextuple.c */
extern IndexTuple index_form_tuple(TupleDesc tupleDescriptor,
- Datum *values, bool *isnull);
+ Datum *values, bool *isnull);
extern Datum nocache_index_getattr(IndexTuple tup, int attnum,
TupleDesc tupleDesc, bool *isnull);
extern IndexTuple CopyIndexTuple(IndexTuple source);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/multixact.h,v 1.6 2005/08/20 23:26:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/multixact.h,v 1.7 2005/10/15 02:49:42 momjian Exp $
*/
#ifndef MULTIXACT_H
#define MULTIXACT_H
typedef struct xl_multixact_create
{
- MultiXactId mid; /* new MultiXact's ID */
- MultiXactOffset moff; /* its starting offset in members file */
- int32 nxids; /* number of member XIDs */
- TransactionId xids[1]; /* VARIABLE LENGTH ARRAY */
+ MultiXactId mid; /* new MultiXact's ID */
+ MultiXactOffset moff; /* its starting offset in members file */
+ int32 nxids; /* number of member XIDs */
+ TransactionId xids[1]; /* VARIABLE LENGTH ARRAY */
} xl_multixact_create;
#define MinSizeOfMultiXactCreate offsetof(xl_multixact_create, xids)
extern void MultiXactIdWait(MultiXactId multi);
extern bool ConditionalMultiXactIdWait(MultiXactId multi);
extern void MultiXactIdSetOldestMember(void);
-extern int GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids);
+extern int GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids);
extern void AtEOXact_MultiXact(void);
extern void StartupMultiXact(void);
extern void ShutdownMultiXact(void);
extern void MultiXactGetCheckptMulti(bool is_shutdown,
- MultiXactId *nextMulti,
- MultiXactOffset *nextMultiOffset);
+ MultiXactId *nextMulti,
+ MultiXactOffset *nextMultiOffset);
extern void CheckPointMultiXact(void);
extern void MultiXactSetNextMXact(MultiXactId nextMulti,
- MultiXactOffset nextMultiOffset);
+ MultiXactOffset nextMultiOffset);
extern void MultiXactAdvanceNextMXact(MultiXactId minMulti,
- MultiXactOffset minMultiOffset);
+ MultiXactOffset minMultiOffset);
extern void multixact_redo(XLogRecPtr lsn, XLogRecord *record);
extern void multixact_desc(char *buf, uint8 xl_info, char *rec);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.86 2005/06/06 17:01:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.87 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define XLOG_BTREE_SPLIT_R_ROOT 0x60 /* as above, new item on right */
#define XLOG_BTREE_DELETE 0x70 /* delete leaf btitem */
#define XLOG_BTREE_DELETE_PAGE 0x80 /* delete an entire page */
-#define XLOG_BTREE_DELETE_PAGE_META 0x90 /* same, plus update
- * metapage */
+#define XLOG_BTREE_DELETE_PAGE_META 0x90 /* same, plus update metapage */
#define XLOG_BTREE_NEWROOT 0xA0 /* new root page */
#define XLOG_BTREE_NEWMETA 0xB0 /* update metadata page */
/* these fields are set by _bt_preprocess_keys(): */
bool qual_ok; /* false if qual can never be satisfied */
int numberOfKeys; /* number of preprocessed scan keys */
- int numberOfRequiredKeys; /* number of keys that must be
- * matched to continue the scan */
+ int numberOfRequiredKeys; /* number of keys that must be matched
+ * to continue the scan */
ScanKey keyData; /* array of preprocessed scan keys */
} BTScanOpaqueData;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.40 2005/10/07 14:55:35 alvherre Exp $
+ * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.41 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If keys_are_unique and got_tuple are both true, we stop calling the
- * index AM; it is then necessary for index_getnext to keep track of
- * the logical scan position for itself. It does that using
- * unique_tuple_pos: -1 = before row, 0 = on row, +1 = after row.
+ * index AM; it is then necessary for index_getnext to keep track of the
+ * logical scan position for itself. It does that using unique_tuple_pos:
+ * -1 = before row, 0 = on row, +1 = after row.
*/
int unique_tuple_pos; /* logical position */
int unique_tuple_mark; /* logical marked position */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.13 2005/08/20 23:26:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.14 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Info for each buffer slot. Page number is undefined when status is
- * EMPTY. lru_count is essentially the number of page switches since
- * last use of this page; the page with highest lru_count is the best
- * candidate to replace.
+ * EMPTY. lru_count is essentially the number of page switches since last
+ * use of this page; the page with highest lru_count is the best candidate
+ * to replace.
*/
char *page_buffer[NUM_SLRU_BUFFERS];
SlruPageStatus page_status[NUM_SLRU_BUFFERS];
LWLockId buffer_locks[NUM_SLRU_BUFFERS];
/*
- * latest_page_number is the page number of the current end of the
- * log; this is not critical data, since we use it only to avoid
- * swapping out the latest page.
+ * latest_page_number is the page number of the current end of the log;
+ * this is not critical data, since we use it only to avoid swapping out
+ * the latest page.
*/
int latest_page_number;
} SlruSharedData;
SlruShared shared;
/*
- * This flag tells whether to fsync writes (true for pg_clog, false
- * for pg_subtrans).
+ * This flag tells whether to fsync writes (true for pg_clog, false for
+ * pg_subtrans).
*/
bool do_fsync;
/*
- * Decide which of two page numbers is "older" for truncation
- * purposes. We need to use comparison of TransactionIds here in order
- * to do the right thing with wraparound XID arithmetic.
+ * Decide which of two page numbers is "older" for truncation purposes. We
+ * need to use comparison of TransactionIds here in order to do the right
+ * thing with wraparound XID arithmetic.
*/
bool (*PagePrecedes) (int, int);
/*
- * Dir is set during SimpleLruInit and does not change thereafter.
- * Since it's always the same, it doesn't need to be in shared memory.
+ * Dir is set during SimpleLruInit and does not change thereafter. Since
+ * it's always the same, it doesn't need to be in shared memory.
*/
char Dir[64];
} SlruCtlData;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/transam.h,v 1.55 2005/08/12 01:36:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/transam.h,v 1.56 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* using the OID generator. (We start the generator at 10000.)
*
* OIDs beginning at 16384 are assigned from the OID generator
- * during normal multiuser operation. (We force the generator up to
+ * during normal multiuser operation. (We force the generator up to
* 16384 as soon as we are in normal operation.)
*
* The choices of 10000 and 16384 are completely arbitrary, and can be moved
Oid nextOid; /* next OID to assign */
uint32 oidCount; /* OIDs available before must do XLOG work */
TransactionId nextXid; /* next XID to assign */
- TransactionId xidWarnLimit; /* start complaining here */
- TransactionId xidStopLimit; /* refuse to advance nextXid beyond here */
- TransactionId xidWrapLimit; /* where the world ends */
+ TransactionId xidWarnLimit; /* start complaining here */
+ TransactionId xidStopLimit; /* refuse to advance nextXid beyond here */
+ TransactionId xidWrapLimit; /* where the world ends */
NameData limit_datname; /* database that needs vacuumed first */
} VariableCacheData;
extern TransactionId GetNewTransactionId(bool isSubXact);
extern TransactionId ReadNewTransactionId(void);
extern void SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
- Name oldest_datname);
+ Name oldest_datname);
extern Oid GetNewObjectId(void);
#endif /* TRAMSAM_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/tupmacs.h,v 1.28 2005/05/06 17:24:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/tupmacs.h,v 1.29 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
: \
PointerGetDatum((char *) (T)) \
)
-
#else /* SIZEOF_DATUM != 8 */
#define fetch_att(T,attbyval,attlen) \
break; \
} \
} while (0)
-
#else /* SIZEOF_DATUM != 8 */
#define store_att_byval(T,newdatum,attlen) \
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/twophase.h,v 1.5 2005/08/20 23:26:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/twophase.h,v 1.6 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct GlobalTransactionData *GlobalTransaction;
/* GUC variable */
-extern int max_prepared_xacts;
+extern int max_prepared_xacts;
extern Size TwoPhaseShmemSize(void);
extern void TwoPhaseShmemInit(void);
extern PGPROC *TwoPhaseGetDummyProc(TransactionId xid);
extern GlobalTransaction MarkAsPreparing(TransactionId xid, const char *gid,
- TimestampTz prepared_at,
- Oid owner, Oid databaseid);
+ TimestampTz prepared_at,
+ Oid owner, Oid databaseid);
extern void StartPrepare(GlobalTransaction gxact);
extern void EndPrepare(GlobalTransaction gxact);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/twophase_rmgr.h,v 1.1 2005/06/17 22:32:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/twophase_rmgr.h,v 1.2 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define TWOPHASE_RMGR_H
typedef void (*TwoPhaseCallback) (TransactionId xid, uint16 info,
- void *recdata, uint32 len);
+ void *recdata, uint32 len);
typedef uint8 TwoPhaseRmgrId;
/*
#define TWOPHASE_RM_END_ID 0
#define TWOPHASE_RM_LOCK_ID 1
#define TWOPHASE_RM_INVAL_ID 2
-#define TWOPHASE_RM_FLATFILES_ID 3
-#define TWOPHASE_RM_NOTIFY_ID 4
+#define TWOPHASE_RM_FLATFILES_ID 3
+#define TWOPHASE_RM_NOTIFY_ID 4
#define TWOPHASE_RM_MAX_ID TWOPHASE_RM_NOTIFY_ID
extern const TwoPhaseCallback twophase_recover_callbacks[];
extern void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info,
- const void *data, uint32 len);
+ const void *data, uint32 len);
#endif /* TWOPHASE_RMGR_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/xact.h,v 1.78 2005/06/29 22:51:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/xact.h,v 1.79 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} SubXactEvent;
typedef void (*SubXactCallback) (SubXactEvent event, SubTransactionId mySubid,
- SubTransactionId parentSubid, void *arg);
+ SubTransactionId parentSubid, void *arg);
/* ----------------
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/xlog.h,v 1.68 2005/08/20 23:26:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/xlog.h,v 1.69 2005/10/15 02:49:42 momjian Exp $
*/
#ifndef XLOG_H
#define XLOG_H
* where there can be zero to three backup blocks (as signaled by xl_info flag
* bits). XLogRecord structs always start on MAXALIGN boundaries in the WAL
* files, and we round up SizeOfXLogRecord so that the rmgr data is also
- * guaranteed to begin on a MAXALIGN boundary. However, no padding is added
+ * guaranteed to begin on a MAXALIGN boundary. However, no padding is added
* to align BkpBlock structs or backup block data.
*
* NOTE: xl_len counts only the rmgr data, not the XLogRecord header,
- * and also not any backup blocks. xl_tot_len counts everything. Neither
+ * and also not any backup blocks. xl_tot_len counts everything. Neither
* length field is rounded up to an alignment boundary.
*/
typedef struct XLogRecord
* record. (Could support 4 if we cared to dedicate all the xl_info bits for
* this purpose; currently bit 0 of xl_info is unused and available.)
*/
-#define XLR_BKP_BLOCK_MASK 0x0E /* all info bits used for bkp
- * blocks */
+#define XLR_BKP_BLOCK_MASK 0x0E /* all info bits used for bkp blocks */
#define XLR_MAX_BKP_BLOCKS 3
#define XLR_SET_BKP_BLOCK(iblk) (0x08 >> (iblk))
#define XLR_BKP_BLOCK_1 XLR_SET_BKP_BLOCK(0) /* 0x08 */
/* Sync methods */
#define SYNC_METHOD_FSYNC 0
#define SYNC_METHOD_FDATASYNC 1
-#define SYNC_METHOD_OPEN 2 /* for O_SYNC and O_DSYNC */
+#define SYNC_METHOD_OPEN 2 /* for O_SYNC and O_DSYNC */
#define SYNC_METHOD_FSYNC_WRITETHROUGH 3
extern int sync_method;
* value (ignoring InvalidBuffer) appearing in the rdata chain.
*
* When buffer is valid, caller must set buffer_std to indicate whether the
- * page uses standard pd_lower/pd_upper header fields. If this is true, then
+ * page uses standard pd_lower/pd_upper header fields. If this is true, then
* XLOG is allowed to omit the free space between pd_lower and pd_upper from
* the backed-up page image. Note that even when buffer_std is false, the
* page MUST have an LSN field as its first eight bytes!
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/c.h,v 1.189 2005/07/21 15:16:27 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/c.h,v 1.190 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#if !defined(WIN32) && !defined(__CYGWIN__)
#include "pg_config_os.h" /* must be before any system header files */
#else
-#if defined(_MSC_VER) || defined(__BORLANDC__)
-#define WIN32_CLIENT_ONLY
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+#define WIN32_CLIENT_ONLY
#endif
#endif
#include "postgres_ext.h"
/* We have to redefine some system functions after they are included above */
#include "pg_config_os.h"
#else
-#include "port/win32.h" /* We didn't run configure, but this is our port file */
+#include "port/win32.h" /* We didn't run configure, but this is our
+ * port file */
#endif
#endif
#define CppAsString(identifier) #identifier
#define CppConcat(x, y) x##y
-
#else /* !HAVE_STRINGIZE */
#define CppAsString(identifier) "identifier"
#ifndef HAVE_UINT64
typedef unsigned long int uint64;
#endif
-
#elif defined(HAVE_LONG_LONG_INT_64)
/* We have working support for "long long int", use that */
#ifndef HAVE_UINT64
typedef unsigned long long int uint64;
#endif
-
#else /* not HAVE_LONG_INT_64 and not
* HAVE_LONG_LONG_INT_64 */
/*
* Specialized array types. These are physically laid out just the same
* as regular arrays (so that the regular array subscripting code works
- * with them). They exist as distinct types mostly for historical reasons:
+ * with them). They exist as distinct types mostly for historical reasons:
* they have nonstandard I/O behavior which we don't want to change for fear
* of breaking applications that look at the system catalogs. There is also
* an implementation issue for oidvector: it's part of the primary key for
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/catalog.h,v 1.33 2005/08/12 01:36:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/catalog.h,v 1.34 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Oid GetNewOid(Relation relation);
extern Oid GetNewOidWithIndex(Relation relation, Relation indexrel);
-extern Oid GetNewRelFileNode(Oid reltablespace, bool relisshared,
- Relation pg_class);
+extern Oid GetNewRelFileNode(Oid reltablespace, bool relisshared,
+ Relation pg_class);
#endif /* CATALOG_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/dependency.h,v 1.16 2005/08/01 04:03:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/dependency.h,v 1.17 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
Oid classId; /* Class Id from pg_class */
Oid objectId; /* OID of the object */
- int32 objectSubId; /* Subitem within the object (column of
- * table) */
+ int32 objectSubId; /* Subitem within the object (column of table) */
} ObjectAddress;
/*
- * This enum covers all system catalogs whose OIDs can appear in
+ * This enum covers all system catalogs whose OIDs can appear in
* pg_depend.classId or pg_shdepend.classId.
*/
typedef enum ObjectClass
extern long deleteDependencyRecordsFor(Oid classId, Oid objectId);
extern long changeDependencyFor(Oid classId, Oid objectId,
- Oid refClassId, Oid oldRefObjectId,
- Oid newRefObjectId);
+ Oid refClassId, Oid oldRefObjectId,
+ Oid newRefObjectId);
/* in pg_shdepend.c */
extern void recordSharedDependencyOn(ObjectAddress *depender,
- ObjectAddress *referenced,
- SharedDependencyType deptype);
+ ObjectAddress *referenced,
+ SharedDependencyType deptype);
extern void deleteSharedDependencyRecordsFor(Oid classId, Oid objectId);
extern void recordDependencyOnOwner(Oid classId, Oid objectId, Oid owner);
extern void changeDependencyOnOwner(Oid classId, Oid objectId,
- Oid newOwnerId);
+ Oid newOwnerId);
extern void updateAclDependencies(Oid classId, Oid objectId,
- Oid ownerId, bool isGrant,
- int noldmembers, Oid *oldmembers,
- int nnewmembers, Oid *newmembers);
+ Oid ownerId, bool isGrant,
+ int noldmembers, Oid *oldmembers,
+ int nnewmembers, Oid *newmembers);
extern char *checkSharedDependencies(Oid classId, Oid objectId);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/heap.h,v 1.75 2005/08/26 03:08:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/heap.h,v 1.76 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct RawColumnDefault
{
AttrNumber attnum; /* attribute to attach default to */
- Node *raw_default; /* default value (untransformed parse
- * tree) */
+ Node *raw_default; /* default value (untransformed parse tree) */
} RawColumnDefault;
typedef struct CookedConstraint
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/index.h,v 1.63 2005/05/11 06:24:55 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/index.h,v 1.64 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Typedef for callback function for IndexBuildHeapScan */
typedef void (*IndexBuildCallback) (Relation index,
- HeapTuple htup,
- Datum *values,
- bool *isnull,
- bool tupleIsAlive,
- void *state);
+ HeapTuple htup,
+ Datum *values,
+ bool *isnull,
+ bool tupleIsAlive,
+ void *state);
extern Oid index_create(Oid heapRelationId,
bool *isnull);
extern void IndexCloseAndUpdateStats(Relation heap, double heapTuples,
- Relation index, double indexTuples);
+ Relation index, double indexTuples);
extern void setRelhasindex(Oid relid, bool hasindex,
bool isprimary, Oid reltoastidxid);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/indexing.h,v 1.91 2005/09/08 20:07:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/indexing.h,v 1.92 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* index name (much less the numeric OID).
*/
-DECLARE_UNIQUE_INDEX(pg_aggregate_fnoid_index,2650, on pg_aggregate using btree(aggfnoid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_aggregate_fnoid_index, 2650, on pg_aggregate using btree(aggfnoid oid_ops));
#define AggregateFnoidIndexId 2650
-DECLARE_UNIQUE_INDEX(pg_am_name_index,2651, on pg_am using btree(amname name_ops));
+DECLARE_UNIQUE_INDEX(pg_am_name_index, 2651, on pg_am using btree(amname name_ops));
#define AmNameIndexId 2651
-DECLARE_UNIQUE_INDEX(pg_am_oid_index,2652, on pg_am using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_am_oid_index, 2652, on pg_am using btree(oid oid_ops));
#define AmOidIndexId 2652
-DECLARE_UNIQUE_INDEX(pg_amop_opc_strat_index,2653, on pg_amop using btree(amopclaid oid_ops, amopsubtype oid_ops, amopstrategy int2_ops));
+DECLARE_UNIQUE_INDEX(pg_amop_opc_strat_index, 2653, on pg_amop using btree(amopclaid oid_ops, amopsubtype oid_ops, amopstrategy int2_ops));
#define AccessMethodStrategyIndexId 2653
-DECLARE_UNIQUE_INDEX(pg_amop_opr_opc_index,2654, on pg_amop using btree(amopopr oid_ops, amopclaid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_amop_opr_opc_index, 2654, on pg_amop using btree(amopopr oid_ops, amopclaid oid_ops));
#define AccessMethodOperatorIndexId 2654
-DECLARE_UNIQUE_INDEX(pg_amproc_opc_proc_index,2655, on pg_amproc using btree(amopclaid oid_ops, amprocsubtype oid_ops, amprocnum int2_ops));
+DECLARE_UNIQUE_INDEX(pg_amproc_opc_proc_index, 2655, on pg_amproc using btree(amopclaid oid_ops, amprocsubtype oid_ops, amprocnum int2_ops));
#define AccessMethodProcedureIndexId 2655
-DECLARE_UNIQUE_INDEX(pg_attrdef_adrelid_adnum_index,2656, on pg_attrdef using btree(adrelid oid_ops, adnum int2_ops));
-#define AttrDefaultIndexId 2656
-DECLARE_UNIQUE_INDEX(pg_attrdef_oid_index,2657, on pg_attrdef using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_attrdef_adrelid_adnum_index, 2656, on pg_attrdef using btree(adrelid oid_ops, adnum int2_ops));
+#define AttrDefaultIndexId 2656
+DECLARE_UNIQUE_INDEX(pg_attrdef_oid_index, 2657, on pg_attrdef using btree(oid oid_ops));
#define AttrDefaultOidIndexId 2657
-DECLARE_UNIQUE_INDEX(pg_attribute_relid_attnam_index,2658, on pg_attribute using btree(attrelid oid_ops, attname name_ops));
+DECLARE_UNIQUE_INDEX(pg_attribute_relid_attnam_index, 2658, on pg_attribute using btree(attrelid oid_ops, attname name_ops));
#define AttributeRelidNameIndexId 2658
-DECLARE_UNIQUE_INDEX(pg_attribute_relid_attnum_index,2659, on pg_attribute using btree(attrelid oid_ops, attnum int2_ops));
+DECLARE_UNIQUE_INDEX(pg_attribute_relid_attnum_index, 2659, on pg_attribute using btree(attrelid oid_ops, attnum int2_ops));
#define AttributeRelidNumIndexId 2659
-DECLARE_UNIQUE_INDEX(pg_authid_rolname_index,2676, on pg_authid using btree(rolname name_ops));
+DECLARE_UNIQUE_INDEX(pg_authid_rolname_index, 2676, on pg_authid using btree(rolname name_ops));
#define AuthIdRolnameIndexId 2676
-DECLARE_UNIQUE_INDEX(pg_authid_oid_index,2677, on pg_authid using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_authid_oid_index, 2677, on pg_authid using btree(oid oid_ops));
#define AuthIdOidIndexId 2677
-DECLARE_UNIQUE_INDEX(pg_auth_members_role_member_index,2694, on pg_auth_members using btree(roleid oid_ops, member oid_ops));
+DECLARE_UNIQUE_INDEX(pg_auth_members_role_member_index, 2694, on pg_auth_members using btree(roleid oid_ops, member oid_ops));
#define AuthMemRoleMemIndexId 2694
-DECLARE_UNIQUE_INDEX(pg_auth_members_member_role_index,2695, on pg_auth_members using btree(member oid_ops, roleid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_auth_members_member_role_index, 2695, on pg_auth_members using btree(member oid_ops, roleid oid_ops));
#define AuthMemMemRoleIndexId 2695
-DECLARE_UNIQUE_INDEX(pg_autovacuum_vacrelid_index,1250, on pg_autovacuum using btree(vacrelid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_autovacuum_vacrelid_index, 1250, on pg_autovacuum using btree(vacrelid oid_ops));
#define AutovacuumRelidIndexId 1250
-DECLARE_UNIQUE_INDEX(pg_cast_oid_index,2660, on pg_cast using btree(oid oid_ops));
-#define CastOidIndexId 2660
-DECLARE_UNIQUE_INDEX(pg_cast_source_target_index,2661, on pg_cast using btree(castsource oid_ops, casttarget oid_ops));
+DECLARE_UNIQUE_INDEX(pg_cast_oid_index, 2660, on pg_cast using btree(oid oid_ops));
+#define CastOidIndexId 2660
+DECLARE_UNIQUE_INDEX(pg_cast_source_target_index, 2661, on pg_cast using btree(castsource oid_ops, casttarget oid_ops));
#define CastSourceTargetIndexId 2661
-DECLARE_UNIQUE_INDEX(pg_class_oid_index,2662, on pg_class using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_class_oid_index, 2662, on pg_class using btree(oid oid_ops));
#define ClassOidIndexId 2662
-DECLARE_UNIQUE_INDEX(pg_class_relname_nsp_index,2663, on pg_class using btree(relname name_ops, relnamespace oid_ops));
+DECLARE_UNIQUE_INDEX(pg_class_relname_nsp_index, 2663, on pg_class using btree(relname name_ops, relnamespace oid_ops));
#define ClassNameNspIndexId 2663
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_constraint_conname_nsp_index,2664, on pg_constraint using btree(conname name_ops, connamespace oid_ops));
+DECLARE_INDEX(pg_constraint_conname_nsp_index, 2664, on pg_constraint using btree(conname name_ops, connamespace oid_ops));
#define ConstraintNameNspIndexId 2664
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_constraint_conrelid_index,2665, on pg_constraint using btree(conrelid oid_ops));
-#define ConstraintRelidIndexId 2665
+DECLARE_INDEX(pg_constraint_conrelid_index, 2665, on pg_constraint using btree(conrelid oid_ops));
+#define ConstraintRelidIndexId 2665
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_constraint_contypid_index,2666, on pg_constraint using btree(contypid oid_ops));
-#define ConstraintTypidIndexId 2666
-DECLARE_UNIQUE_INDEX(pg_constraint_oid_index,2667, on pg_constraint using btree(oid oid_ops));
+DECLARE_INDEX(pg_constraint_contypid_index, 2666, on pg_constraint using btree(contypid oid_ops));
+#define ConstraintTypidIndexId 2666
+DECLARE_UNIQUE_INDEX(pg_constraint_oid_index, 2667, on pg_constraint using btree(oid oid_ops));
#define ConstraintOidIndexId 2667
-DECLARE_UNIQUE_INDEX(pg_conversion_default_index,2668, on pg_conversion using btree(connamespace oid_ops, conforencoding int4_ops, contoencoding int4_ops, oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_conversion_default_index, 2668, on pg_conversion using btree(connamespace oid_ops, conforencoding int4_ops, contoencoding int4_ops, oid oid_ops));
#define ConversionDefaultIndexId 2668
-DECLARE_UNIQUE_INDEX(pg_conversion_name_nsp_index,2669, on pg_conversion using btree(conname name_ops, connamespace oid_ops));
+DECLARE_UNIQUE_INDEX(pg_conversion_name_nsp_index, 2669, on pg_conversion using btree(conname name_ops, connamespace oid_ops));
#define ConversionNameNspIndexId 2669
-DECLARE_UNIQUE_INDEX(pg_conversion_oid_index,2670, on pg_conversion using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_conversion_oid_index, 2670, on pg_conversion using btree(oid oid_ops));
#define ConversionOidIndexId 2670
-DECLARE_UNIQUE_INDEX(pg_database_datname_index,2671, on pg_database using btree(datname name_ops));
+DECLARE_UNIQUE_INDEX(pg_database_datname_index, 2671, on pg_database using btree(datname name_ops));
#define DatabaseNameIndexId 2671
-DECLARE_UNIQUE_INDEX(pg_database_oid_index,2672, on pg_database using btree(oid oid_ops));
-#define DatabaseOidIndexId 2672
+DECLARE_UNIQUE_INDEX(pg_database_oid_index, 2672, on pg_database using btree(oid oid_ops));
+#define DatabaseOidIndexId 2672
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_depend_depender_index,2673, on pg_depend using btree(classid oid_ops, objid oid_ops, objsubid int4_ops));
+DECLARE_INDEX(pg_depend_depender_index, 2673, on pg_depend using btree(classid oid_ops, objid oid_ops, objsubid int4_ops));
#define DependDependerIndexId 2673
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_depend_reference_index,2674, on pg_depend using btree(refclassid oid_ops, refobjid oid_ops, refobjsubid int4_ops));
-#define DependReferenceIndexId 2674
+DECLARE_INDEX(pg_depend_reference_index, 2674, on pg_depend using btree(refclassid oid_ops, refobjid oid_ops, refobjsubid int4_ops));
+#define DependReferenceIndexId 2674
-DECLARE_UNIQUE_INDEX(pg_description_o_c_o_index,2675, on pg_description using btree(objoid oid_ops, classoid oid_ops, objsubid int4_ops));
+DECLARE_UNIQUE_INDEX(pg_description_o_c_o_index, 2675, on pg_description using btree(objoid oid_ops, classoid oid_ops, objsubid int4_ops));
#define DescriptionObjIndexId 2675
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_index_indrelid_index,2678, on pg_index using btree(indrelid oid_ops));
+DECLARE_INDEX(pg_index_indrelid_index, 2678, on pg_index using btree(indrelid oid_ops));
#define IndexIndrelidIndexId 2678
-DECLARE_UNIQUE_INDEX(pg_index_indexrelid_index,2679, on pg_index using btree(indexrelid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_index_indexrelid_index, 2679, on pg_index using btree(indexrelid oid_ops));
#define IndexRelidIndexId 2679
-DECLARE_UNIQUE_INDEX(pg_inherits_relid_seqno_index,2680, on pg_inherits using btree(inhrelid oid_ops, inhseqno int4_ops));
+DECLARE_UNIQUE_INDEX(pg_inherits_relid_seqno_index, 2680, on pg_inherits using btree(inhrelid oid_ops, inhseqno int4_ops));
#define InheritsRelidSeqnoIndexId 2680
-DECLARE_UNIQUE_INDEX(pg_language_name_index,2681, on pg_language using btree(lanname name_ops));
+DECLARE_UNIQUE_INDEX(pg_language_name_index, 2681, on pg_language using btree(lanname name_ops));
#define LanguageNameIndexId 2681
-DECLARE_UNIQUE_INDEX(pg_language_oid_index,2682, on pg_language using btree(oid oid_ops));
-#define LanguageOidIndexId 2682
+DECLARE_UNIQUE_INDEX(pg_language_oid_index, 2682, on pg_language using btree(oid oid_ops));
+#define LanguageOidIndexId 2682
-DECLARE_UNIQUE_INDEX(pg_largeobject_loid_pn_index,2683, on pg_largeobject using btree(loid oid_ops, pageno int4_ops));
+DECLARE_UNIQUE_INDEX(pg_largeobject_loid_pn_index, 2683, on pg_largeobject using btree(loid oid_ops, pageno int4_ops));
#define LargeObjectLOidPNIndexId 2683
-DECLARE_UNIQUE_INDEX(pg_namespace_nspname_index,2684, on pg_namespace using btree(nspname name_ops));
+DECLARE_UNIQUE_INDEX(pg_namespace_nspname_index, 2684, on pg_namespace using btree(nspname name_ops));
#define NamespaceNameIndexId 2684
-DECLARE_UNIQUE_INDEX(pg_namespace_oid_index,2685, on pg_namespace using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_namespace_oid_index, 2685, on pg_namespace using btree(oid oid_ops));
#define NamespaceOidIndexId 2685
-DECLARE_UNIQUE_INDEX(pg_opclass_am_name_nsp_index,2686, on pg_opclass using btree(opcamid oid_ops, opcname name_ops, opcnamespace oid_ops));
+DECLARE_UNIQUE_INDEX(pg_opclass_am_name_nsp_index, 2686, on pg_opclass using btree(opcamid oid_ops, opcname name_ops, opcnamespace oid_ops));
#define OpclassAmNameNspIndexId 2686
-DECLARE_UNIQUE_INDEX(pg_opclass_oid_index,2687, on pg_opclass using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_opclass_oid_index, 2687, on pg_opclass using btree(oid oid_ops));
#define OpclassOidIndexId 2687
-DECLARE_UNIQUE_INDEX(pg_operator_oid_index,2688, on pg_operator using btree(oid oid_ops));
-#define OperatorOidIndexId 2688
-DECLARE_UNIQUE_INDEX(pg_operator_oprname_l_r_n_index,2689, on pg_operator using btree(oprname name_ops, oprleft oid_ops, oprright oid_ops, oprnamespace oid_ops));
-#define OperatorNameNspIndexId 2689
+DECLARE_UNIQUE_INDEX(pg_operator_oid_index, 2688, on pg_operator using btree(oid oid_ops));
+#define OperatorOidIndexId 2688
+DECLARE_UNIQUE_INDEX(pg_operator_oprname_l_r_n_index, 2689, on pg_operator using btree(oprname name_ops, oprleft oid_ops, oprright oid_ops, oprnamespace oid_ops));
+#define OperatorNameNspIndexId 2689
-DECLARE_UNIQUE_INDEX(pg_pltemplate_name_index,1137, on pg_pltemplate using btree(tmplname name_ops));
+DECLARE_UNIQUE_INDEX(pg_pltemplate_name_index, 1137, on pg_pltemplate using btree(tmplname name_ops));
#define PLTemplateNameIndexId 1137
-DECLARE_UNIQUE_INDEX(pg_proc_oid_index,2690, on pg_proc using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_proc_oid_index, 2690, on pg_proc using btree(oid oid_ops));
#define ProcedureOidIndexId 2690
-DECLARE_UNIQUE_INDEX(pg_proc_proname_args_nsp_index,2691, on pg_proc using btree(proname name_ops, proargtypes oidvector_ops, pronamespace oid_ops));
+DECLARE_UNIQUE_INDEX(pg_proc_proname_args_nsp_index, 2691, on pg_proc using btree(proname name_ops, proargtypes oidvector_ops, pronamespace oid_ops));
#define ProcedureNameArgsNspIndexId 2691
-DECLARE_UNIQUE_INDEX(pg_rewrite_oid_index,2692, on pg_rewrite using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_rewrite_oid_index, 2692, on pg_rewrite using btree(oid oid_ops));
#define RewriteOidIndexId 2692
-DECLARE_UNIQUE_INDEX(pg_rewrite_rel_rulename_index,2693, on pg_rewrite using btree(ev_class oid_ops, rulename name_ops));
+DECLARE_UNIQUE_INDEX(pg_rewrite_rel_rulename_index, 2693, on pg_rewrite using btree(ev_class oid_ops, rulename name_ops));
#define RewriteRelRulenameIndexId 2693
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_shdepend_depender_index,1232, on pg_shdepend using btree(dbid oid_ops, classid oid_ops, objid oid_ops));
+DECLARE_INDEX(pg_shdepend_depender_index, 1232, on pg_shdepend using btree(dbid oid_ops, classid oid_ops, objid oid_ops));
#define SharedDependDependerIndexId 1232
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_shdepend_reference_index,1233, on pg_shdepend using btree(refclassid oid_ops, refobjid oid_ops));
+DECLARE_INDEX(pg_shdepend_reference_index, 1233, on pg_shdepend using btree(refclassid oid_ops, refobjid oid_ops));
#define SharedDependReferenceIndexId 1233
-DECLARE_UNIQUE_INDEX(pg_statistic_relid_att_index,2696, on pg_statistic using btree(starelid oid_ops, staattnum int2_ops));
+DECLARE_UNIQUE_INDEX(pg_statistic_relid_att_index, 2696, on pg_statistic using btree(starelid oid_ops, staattnum int2_ops));
#define StatisticRelidAttnumIndexId 2696
-DECLARE_UNIQUE_INDEX(pg_tablespace_oid_index,2697, on pg_tablespace using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_tablespace_oid_index, 2697, on pg_tablespace using btree(oid oid_ops));
#define TablespaceOidIndexId 2697
-DECLARE_UNIQUE_INDEX(pg_tablespace_spcname_index,2698, on pg_tablespace using btree(spcname name_ops));
+DECLARE_UNIQUE_INDEX(pg_tablespace_spcname_index, 2698, on pg_tablespace using btree(spcname name_ops));
#define TablespaceNameIndexId 2698
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_trigger_tgconstrname_index,2699, on pg_trigger using btree(tgconstrname name_ops));
+DECLARE_INDEX(pg_trigger_tgconstrname_index, 2699, on pg_trigger using btree(tgconstrname name_ops));
#define TriggerConstrNameIndexId 2699
/* This following index is not used for a cache and is not unique */
-DECLARE_INDEX(pg_trigger_tgconstrrelid_index,2700, on pg_trigger using btree(tgconstrrelid oid_ops));
+DECLARE_INDEX(pg_trigger_tgconstrrelid_index, 2700, on pg_trigger using btree(tgconstrrelid oid_ops));
#define TriggerConstrRelidIndexId 2700
-DECLARE_UNIQUE_INDEX(pg_trigger_tgrelid_tgname_index,2701, on pg_trigger using btree(tgrelid oid_ops, tgname name_ops));
+DECLARE_UNIQUE_INDEX(pg_trigger_tgrelid_tgname_index, 2701, on pg_trigger using btree(tgrelid oid_ops, tgname name_ops));
#define TriggerRelidNameIndexId 2701
-DECLARE_UNIQUE_INDEX(pg_trigger_oid_index,2702, on pg_trigger using btree(oid oid_ops));
+DECLARE_UNIQUE_INDEX(pg_trigger_oid_index, 2702, on pg_trigger using btree(oid oid_ops));
#define TriggerOidIndexId 2702
-DECLARE_UNIQUE_INDEX(pg_type_oid_index,2703, on pg_type using btree(oid oid_ops));
-#define TypeOidIndexId 2703
-DECLARE_UNIQUE_INDEX(pg_type_typname_nsp_index,2704, on pg_type using btree(typname name_ops, typnamespace oid_ops));
-#define TypeNameNspIndexId 2704
+DECLARE_UNIQUE_INDEX(pg_type_oid_index, 2703, on pg_type using btree(oid oid_ops));
+#define TypeOidIndexId 2703
+DECLARE_UNIQUE_INDEX(pg_type_typname_nsp_index, 2704, on pg_type using btree(typname name_ops, typnamespace oid_ops));
+#define TypeNameNspIndexId 2704
/* last step of initialization script: build the indexes declared above */
BUILD_INDICES
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/namespace.h,v 1.36 2005/08/01 04:03:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/namespace.h,v 1.37 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void InitializeSearchPath(void);
extern void AtEOXact_Namespace(bool isCommit);
extern void AtEOSubXact_Namespace(bool isCommit, SubTransactionId mySubid,
- SubTransactionId parentSubid);
+ SubTransactionId parentSubid);
/* stuff for search_path GUC variable */
extern char *namespace_search_path;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_aggregate.h,v 1.51 2005/04/14 01:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_aggregate.h,v 1.52 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
DATA(insert ( 2148 int8_accum numeric_variance 0 1231 "{0,0,0}" ));
DATA(insert ( 2149 int4_accum numeric_variance 0 1231 "{0,0,0}" ));
DATA(insert ( 2150 int2_accum numeric_variance 0 1231 "{0,0,0}" ));
-DATA(insert ( 2151 float4_accum float8_variance 0 1022 "{0,0,0}" ));
-DATA(insert ( 2152 float8_accum float8_variance 0 1022 "{0,0,0}" ));
-DATA(insert ( 2153 numeric_accum numeric_variance 0 1231 "{0,0,0}" ));
+DATA(insert ( 2151 float4_accum float8_variance 0 1022 "{0,0,0}" ));
+DATA(insert ( 2152 float8_accum float8_variance 0 1022 "{0,0,0}" ));
+DATA(insert ( 2153 numeric_accum numeric_variance 0 1231 "{0,0,0}" ));
/* stddev */
DATA(insert ( 2154 int8_accum numeric_stddev 0 1231 "{0,0,0}" ));
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_am.h,v 1.37 2005/06/27 12:45:23 teodor Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_am.h,v 1.38 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* typedef struct FormData_pg_am
* ----------------
*/
-#define AccessMethodRelationId 2601
+#define AccessMethodRelationId 2601
CATALOG(pg_am,2601)
{
NameData amname; /* access method name */
- int2 amstrategies; /* total NUMBER of strategies (operators)
- * by which we can traverse/search this AM */
- int2 amsupport; /* total NUMBER of support functions that
- * this AM uses */
- int2 amorderstrategy;/* if this AM has a sort order, the
- * strategy number of the sort operator.
- * Zero if AM is not ordered. */
+ int2 amstrategies; /* total NUMBER of strategies (operators) by
+ * which we can traverse/search this AM */
+ int2 amsupport; /* total NUMBER of support functions that this
+ * AM uses */
+ int2 amorderstrategy;/* if this AM has a sort order, the strategy
+ * number of the sort operator. Zero if AM is
+ * not ordered. */
bool amcanunique; /* does AM support UNIQUE indexes? */
bool amcanmulticol; /* does AM support multi-column indexes? */
bool amoptionalkey; /* can query omit key for the first column? */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_amop.h,v 1.65 2005/07/01 19:19:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_amop.h,v 1.66 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* typedef struct FormData_pg_amop
* ----------------
*/
-#define AccessMethodOperatorRelationId 2602
+#define AccessMethodOperatorRelationId 2602
CATALOG(pg_amop,2602) BKI_WITHOUT_OIDS
{
* rtree box_ops
*/
-DATA(insert ( 425 0 1 f 493 ));
-DATA(insert ( 425 0 2 f 494 ));
-DATA(insert ( 425 0 3 f 500 ));
-DATA(insert ( 425 0 4 f 495 ));
-DATA(insert ( 425 0 5 f 496 ));
-DATA(insert ( 425 0 6 f 499 ));
-DATA(insert ( 425 0 7 f 498 ));
-DATA(insert ( 425 0 8 f 497 ));
-DATA(insert ( 425 0 9 f 2571 ));
-DATA(insert ( 425 0 10 f 2570 ));
-DATA(insert ( 425 0 11 f 2573 ));
-DATA(insert ( 425 0 12 f 2572 ));
+DATA(insert ( 425 0 1 f 493 ));
+DATA(insert ( 425 0 2 f 494 ));
+DATA(insert ( 425 0 3 f 500 ));
+DATA(insert ( 425 0 4 f 495 ));
+DATA(insert ( 425 0 5 f 496 ));
+DATA(insert ( 425 0 6 f 499 ));
+DATA(insert ( 425 0 7 f 498 ));
+DATA(insert ( 425 0 8 f 497 ));
+DATA(insert ( 425 0 9 f 2571 ));
+DATA(insert ( 425 0 10 f 2570 ));
+DATA(insert ( 425 0 11 f 2573 ));
+DATA(insert ( 425 0 12 f 2572 ));
/*
* rtree poly_ops (supports polygons)
*/
-DATA(insert ( 1993 0 1 f 485 ));
-DATA(insert ( 1993 0 2 f 486 ));
-DATA(insert ( 1993 0 3 f 492 ));
-DATA(insert ( 1993 0 4 f 487 ));
-DATA(insert ( 1993 0 5 f 488 ));
-DATA(insert ( 1993 0 6 f 491 ));
-DATA(insert ( 1993 0 7 f 490 ));
-DATA(insert ( 1993 0 8 f 489 ));
-DATA(insert ( 1993 0 9 f 2575 ));
-DATA(insert ( 1993 0 10 f 2574 ));
-DATA(insert ( 1993 0 11 f 2577 ));
-DATA(insert ( 1993 0 12 f 2576 ));
+DATA(insert ( 1993 0 1 f 485 ));
+DATA(insert ( 1993 0 2 f 486 ));
+DATA(insert ( 1993 0 3 f 492 ));
+DATA(insert ( 1993 0 4 f 487 ));
+DATA(insert ( 1993 0 5 f 488 ));
+DATA(insert ( 1993 0 6 f 491 ));
+DATA(insert ( 1993 0 7 f 490 ));
+DATA(insert ( 1993 0 8 f 489 ));
+DATA(insert ( 1993 0 9 f 2575 ));
+DATA(insert ( 1993 0 10 f 2574 ));
+DATA(insert ( 1993 0 11 f 2577 ));
+DATA(insert ( 1993 0 12 f 2576 ));
/*
* btree int2_ops
* gist box_ops
*/
-DATA(insert ( 2593 0 1 f 493 ));
-DATA(insert ( 2593 0 2 f 494 ));
-DATA(insert ( 2593 0 3 f 500 ));
-DATA(insert ( 2593 0 4 f 495 ));
-DATA(insert ( 2593 0 5 f 496 ));
-DATA(insert ( 2593 0 6 f 499 ));
-DATA(insert ( 2593 0 7 f 498 ));
-DATA(insert ( 2593 0 8 f 497 ));
-DATA(insert ( 2593 0 9 f 2571 ));
-DATA(insert ( 2593 0 10 f 2570 ));
-DATA(insert ( 2593 0 11 f 2573 ));
-DATA(insert ( 2593 0 12 f 2572 ));
+DATA(insert ( 2593 0 1 f 493 ));
+DATA(insert ( 2593 0 2 f 494 ));
+DATA(insert ( 2593 0 3 f 500 ));
+DATA(insert ( 2593 0 4 f 495 ));
+DATA(insert ( 2593 0 5 f 496 ));
+DATA(insert ( 2593 0 6 f 499 ));
+DATA(insert ( 2593 0 7 f 498 ));
+DATA(insert ( 2593 0 8 f 497 ));
+DATA(insert ( 2593 0 9 f 2571 ));
+DATA(insert ( 2593 0 10 f 2570 ));
+DATA(insert ( 2593 0 11 f 2573 ));
+DATA(insert ( 2593 0 12 f 2572 ));
/*
* gist poly_ops (supports polygons)
*/
-DATA(insert ( 2594 0 1 t 485 ));
-DATA(insert ( 2594 0 2 t 486 ));
-DATA(insert ( 2594 0 3 t 492 ));
-DATA(insert ( 2594 0 4 t 487 ));
-DATA(insert ( 2594 0 5 t 488 ));
-DATA(insert ( 2594 0 6 t 491 ));
-DATA(insert ( 2594 0 7 t 490 ));
-DATA(insert ( 2594 0 8 t 489 ));
-DATA(insert ( 2594 0 9 t 2575 ));
-DATA(insert ( 2594 0 10 t 2574 ));
-DATA(insert ( 2594 0 11 t 2577 ));
-DATA(insert ( 2594 0 12 t 2576 ));
+DATA(insert ( 2594 0 1 t 485 ));
+DATA(insert ( 2594 0 2 t 486 ));
+DATA(insert ( 2594 0 3 t 492 ));
+DATA(insert ( 2594 0 4 t 487 ));
+DATA(insert ( 2594 0 5 t 488 ));
+DATA(insert ( 2594 0 6 t 491 ));
+DATA(insert ( 2594 0 7 t 490 ));
+DATA(insert ( 2594 0 8 t 489 ));
+DATA(insert ( 2594 0 9 t 2575 ));
+DATA(insert ( 2594 0 10 t 2574 ));
+DATA(insert ( 2594 0 11 t 2577 ));
+DATA(insert ( 2594 0 12 t 2576 ));
/*
* gist circle_ops
*/
-DATA(insert ( 2595 0 1 t 1506 ));
-DATA(insert ( 2595 0 2 t 1507 ));
-DATA(insert ( 2595 0 3 t 1513 ));
-DATA(insert ( 2595 0 4 t 1508 ));
-DATA(insert ( 2595 0 5 t 1509 ));
-DATA(insert ( 2595 0 6 t 1512 ));
-DATA(insert ( 2595 0 7 t 1511 ));
-DATA(insert ( 2595 0 8 t 1510 ));
-DATA(insert ( 2595 0 9 t 2589 ));
-DATA(insert ( 2595 0 10 t 1515 ));
-DATA(insert ( 2595 0 11 t 1514 ));
-DATA(insert ( 2595 0 12 t 2590 ));
+DATA(insert ( 2595 0 1 t 1506 ));
+DATA(insert ( 2595 0 2 t 1507 ));
+DATA(insert ( 2595 0 3 t 1513 ));
+DATA(insert ( 2595 0 4 t 1508 ));
+DATA(insert ( 2595 0 5 t 1509 ));
+DATA(insert ( 2595 0 6 t 1512 ));
+DATA(insert ( 2595 0 7 t 1511 ));
+DATA(insert ( 2595 0 8 t 1510 ));
+DATA(insert ( 2595 0 9 t 2589 ));
+DATA(insert ( 2595 0 10 t 1515 ));
+DATA(insert ( 2595 0 11 t 1514 ));
+DATA(insert ( 2595 0 12 t 2590 ));
#endif /* PG_AMOP_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_attribute.h,v 1.118 2005/06/28 05:09:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_attribute.h,v 1.119 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS
{
- Oid attrelid; /* OID of relation containing this
- * attribute */
+ Oid attrelid; /* OID of relation containing this attribute */
NameData attname; /* name of attribute */
/*
* atttypid is the OID of the instance in Catalog Class pg_type that
- * defines the data type of this attribute (e.g. int4). Information
- * in that instance is redundant with the attlen, attbyval, and
- * attalign attributes of this instance, so they had better match or
- * Postgres will fail.
+ * defines the data type of this attribute (e.g. int4). Information in
+ * that instance is redundant with the attlen, attbyval, and attalign
+ * attributes of this instance, so they had better match or Postgres will
+ * fail.
*/
Oid atttypid;
/*
- * attstattarget is the target number of statistics datapoints to
- * collect during VACUUM ANALYZE of this column. A zero here
- * indicates that we do not wish to collect any stats about this
- * column. A "-1" here indicates that no value has been explicitly set
- * for this column, so ANALYZE should use the default setting.
+ * attstattarget is the target number of statistics datapoints to collect
+ * during VACUUM ANALYZE of this column. A zero here indicates that we do
+ * not wish to collect any stats about this column. A "-1" here indicates
+ * that no value has been explicitly set for this column, so ANALYZE
+ * should use the default setting.
*/
int4 attstattarget;
/*
- * attlen is a copy of the typlen field from pg_type for this
- * attribute. See atttypid comments above.
+ * attlen is a copy of the typlen field from pg_type for this attribute.
+ * See atttypid comments above.
*/
int2 attlen;
/*
* attnum is the "attribute number" for the attribute: A value that
* uniquely identifies this attribute within its class. For user
- * attributes, Attribute numbers are greater than 0 and not greater
- * than the number of attributes in the class. I.e. if the Class
- * pg_class says that Class XYZ has 10 attributes, then the user
- * attribute numbers in Class pg_attribute must be 1-10.
+ * attributes, Attribute numbers are greater than 0 and not greater than
+ * the number of attributes in the class. I.e. if the Class pg_class says
+ * that Class XYZ has 10 attributes, then the user attribute numbers in
+ * Class pg_attribute must be 1-10.
*
* System attributes have attribute numbers less than 0 that are unique
* within the class, but not constrained to any particular range.
int4 attndims;
/*
- * fastgetattr() uses attcacheoff to cache byte offsets of attributes
- * in heap tuples. The value actually stored in pg_attribute (-1)
- * indicates no cached value. But when we copy these tuples into a
- * tuple descriptor, we may then update attcacheoff in the copies.
- * This speeds up the attribute walking process.
+ * fastgetattr() uses attcacheoff to cache byte offsets of attributes in
+ * heap tuples. The value actually stored in pg_attribute (-1) indicates
+ * no cached value. But when we copy these tuples into a tuple
+ * descriptor, we may then update attcacheoff in the copies. This speeds
+ * up the attribute walking process.
*/
int4 attcacheoff;
/*
- * atttypmod records type-specific data supplied at table creation
- * time (for example, the max length of a varchar field). It is
- * passed to type-specific input and output functions as the third
- * argument. The value will generally be -1 for types that do not need
- * typmod.
+ * atttypmod records type-specific data supplied at table creation time
+ * (for example, the max length of a varchar field). It is passed to
+ * type-specific input and output functions as the third argument. The
+ * value will generally be -1 for types that do not need typmod.
*/
int4 atttypmod;
{ 1255, {"pronargs"}, 21, -1, 2, 10, 0, -1, -1, true, 'p', 's', true, false, false, true, 0 }, \
{ 1255, {"prorettype"}, 26, -1, 4, 11, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1255, {"proargtypes"}, 30, -1, -1, 12, 1, -1, -1, false, 'p', 'i', true, false, false, true, 0 }, \
-{ 1255, {"proallargtypes"}, 1028, -1, -1, 13, 1, -1, -1, false, 'x', 'i', false, false, false, true, 0 }, \
+{ 1255, {"proallargtypes"}, 1028, -1, -1, 13, 1, -1, -1, false, 'x', 'i', false, false, false, true, 0 }, \
{ 1255, {"proargmodes"}, 1002, -1, -1, 14, 1, -1, -1, false, 'x', 'i', false, false, false, true, 0 }, \
{ 1255, {"proargnames"}, 1009, -1, -1, 15, 1, -1, -1, false, 'x', 'i', false, false, false, true, 0 }, \
{ 1255, {"prosrc"}, 25, -1, -1, 16, 0, -1, -1, false, 'x', 'i', false, false, false, true, 0 }, \
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_authid.h,v 1.3 2005/07/31 17:19:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_authid.h,v 1.4 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
/*
* The CATALOG definition has to refer to the type of rolvaliduntil as
* "timestamptz" (lower case) so that bootstrap mode recognizes it. But
- * the C header files define this type as TimestampTz. Since the field is
+ * the C header files define this type as TimestampTz. Since the field is
* potentially-null and therefore can't be accessed directly from C code,
* there is no particular need for the C struct definition to show the
* field type as TimestampTz --- instead we just make it Datum.
* typedef struct FormData_pg_authid
* ----------------
*/
-#define AuthIdRelationId 1260
+#define AuthIdRelationId 1260
CATALOG(pg_authid,1260) BKI_SHARED_RELATION
{
/* remaining fields may be null; use heap_getattr to read them! */
text rolpassword; /* password, if any */
- timestamptz rolvaliduntil; /* password expiration time, if any */
+ timestamptz rolvaliduntil; /* password expiration time, if any */
text rolconfig[1]; /* GUC settings to apply at login */
} FormData_pg_authid;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_autovacuum.h,v 1.2 2005/08/11 21:11:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_autovacuum.h,v 1.3 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* typedef struct FormData_pg_autovacuum
* ----------------
*/
-#define AutovacuumRelationId 1248
+#define AutovacuumRelationId 1248
CATALOG(pg_autovacuum,1248) BKI_WITHOUT_OIDS
{
- Oid vacrelid; /* OID of table */
- bool enabled; /* enabled for this table? */
+ Oid vacrelid; /* OID of table */
+ bool enabled; /* enabled for this table? */
int4 vac_base_thresh; /* base threshold value */
- float4 vac_scale_factor; /* reltuples scaling factor */
+ float4 vac_scale_factor; /* reltuples scaling factor */
int4 anl_base_thresh; /* base threshold value */
- float4 anl_scale_factor; /* reltuples scaling factor */
- int4 vac_cost_delay; /* vacuum cost-based delay */
- int4 vac_cost_limit; /* vacuum cost limit */
+ float4 anl_scale_factor; /* reltuples scaling factor */
+ int4 vac_cost_delay; /* vacuum cost-based delay */
+ int4 vac_cost_limit; /* vacuum cost limit */
} FormData_pg_autovacuum;
/* ----------------
*
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_cast.h,v 1.22 2005/10/02 23:50:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_cast.h,v 1.23 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
#ifndef PG_CAST_H
#define PG_CAST_H
-#define CastRelationId 2605
+#define CastRelationId 2605
CATALOG(pg_cast,2605)
{
typedef enum CoercionCodes
{
- COERCION_CODE_IMPLICIT = 'i', /* coercion in context of
- * expression */
- COERCION_CODE_ASSIGNMENT = 'a', /* coercion in context of
- * assignment */
- COERCION_CODE_EXPLICIT = 'e' /* explicit cast operation */
+ COERCION_CODE_IMPLICIT = 'i', /* coercion in context of expression */
+ COERCION_CODE_ASSIGNMENT = 'a', /* coercion in context of assignment */
+ COERCION_CODE_EXPLICIT = 'e' /* explicit cast operation */
} CoercionCodes;
DATA(insert ( 1700 701 1746 i ));
/* Allow explicit coercions between int4 and bool */
-DATA(insert ( 23 16 2557 e ));
-DATA(insert ( 16 23 2558 e ));
+DATA(insert ( 23 16 2557 e ));
+DATA(insert ( 16 23 2558 e ));
/*
* OID category: allow implicit conversion from any integral type (including
* from OID to int4 or int8. Similarly for each OID-alias type. Also allow
* implicit coercions between OID and each OID-alias type, as well as
* regproc<->regprocedure and regoper<->regoperator. (Other coercions
- * between alias types must pass through OID.) Lastly, there is an implicit
+ * between alias types must pass through OID.) Lastly, there is an implicit
* cast from text to regclass, which exists mainly to support legacy forms
* of nextval() and related functions.
*/
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.89 2005/06/28 05:09:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.90 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* to get the relacl field ... and don't forget to check isNull.
* ----------------
*/
-#define RelationRelationId 1259
+#define RelationRelationId 1259
CATALOG(pg_class,1259) BKI_BOOTSTRAP
{
/*
* Class pg_attribute must contain exactly "relnatts" user attributes
- * (with attnums ranging from 1 to relnatts) for this class. It may
- * also contain entries with negative attnums for system attributes.
+ * (with attnums ranging from 1 to relnatts) for this class. It may also
+ * contain entries with negative attnums for system attributes.
*/
int2 relchecks; /* # of CHECK constraints for class */
int2 reltriggers; /* # of TRIGGERs */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_constraint.h,v 1.17 2005/08/01 04:03:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_constraint.h,v 1.18 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* conname + connamespace is deliberately not unique; we allow, for
* example, the same name to be used for constraints of different
* relations. This is partly for backwards compatibility with past
- * Postgres practice, and partly because we don't want to have to
- * obtain a global lock to generate a globally unique name for a
- * nameless constraint. We associate a namespace with constraint
- * names only for SQL92 compatibility.
+ * Postgres practice, and partly because we don't want to have to obtain a
+ * global lock to generate a globally unique name for a nameless
+ * constraint. We associate a namespace with constraint names only for
+ * SQL92 compatibility.
*/
NameData conname; /* name of this constraint */
Oid connamespace; /* OID of namespace containing constraint */
bool condeferred; /* deferred by default? */
/*
- * conrelid and conkey are only meaningful if the constraint applies
- * to a specific relation (this excludes domain constraints and
- * assertions). Otherwise conrelid is 0 and conkey is NULL.
+ * conrelid and conkey are only meaningful if the constraint applies to a
+ * specific relation (this excludes domain constraints and assertions).
+ * Otherwise conrelid is 0 and conkey is NULL.
*/
Oid conrelid; /* relation this constraint constrains */
* contypid links to the pg_type row for a domain if this is a domain
* constraint. Otherwise it's 0.
*
- * For SQL-style global ASSERTIONs, both conrelid and contypid would be
- * zero. This is not presently supported, however.
+ * For SQL-style global ASSERTIONs, both conrelid and contypid would be zero.
+ * This is not presently supported, however.
*/
Oid contypid; /* domain this constraint constrains */
/*
* These fields, plus confkey, are only meaningful for a foreign-key
- * constraint. Otherwise confrelid is 0 and the char fields are
- * spaces.
+ * constraint. Otherwise confrelid is 0 and the char fields are spaces.
*/
Oid confrelid; /* relation referenced by foreign key */
char confupdtype; /* foreign key's ON UPDATE action */
extern char *GetConstraintNameForTrigger(Oid triggerId);
extern void AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
- Oid newNspId, bool isType);
+ Oid newNspId, bool isType);
#endif /* PG_CONSTRAINT_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.24 2005/10/03 00:28:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.25 2005/10/15 02:49:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct CheckPoint
{
XLogRecPtr redo; /* next RecPtr available when we began to
- * create CheckPoint (i.e. REDO start
- * point) */
+ * create CheckPoint (i.e. REDO start point) */
XLogRecPtr undo; /* first record of oldest in-progress
- * transaction when we started (i.e. UNDO
- * end point) */
+ * transaction when we started (i.e. UNDO end
+ * point) */
TimeLineID ThisTimeLineID; /* current TLI */
TransactionId nextXid; /* next free XID */
Oid nextOid; /* next free OID */
- MultiXactId nextMulti; /* next free MultiXactId */
+ MultiXactId nextMulti; /* next free MultiXactId */
MultiXactOffset nextMultiOffset; /* next free MultiXact offset */
time_t time; /* time stamp of checkpoint */
} CheckPoint;
typedef struct ControlFileData
{
/*
- * Unique system identifier --- to ensure we match up xlog files with
- * the installation that produced them.
+ * Unique system identifier --- to ensure we match up xlog files with the
+ * installation that produced them.
*/
uint64 system_identifier;
/*
* Version identifier information. Keep these fields at the same offset,
- * especially pg_control_version; they won't be real useful if they
- * move around. (For historical reasons they must be 8 bytes into
- * the file rather than immediately at the front.)
+ * especially pg_control_version; they won't be real useful if they move
+ * around. (For historical reasons they must be 8 bytes into the file
+ * rather than immediately at the front.)
*
* pg_control_version identifies the format of pg_control itself.
* catalog_version_no identifies the format of the system catalogs.
*
- * There are additional version identifiers in individual files; for
- * example, WAL logs contain per-page magic numbers that can serve as
- * version cues for the WAL log.
+ * There are additional version identifiers in individual files; for example,
+ * WAL logs contain per-page magic numbers that can serve as version cues
+ * for the WAL log.
*/
uint32 pg_control_version; /* PG_CONTROL_VERSION */
uint32 catalog_version_no; /* see catversion.h */
CheckPoint checkPointCopy; /* copy of last check point record */
/*
- * This data is used to check for hardware-architecture compatibility
- * of the database and the backend executable. We need not check
- * endianness explicitly, since the pg_control version will surely
- * look wrong to a machine of different endianness, but we do need
- * to worry about MAXALIGN and floating-point format. (Note: storage
- * layout nominally also depends on SHORTALIGN and INTALIGN, but in
- * practice these are the same on all architectures of interest.)
+ * This data is used to check for hardware-architecture compatibility of
+ * the database and the backend executable. We need not check endianness
+ * explicitly, since the pg_control version will surely look wrong to a
+ * machine of different endianness, but we do need to worry about MAXALIGN
+ * and floating-point format. (Note: storage layout nominally also
+ * depends on SHORTALIGN and INTALIGN, but in practice these are the same
+ * on all architectures of interest.)
*
* Testing just one double value is not a very bulletproof test for
* floating-point compatibility, but it will catch most cases.
#define FLOATFORMAT_VALUE 1234567.0
/*
- * This data is used to make sure that configuration of this database
- * is compatible with the backend executable.
+ * This data is used to make sure that configuration of this database is
+ * compatible with the backend executable.
*/
uint32 blcksz; /* block size for this DB */
uint32 relseg_size; /* blocks per segment of large relation */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_database.h,v 1.37 2005/07/31 17:19:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_database.h,v 1.38 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* typedef struct FormData_pg_database
* ----------------
*/
-#define DatabaseRelationId 1262
+#define DatabaseRelationId 1262
CATALOG(pg_database,1262) BKI_SHARED_RELATION
{
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_index.h,v 1.37 2005/04/14 01:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_index.h,v 1.38 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
/* VARIABLE LENGTH FIELDS: */
int2vector indkey; /* column numbers of indexed cols, or 0 */
oidvector indclass; /* opclass identifiers */
- text indexprs; /* expression trees for index attributes
- * that are not simple column references;
- * one for each zero entry in indkey[] */
- text indpred; /* expression tree for predicate, if a
- * partial index; else NULL */
+ text indexprs; /* expression trees for index attributes that
+ * are not simple column references; one for
+ * each zero entry in indkey[] */
+ text indpred; /* expression tree for predicate, if a partial
+ * index; else NULL */
} FormData_pg_index;
/* ----------------
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_inherits.h,v 1.20 2005/04/14 01:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_inherits.h,v 1.21 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* typedef struct FormData_pg_inherits
* ----------------
*/
-#define InheritsRelationId 2611
+#define InheritsRelationId 2611
CATALOG(pg_inherits,2611) BKI_WITHOUT_OIDS
{
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_language.h,v 1.26 2005/04/14 01:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_language.h,v 1.27 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* typedef struct FormData_pg_language
* ----------------
*/
-#define LanguageRelationId 2612
+#define LanguageRelationId 2612
CATALOG(pg_language,2612)
{
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_listener.h,v 1.19 2005/04/14 01:38:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_listener.h,v 1.20 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* cpp turns this into typedef struct FormData_pg_listener
* ----------------------------------------------------------------
*/
-#define ListenerRelationId 2614
+#define ListenerRelationId 2614
CATALOG(pg_listener,2614) BKI_WITHOUT_OIDS
{
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_namespace.h,v 1.18 2005/06/28 05:09:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_namespace.h,v 1.19 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
/*
* prototypes for functions in pg_namespace.c
*/
-extern Oid NamespaceCreate(const char *nspName, Oid ownerId);
+extern Oid NamespaceCreate(const char *nspName, Oid ownerId);
#endif /* PG_NAMESPACE_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_operator.h,v 1.136 2005/07/01 19:19:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_operator.h,v 1.137 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* typedef struct FormData_pg_operator
* ----------------
*/
-#define OperatorRelationId 2617
+#define OperatorRelationId 2617
CATALOG(pg_operator,2617)
{
DATA(insert OID = 1511 ( "~" PGNSP PGUID b f 718 718 16 1510 0 0 0 0 0 circle_contain contsel contjoinsel ));
DATA(insert OID = 1512 ( "~=" PGNSP PGUID b f 718 718 16 1512 0 0 0 0 0 circle_same eqsel eqjoinsel ));
DATA(insert OID = 1513 ( "&&" PGNSP PGUID b f 718 718 16 1513 0 0 0 0 0 circle_overlap areasel areajoinsel ));
-DATA(insert OID = 1514 ( "|>>" PGNSP PGUID b f 718 718 16 0 0 0 0 0 0 circle_above positionsel positionjoinsel ));
-DATA(insert OID = 1515 ( "<<|" PGNSP PGUID b f 718 718 16 0 0 0 0 0 0 circle_below positionsel positionjoinsel ));
+DATA(insert OID = 1514 ( "|>>" PGNSP PGUID b f 718 718 16 0 0 0 0 0 0 circle_above positionsel positionjoinsel ));
+DATA(insert OID = 1515 ( "<<|" PGNSP PGUID b f 718 718 16 0 0 0 0 0 0 circle_below positionsel positionjoinsel ));
DATA(insert OID = 1516 ( "+" PGNSP PGUID b f 718 600 718 0 0 0 0 0 0 circle_add_pt - - ));
DATA(insert OID = 1517 ( "-" PGNSP PGUID b f 718 600 718 0 0 0 0 0 0 circle_sub_pt - - ));
DATA(insert OID = 2555 ( "+" PGNSP PGUID b f 23 1082 1082 1100 0 0 0 0 0 integer_pl_date - - ));
/* new operators for Y-direction rtree opclasses */
-DATA(insert OID = 2570 ( "<<|" PGNSP PGUID b f 603 603 16 0 0 0 0 0 0 box_below positionsel positionjoinsel ));
-DATA(insert OID = 2571 ( "&<|" PGNSP PGUID b f 603 603 16 0 0 0 0 0 0 box_overbelow positionsel positionjoinsel ));
-DATA(insert OID = 2572 ( "|&>" PGNSP PGUID b f 603 603 16 0 0 0 0 0 0 box_overabove positionsel positionjoinsel ));
-DATA(insert OID = 2573 ( "|>>" PGNSP PGUID b f 603 603 16 0 0 0 0 0 0 box_above positionsel positionjoinsel ));
-DATA(insert OID = 2574 ( "<<|" PGNSP PGUID b f 604 604 16 0 0 0 0 0 0 poly_below positionsel positionjoinsel ));
-DATA(insert OID = 2575 ( "&<|" PGNSP PGUID b f 604 604 16 0 0 0 0 0 0 poly_overbelow positionsel positionjoinsel ));
-DATA(insert OID = 2576 ( "|&>" PGNSP PGUID b f 604 604 16 0 0 0 0 0 0 poly_overabove positionsel positionjoinsel ));
-DATA(insert OID = 2577 ( "|>>" PGNSP PGUID b f 604 604 16 0 0 0 0 0 0 poly_above positionsel positionjoinsel ));
-DATA(insert OID = 2589 ( "&<|" PGNSP PGUID b f 718 718 16 0 0 0 0 0 0 circle_overbelow positionsel positionjoinsel ));
-DATA(insert OID = 2590 ( "|&>" PGNSP PGUID b f 718 718 16 0 0 0 0 0 0 circle_overabove positionsel positionjoinsel ));
+DATA(insert OID = 2570 ( "<<|" PGNSP PGUID b f 603 603 16 0 0 0 0 0 0 box_below positionsel positionjoinsel ));
+DATA(insert OID = 2571 ( "&<|" PGNSP PGUID b f 603 603 16 0 0 0 0 0 0 box_overbelow positionsel positionjoinsel ));
+DATA(insert OID = 2572 ( "|&>" PGNSP PGUID b f 603 603 16 0 0 0 0 0 0 box_overabove positionsel positionjoinsel ));
+DATA(insert OID = 2573 ( "|>>" PGNSP PGUID b f 603 603 16 0 0 0 0 0 0 box_above positionsel positionjoinsel ));
+DATA(insert OID = 2574 ( "<<|" PGNSP PGUID b f 604 604 16 0 0 0 0 0 0 poly_below positionsel positionjoinsel ));
+DATA(insert OID = 2575 ( "&<|" PGNSP PGUID b f 604 604 16 0 0 0 0 0 0 poly_overbelow positionsel positionjoinsel ));
+DATA(insert OID = 2576 ( "|&>" PGNSP PGUID b f 604 604 16 0 0 0 0 0 0 poly_overabove positionsel positionjoinsel ));
+DATA(insert OID = 2577 ( "|>>" PGNSP PGUID b f 604 604 16 0 0 0 0 0 0 poly_above positionsel positionjoinsel ));
+DATA(insert OID = 2589 ( "&<|" PGNSP PGUID b f 718 718 16 0 0 0 0 0 0 circle_overbelow positionsel positionjoinsel ));
+DATA(insert OID = 2590 ( "|&>" PGNSP PGUID b f 718 718 16 0 0 0 0 0 0 circle_overabove positionsel positionjoinsel ));
/*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.386 2005/10/02 23:50:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.387 2005/10/15 02:49:42 momjian Exp $
*
* NOTES
* The script catalog/genbki.sh reads this file and generates .bki
/* VARIABLE LENGTH FIELDS: */
oidvector proargtypes; /* parameter types (excludes OUT params) */
- Oid proallargtypes[1]; /* all param types (NULL if IN only) */
+ Oid proallargtypes[1]; /* all param types (NULL if IN only) */
char proargmodes[1]; /* parameter modes (NULL if IN only) */
text proargnames[1]; /* parameter names (NULL if no names) */
text prosrc; /* procedure source text */
DESCR("matches regex., case-sensitive");
DATA(insert OID = 1256 ( textregexne PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ _null_ _null_ textregexne - _null_ ));
DESCR("does not match regex., case-sensitive");
-DATA(insert OID = 1257 ( textlen PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ textlen - _null_ ));
+DATA(insert OID = 1257 ( textlen PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ textlen - _null_ ));
DESCR("length");
DATA(insert OID = 1258 ( textcat PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ _null_ _null_ textcat - _null_ ));
DESCR("concatenate");
DATA(insert OID = 110 ( unknownout PGNSP PGUID 12 f f t f i 1 2275 "705" _null_ _null_ _null_ unknownout - _null_ ));
DESCR("I/O");
DATA(insert OID = 111 ( numeric_fac PGNSP PGUID 12 f f t f i 1 1700 "20" _null_ _null_ _null_ numeric_fac - _null_ ));
-DATA(insert OID = 112 ( text PGNSP PGUID 12 f f t f i 1 25 "23" _null_ _null_ _null_ int4_text - _null_ ));
+DATA(insert OID = 112 ( text PGNSP PGUID 12 f f t f i 1 25 "23" _null_ _null_ _null_ int4_text - _null_ ));
DESCR("convert int4 to text");
-DATA(insert OID = 113 ( text PGNSP PGUID 12 f f t f i 1 25 "21" _null_ _null_ _null_ int2_text - _null_ ));
+DATA(insert OID = 113 ( text PGNSP PGUID 12 f f t f i 1 25 "21" _null_ _null_ _null_ int2_text - _null_ ));
DESCR("convert int2 to text");
-DATA(insert OID = 114 ( text PGNSP PGUID 12 f f t f i 1 25 "26" _null_ _null_ _null_ oid_text - _null_ ));
+DATA(insert OID = 114 ( text PGNSP PGUID 12 f f t f i 1 25 "26" _null_ _null_ _null_ oid_text - _null_ ));
DESCR("convert oid to text");
DATA(insert OID = 115 ( box_above_eq PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_above_eq - _null_ ));
DESCR("I/O");
DATA(insert OID = 124 ( box_out PGNSP PGUID 12 f f t f i 1 2275 "603" _null_ _null_ _null_ box_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 125 ( box_overlap PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overlap - _null_ ));
+DATA(insert OID = 125 ( box_overlap PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overlap - _null_ ));
DESCR("overlaps");
-DATA(insert OID = 126 ( box_ge PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_ge - _null_ ));
+DATA(insert OID = 126 ( box_ge PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_ge - _null_ ));
DESCR("greater-than-or-equal by area");
-DATA(insert OID = 127 ( box_gt PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_gt - _null_ ));
+DATA(insert OID = 127 ( box_gt PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_gt - _null_ ));
DESCR("greater-than by area");
-DATA(insert OID = 128 ( box_eq PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_eq - _null_ ));
+DATA(insert OID = 128 ( box_eq PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_eq - _null_ ));
DESCR("equal by area");
-DATA(insert OID = 129 ( box_lt PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_lt - _null_ ));
+DATA(insert OID = 129 ( box_lt PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_lt - _null_ ));
DESCR("less-than by area");
-DATA(insert OID = 130 ( box_le PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_le - _null_ ));
+DATA(insert OID = 130 ( box_le PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_le - _null_ ));
DESCR("less-than-or-equal by area");
-DATA(insert OID = 131 ( point_above PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_above - _null_ ));
+DATA(insert OID = 131 ( point_above PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_above - _null_ ));
DESCR("is above");
-DATA(insert OID = 132 ( point_left PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_left - _null_ ));
+DATA(insert OID = 132 ( point_left PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_left - _null_ ));
DESCR("is left of");
-DATA(insert OID = 133 ( point_right PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_right - _null_ ));
+DATA(insert OID = 133 ( point_right PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_right - _null_ ));
DESCR("is right of");
-DATA(insert OID = 134 ( point_below PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_below - _null_ ));
+DATA(insert OID = 134 ( point_below PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_below - _null_ ));
DESCR("is below");
-DATA(insert OID = 135 ( point_eq PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_eq - _null_ ));
+DATA(insert OID = 135 ( point_eq PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_eq - _null_ ));
DESCR("same as?");
-DATA(insert OID = 136 ( on_pb PGNSP PGUID 12 f f t f i 2 16 "600 603" _null_ _null_ _null_ on_pb - _null_ ));
+DATA(insert OID = 136 ( on_pb PGNSP PGUID 12 f f t f i 2 16 "600 603" _null_ _null_ _null_ on_pb - _null_ ));
DESCR("point inside box?");
-DATA(insert OID = 137 ( on_ppath PGNSP PGUID 12 f f t f i 2 16 "600 602" _null_ _null_ _null_ on_ppath - _null_ ));
+DATA(insert OID = 137 ( on_ppath PGNSP PGUID 12 f f t f i 2 16 "600 602" _null_ _null_ _null_ on_ppath - _null_ ));
DESCR("point within closed path, or point on open path");
DATA(insert OID = 138 ( box_center PGNSP PGUID 12 f f t f i 1 600 "603" _null_ _null_ _null_ box_center - _null_ ));
DESCR("center of");
DESCR("equal");
DATA(insert OID = 185 ( oidne PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ _null_ _null_ oidne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 186 ( box_same PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_same - _null_ ));
+DATA(insert OID = 186 ( box_same PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_same - _null_ ));
DESCR("same as?");
-DATA(insert OID = 187 ( box_contain PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_contain - _null_ ));
+DATA(insert OID = 187 ( box_contain PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_contain - _null_ ));
DESCR("contains?");
-DATA(insert OID = 188 ( box_left PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_left - _null_ ));
+DATA(insert OID = 188 ( box_left PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_left - _null_ ));
DESCR("is left of");
-DATA(insert OID = 189 ( box_overleft PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overleft - _null_ ));
+DATA(insert OID = 189 ( box_overleft PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overleft - _null_ ));
DESCR("overlaps or is left of");
-DATA(insert OID = 190 ( box_overright PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overright - _null_ ));
+DATA(insert OID = 190 ( box_overright PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overright - _null_ ));
DESCR("overlaps or is right of");
-DATA(insert OID = 191 ( box_right PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_right - _null_ ));
+DATA(insert OID = 191 ( box_right PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_right - _null_ ));
DESCR("is right of");
-DATA(insert OID = 192 ( box_contained PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_contained - _null_ ));
+DATA(insert OID = 192 ( box_contained PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_contained - _null_ ));
DESCR("contained in?");
DATA(insert OID = 193 ( rt_box_union PGNSP PGUID 12 f f t f i 2 603 "603 603" _null_ _null_ _null_ rt_box_union - _null_ ));
DESCR("r-tree");
DATA(insert OID = 194 ( rt_box_inter PGNSP PGUID 12 f f t f i 2 2278 "603 603" _null_ _null_ _null_ rt_box_inter - _null_ ));
DESCR("r-tree");
-DATA(insert OID = 195 ( rt_box_size PGNSP PGUID 12 f f t f i 2 2278 "603 2281" _null_ _null_ _null_ rt_box_size - _null_ ));
+DATA(insert OID = 195 ( rt_box_size PGNSP PGUID 12 f f t f i 2 2278 "603 2281" _null_ _null_ _null_ rt_box_size - _null_ ));
DESCR("r-tree");
DATA(insert OID = 197 ( rt_poly_union PGNSP PGUID 12 f f t f i 2 604 "604 604" _null_ _null_ _null_ rt_poly_union - _null_ ));
DESCR("r-tree");
DATA(insert OID = 198 ( rt_poly_inter PGNSP PGUID 12 f f t f i 2 2278 "604 604" _null_ _null_ _null_ rt_poly_inter - _null_ ));
DESCR("r-tree");
-DATA(insert OID = 199 ( rt_poly_size PGNSP PGUID 12 f f t f i 2 2278 "604 2281" _null_ _null_ _null_ rt_poly_size - _null_ ));
+DATA(insert OID = 199 ( rt_poly_size PGNSP PGUID 12 f f t f i 2 2278 "604 2281" _null_ _null_ _null_ rt_poly_size - _null_ ));
DESCR("r-tree");
/* OIDS 200 - 299 */
DESCR("negate");
DATA(insert OID = 207 ( float4abs PGNSP PGUID 12 f f t f i 1 700 "700" _null_ _null_ _null_ float4abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 208 ( float4_accum PGNSP PGUID 12 f f t f i 2 1022 "1022 700" _null_ _null_ _null_ float4_accum - _null_ ));
+DATA(insert OID = 208 ( float4_accum PGNSP PGUID 12 f f t f i 2 1022 "1022 700" _null_ _null_ _null_ float4_accum - _null_ ));
DESCR("aggregate transition function");
DATA(insert OID = 209 ( float4larger PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ _null_ _null_ float4larger - _null_ ));
DESCR("larger of two");
DATA(insert OID = 211 ( float4smaller PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ _null_ _null_ float4smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 212 ( int4um PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4um - _null_ ));
+DATA(insert OID = 212 ( int4um PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4um - _null_ ));
DESCR("negate");
-DATA(insert OID = 213 ( int2um PGNSP PGUID 12 f f t f i 1 21 "21" _null_ _null_ _null_ int2um - _null_ ));
+DATA(insert OID = 213 ( int2um PGNSP PGUID 12 f f t f i 1 21 "21" _null_ _null_ _null_ int2um - _null_ ));
DESCR("negate");
DATA(insert OID = 214 ( float8in PGNSP PGUID 12 f f t f i 1 701 "2275" _null_ _null_ _null_ float8in - _null_ ));
DESCR("negate");
DATA(insert OID = 221 ( float8abs PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ float8abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 222 ( float8_accum PGNSP PGUID 12 f f t f i 2 1022 "1022 701" _null_ _null_ _null_ float8_accum - _null_ ));
+DATA(insert OID = 222 ( float8_accum PGNSP PGUID 12 f f t f i 2 1022 "1022 701" _null_ _null_ _null_ float8_accum - _null_ ));
DESCR("aggregate transition function");
DATA(insert OID = 223 ( float8larger PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ _null_ _null_ float8larger - _null_ ));
DESCR("larger of two");
DESCR("I/O");
DATA(insert OID = 247 ( tintervalout PGNSP PGUID 12 f f t f s 1 2275 "704" _null_ _null_ _null_ tintervalout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 248 ( intinterval PGNSP PGUID 12 f f t f i 2 16 "702 704" _null_ _null_ _null_ intinterval - _null_ ));
+DATA(insert OID = 248 ( intinterval PGNSP PGUID 12 f f t f i 2 16 "702 704" _null_ _null_ _null_ intinterval - _null_ ));
DESCR("abstime in tinterval");
DATA(insert OID = 249 ( tintervalrel PGNSP PGUID 12 f f t f i 1 703 "704" _null_ _null_ _null_ tintervalrel - _null_ ));
DESCR("tinterval to reltime");
DATA(insert OID = 250 ( timenow PGNSP PGUID 12 f f t f s 0 702 "" _null_ _null_ _null_ timenow - _null_ ));
DESCR("Current date and time (abstime)");
-DATA(insert OID = 251 ( abstimeeq PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimeeq - _null_ ));
+DATA(insert OID = 251 ( abstimeeq PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimeeq - _null_ ));
DESCR("equal");
-DATA(insert OID = 252 ( abstimene PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimene - _null_ ));
+DATA(insert OID = 252 ( abstimene PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimene - _null_ ));
DESCR("not equal");
-DATA(insert OID = 253 ( abstimelt PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimelt - _null_ ));
+DATA(insert OID = 253 ( abstimelt PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimelt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 254 ( abstimegt PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimegt - _null_ ));
+DATA(insert OID = 254 ( abstimegt PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimegt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 255 ( abstimele PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimele - _null_ ));
+DATA(insert OID = 255 ( abstimele PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimele - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 256 ( abstimege PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimege - _null_ ));
+DATA(insert OID = 256 ( abstimege PGNSP PGUID 12 f f t f i 2 16 "702 702" _null_ _null_ _null_ abstimege - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 257 ( reltimeeq PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimeeq - _null_ ));
+DATA(insert OID = 257 ( reltimeeq PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimeeq - _null_ ));
DESCR("equal");
-DATA(insert OID = 258 ( reltimene PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimene - _null_ ));
+DATA(insert OID = 258 ( reltimene PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimene - _null_ ));
DESCR("not equal");
-DATA(insert OID = 259 ( reltimelt PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimelt - _null_ ));
+DATA(insert OID = 259 ( reltimelt PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimelt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 260 ( reltimegt PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimegt - _null_ ));
+DATA(insert OID = 260 ( reltimegt PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimegt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 261 ( reltimele PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimele - _null_ ));
+DATA(insert OID = 261 ( reltimele PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimele - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 262 ( reltimege PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimege - _null_ ));
+DATA(insert OID = 262 ( reltimege PGNSP PGUID 12 f f t f i 2 16 "703 703" _null_ _null_ _null_ reltimege - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 263 ( tintervalsame PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalsame - _null_ ));
+DATA(insert OID = 263 ( tintervalsame PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalsame - _null_ ));
DESCR("same as?");
-DATA(insert OID = 264 ( tintervalct PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalct - _null_ ));
+DATA(insert OID = 264 ( tintervalct PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalct - _null_ ));
DESCR("less-than");
-DATA(insert OID = 265 ( tintervalov PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalov - _null_ ));
+DATA(insert OID = 265 ( tintervalov PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalov - _null_ ));
DESCR("overlaps");
-DATA(insert OID = 266 ( tintervalleneq PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervalleneq - _null_ ));
+DATA(insert OID = 266 ( tintervalleneq PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervalleneq - _null_ ));
DESCR("length equal");
-DATA(insert OID = 267 ( tintervallenne PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallenne - _null_ ));
+DATA(insert OID = 267 ( tintervallenne PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallenne - _null_ ));
DESCR("length not equal to");
-DATA(insert OID = 268 ( tintervallenlt PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallenlt - _null_ ));
+DATA(insert OID = 268 ( tintervallenlt PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallenlt - _null_ ));
DESCR("length less-than");
-DATA(insert OID = 269 ( tintervallengt PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallengt - _null_ ));
+DATA(insert OID = 269 ( tintervallengt PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallengt - _null_ ));
DESCR("length greater-than");
-DATA(insert OID = 270 ( tintervallenle PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallenle - _null_ ));
+DATA(insert OID = 270 ( tintervallenle PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallenle - _null_ ));
DESCR("length less-than-or-equal");
-DATA(insert OID = 271 ( tintervallenge PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallenge - _null_ ));
+DATA(insert OID = 271 ( tintervallenge PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ _null_ _null_ tintervallenge - _null_ ));
DESCR("length greater-than-or-equal");
DATA(insert OID = 272 ( tintervalstart PGNSP PGUID 12 f f t f i 1 702 "704" _null_ _null_ _null_ tintervalstart - _null_ ));
DESCR("start of interval");
DESCR("end of interval");
DATA(insert OID = 274 ( timeofday PGNSP PGUID 12 f f t f v 0 25 "" _null_ _null_ _null_ timeofday - _null_ ));
DESCR("Current date and time - increments during transactions");
-DATA(insert OID = 275 ( isfinite PGNSP PGUID 12 f f t f i 1 16 "702" _null_ _null_ _null_ abstime_finite - _null_ ));
+DATA(insert OID = 275 ( isfinite PGNSP PGUID 12 f f t f i 1 16 "702" _null_ _null_ _null_ abstime_finite - _null_ ));
DESCR("finite abstime?");
-DATA(insert OID = 277 ( inter_sl PGNSP PGUID 12 f f t f i 2 16 "601 628" _null_ _null_ _null_ inter_sl - _null_ ));
+DATA(insert OID = 277 ( inter_sl PGNSP PGUID 12 f f t f i 2 16 "601 628" _null_ _null_ _null_ inter_sl - _null_ ));
DESCR("intersect?");
-DATA(insert OID = 278 ( inter_lb PGNSP PGUID 12 f f t f i 2 16 "628 603" _null_ _null_ _null_ inter_lb - _null_ ));
+DATA(insert OID = 278 ( inter_lb PGNSP PGUID 12 f f t f i 2 16 "628 603" _null_ _null_ _null_ inter_lb - _null_ ));
DESCR("intersect?");
DATA(insert OID = 279 ( float48mul PGNSP PGUID 12 f f t f i 2 701 "700 701" _null_ _null_ _null_ float48mul - _null_ ));
DATA(insert OID = 286 ( float84mi PGNSP PGUID 12 f f t f i 2 701 "701 700" _null_ _null_ _null_ float84mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 287 ( float4eq PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4eq - _null_ ));
+DATA(insert OID = 287 ( float4eq PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 288 ( float4ne PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4ne - _null_ ));
+DATA(insert OID = 288 ( float4ne PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 289 ( float4lt PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4lt - _null_ ));
+DATA(insert OID = 289 ( float4lt PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 290 ( float4le PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4le - _null_ ));
+DATA(insert OID = 290 ( float4le PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 291 ( float4gt PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4gt - _null_ ));
+DATA(insert OID = 291 ( float4gt PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 292 ( float4ge PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4ge - _null_ ));
+DATA(insert OID = 292 ( float4ge PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ _null_ _null_ float4ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 293 ( float8eq PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8eq - _null_ ));
+DATA(insert OID = 293 ( float8eq PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 294 ( float8ne PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8ne - _null_ ));
+DATA(insert OID = 294 ( float8ne PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 295 ( float8lt PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8lt - _null_ ));
+DATA(insert OID = 295 ( float8lt PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 296 ( float8le PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8le - _null_ ));
+DATA(insert OID = 296 ( float8le PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 297 ( float8gt PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8gt - _null_ ));
+DATA(insert OID = 297 ( float8gt PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 298 ( float8ge PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8ge - _null_ ));
+DATA(insert OID = 298 ( float8ge PGNSP PGUID 12 f f t f i 2 16 "701 701" _null_ _null_ _null_ float8ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 299 ( float48eq PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48eq - _null_ ));
+DATA(insert OID = 299 ( float48eq PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48eq - _null_ ));
DESCR("equal");
/* OIDS 300 - 399 */
-DATA(insert OID = 300 ( float48ne PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48ne - _null_ ));
+DATA(insert OID = 300 ( float48ne PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 301 ( float48lt PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48lt - _null_ ));
+DATA(insert OID = 301 ( float48lt PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 302 ( float48le PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48le - _null_ ));
+DATA(insert OID = 302 ( float48le PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 303 ( float48gt PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48gt - _null_ ));
+DATA(insert OID = 303 ( float48gt PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 304 ( float48ge PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48ge - _null_ ));
+DATA(insert OID = 304 ( float48ge PGNSP PGUID 12 f f t f i 2 16 "700 701" _null_ _null_ _null_ float48ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 305 ( float84eq PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84eq - _null_ ));
+DATA(insert OID = 305 ( float84eq PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 306 ( float84ne PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84ne - _null_ ));
+DATA(insert OID = 306 ( float84ne PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 307 ( float84lt PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84lt - _null_ ));
+DATA(insert OID = 307 ( float84lt PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 308 ( float84le PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84le - _null_ ));
+DATA(insert OID = 308 ( float84le PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 309 ( float84gt PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84gt - _null_ ));
+DATA(insert OID = 309 ( float84gt PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 310 ( float84ge PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84ge - _null_ ));
+DATA(insert OID = 310 ( float84ge PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ _null_ _null_ float84ge - _null_ ));
DESCR("greater-than-or-equal");
DATA(insert OID = 311 ( float8 PGNSP PGUID 12 f f t f i 1 701 "700" _null_ _null_ _null_ ftod - _null_ ));
DESCR("convert int2 to int4");
DATA(insert OID = 314 ( int2 PGNSP PGUID 12 f f t f i 1 21 "23" _null_ _null_ _null_ i4toi2 - _null_ ));
DESCR("convert int4 to int2");
-DATA(insert OID = 315 ( int2vectoreq PGNSP PGUID 12 f f t f i 2 16 "22 22" _null_ _null_ _null_ int2vectoreq - _null_ ));
+DATA(insert OID = 315 ( int2vectoreq PGNSP PGUID 12 f f t f i 2 16 "22 22" _null_ _null_ _null_ int2vectoreq - _null_ ));
DESCR("equal");
DATA(insert OID = 316 ( float8 PGNSP PGUID 12 f f t f i 1 701 "23" _null_ _null_ _null_ i4tod - _null_ ));
DESCR("convert int4 to float8");
DESCR("r-tree(internal)");
DATA(insert OID = 327 ( rtrestrpos PGNSP PGUID 12 f f t f v 1 2278 "2281" _null_ _null_ _null_ rtrestrpos - _null_ ));
DESCR("r-tree(internal)");
-DATA(insert OID = 328 ( rtrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ _null_ _null_ rtrescan - _null_ ));
+DATA(insert OID = 328 ( rtrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ _null_ _null_ rtrescan - _null_ ));
DESCR("r-tree(internal)");
DATA(insert OID = 321 ( rtbulkdelete PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ _null_ _null_ rtbulkdelete - _null_ ));
DESCR("r-tree(internal)");
DESCR("btree(internal)");
DATA(insert OID = 333 ( btbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ _null_ _null_ btbeginscan - _null_ ));
DESCR("btree(internal)");
-DATA(insert OID = 334 ( btrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ _null_ _null_ btrescan - _null_ ));
+DATA(insert OID = 334 ( btrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ _null_ _null_ btrescan - _null_ ));
DESCR("btree(internal)");
DATA(insert OID = 335 ( btendscan PGNSP PGUID 12 f f t f v 1 2278 "2281" _null_ _null_ _null_ btendscan - _null_ ));
DESCR("btree(internal)");
DATA(insert OID = 1268 ( btcostestimate PGNSP PGUID 12 f f t f v 7 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ btcostestimate - _null_ ));
DESCR("btree(internal)");
-DATA(insert OID = 339 ( poly_same PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_same - _null_ ));
+DATA(insert OID = 339 ( poly_same PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_same - _null_ ));
DESCR("same as?");
-DATA(insert OID = 340 ( poly_contain PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_contain - _null_ ));
+DATA(insert OID = 340 ( poly_contain PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_contain - _null_ ));
DESCR("contains?");
-DATA(insert OID = 341 ( poly_left PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_left - _null_ ));
+DATA(insert OID = 341 ( poly_left PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_left - _null_ ));
DESCR("is left of");
-DATA(insert OID = 342 ( poly_overleft PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overleft - _null_ ));
+DATA(insert OID = 342 ( poly_overleft PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overleft - _null_ ));
DESCR("overlaps or is left of");
-DATA(insert OID = 343 ( poly_overright PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overright - _null_ ));
+DATA(insert OID = 343 ( poly_overright PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overright - _null_ ));
DESCR("overlaps or is right of");
-DATA(insert OID = 344 ( poly_right PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_right - _null_ ));
+DATA(insert OID = 344 ( poly_right PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_right - _null_ ));
DESCR("is right of");
-DATA(insert OID = 345 ( poly_contained PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_contained - _null_ ));
+DATA(insert OID = 345 ( poly_contained PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_contained - _null_ ));
DESCR("contained in?");
-DATA(insert OID = 346 ( poly_overlap PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overlap - _null_ ));
+DATA(insert OID = 346 ( poly_overlap PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overlap - _null_ ));
DESCR("overlaps");
DATA(insert OID = 347 ( poly_in PGNSP PGUID 12 f f t f i 1 604 "2275" _null_ _null_ _null_ poly_in - _null_ ));
DESCR("I/O");
DESCR("btree less-equal-greater");
DATA(insert OID = 842 ( btint8cmp PGNSP PGUID 12 f f t f i 2 23 "20 20" _null_ _null_ _null_ btint8cmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 354 ( btfloat4cmp PGNSP PGUID 12 f f t f i 2 23 "700 700" _null_ _null_ _null_ btfloat4cmp - _null_ ));
+DATA(insert OID = 354 ( btfloat4cmp PGNSP PGUID 12 f f t f i 2 23 "700 700" _null_ _null_ _null_ btfloat4cmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 355 ( btfloat8cmp PGNSP PGUID 12 f f t f i 2 23 "701 701" _null_ _null_ _null_ btfloat8cmp - _null_ ));
+DATA(insert OID = 355 ( btfloat8cmp PGNSP PGUID 12 f f t f i 2 23 "701 701" _null_ _null_ _null_ btfloat8cmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 356 ( btoidcmp PGNSP PGUID 12 f f t f i 2 23 "26 26" _null_ _null_ _null_ btoidcmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 404 ( btoidvectorcmp PGNSP PGUID 12 f f t f i 2 23 "30 30" _null_ _null_ _null_ btoidvectorcmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 357 ( btabstimecmp PGNSP PGUID 12 f f t f i 2 23 "702 702" _null_ _null_ _null_ btabstimecmp - _null_ ));
+DATA(insert OID = 357 ( btabstimecmp PGNSP PGUID 12 f f t f i 2 23 "702 702" _null_ _null_ _null_ btabstimecmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 358 ( btcharcmp PGNSP PGUID 12 f f t f i 2 23 "18 18" _null_ _null_ _null_ btcharcmp - _null_ ));
DESCR("btree less-equal-greater");
DESCR("btree less-equal-greater");
DATA(insert OID = 360 ( bttextcmp PGNSP PGUID 12 f f t f i 2 23 "25 25" _null_ _null_ _null_ bttextcmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 377 ( cash_cmp PGNSP PGUID 12 f f t f i 2 23 "790 790" _null_ _null_ _null_ cash_cmp - _null_ ));
+DATA(insert OID = 377 ( cash_cmp PGNSP PGUID 12 f f t f i 2 23 "790 790" _null_ _null_ _null_ cash_cmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 380 ( btreltimecmp PGNSP PGUID 12 f f t f i 2 23 "703 703" _null_ _null_ _null_ btreltimecmp - _null_ ));
+DATA(insert OID = 380 ( btreltimecmp PGNSP PGUID 12 f f t f i 2 23 "703 703" _null_ _null_ _null_ btreltimecmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 381 ( bttintervalcmp PGNSP PGUID 12 f f t f i 2 23 "704 704" _null_ _null_ _null_ bttintervalcmp - _null_ ));
+DATA(insert OID = 381 ( bttintervalcmp PGNSP PGUID 12 f f t f i 2 23 "704 704" _null_ _null_ _null_ bttintervalcmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 382 ( btarraycmp PGNSP PGUID 12 f f t f i 2 23 "2277 2277" _null_ _null_ _null_ btarraycmp - _null_ ));
DESCR("btree less-equal-greater");
DESCR("closest point on box");
DATA(insert OID = 368 ( close_sb PGNSP PGUID 12 f f t f i 2 600 "601 603" _null_ _null_ _null_ close_sb - _null_ ));
DESCR("closest point to line segment on box");
-DATA(insert OID = 369 ( on_ps PGNSP PGUID 12 f f t f i 2 16 "600 601" _null_ _null_ _null_ on_ps - _null_ ));
+DATA(insert OID = 369 ( on_ps PGNSP PGUID 12 f f t f i 2 16 "600 601" _null_ _null_ _null_ on_ps - _null_ ));
DESCR("point contained in segment?");
DATA(insert OID = 370 ( path_distance PGNSP PGUID 12 f f t f i 2 701 "602 602" _null_ _null_ _null_ path_distance - _null_ ));
DESCR("distance between paths");
DATA(insert OID = 371 ( dist_ppath PGNSP PGUID 12 f f t f i 2 701 "600 602" _null_ _null_ _null_ dist_ppath - _null_ ));
DESCR("distance between point and path");
-DATA(insert OID = 372 ( on_sb PGNSP PGUID 12 f f t f i 2 16 "601 603" _null_ _null_ _null_ on_sb - _null_ ));
+DATA(insert OID = 372 ( on_sb PGNSP PGUID 12 f f t f i 2 16 "601 603" _null_ _null_ _null_ on_sb - _null_ ));
DESCR("lseg contained in box?");
-DATA(insert OID = 373 ( inter_sb PGNSP PGUID 12 f f t f i 2 16 "601 603" _null_ _null_ _null_ inter_sb - _null_ ));
+DATA(insert OID = 373 ( inter_sb PGNSP PGUID 12 f f t f i 2 16 "601 603" _null_ _null_ _null_ inter_sb - _null_ ));
DESCR("intersect?");
/* OIDS 400 - 499 */
DESCR("hash(internal)");
DATA(insert OID = 443 ( hashbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ _null_ _null_ hashbeginscan - _null_ ));
DESCR("hash(internal)");
-DATA(insert OID = 444 ( hashrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ _null_ _null_ hashrescan - _null_ ));
+DATA(insert OID = 444 ( hashrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ _null_ _null_ hashrescan - _null_ ));
DESCR("hash(internal)");
DATA(insert OID = 445 ( hashendscan PGNSP PGUID 12 f f t f v 1 2278 "2281" _null_ _null_ _null_ hashendscan - _null_ ));
DESCR("hash(internal)");
DATA(insert OID = 438 ( hashcostestimate PGNSP PGUID 12 f f t f v 7 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ hashcostestimate - _null_ ));
DESCR("hash(internal)");
-DATA(insert OID = 449 ( hashint2 PGNSP PGUID 12 f f t f i 1 23 "21" _null_ _null_ _null_ hashint2 - _null_ ));
+DATA(insert OID = 449 ( hashint2 PGNSP PGUID 12 f f t f i 1 23 "21" _null_ _null_ _null_ hashint2 - _null_ ));
DESCR("hash");
-DATA(insert OID = 450 ( hashint4 PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ hashint4 - _null_ ));
+DATA(insert OID = 450 ( hashint4 PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ hashint4 - _null_ ));
DESCR("hash");
-DATA(insert OID = 949 ( hashint8 PGNSP PGUID 12 f f t f i 1 23 "20" _null_ _null_ _null_ hashint8 - _null_ ));
+DATA(insert OID = 949 ( hashint8 PGNSP PGUID 12 f f t f i 1 23 "20" _null_ _null_ _null_ hashint8 - _null_ ));
DESCR("hash");
-DATA(insert OID = 451 ( hashfloat4 PGNSP PGUID 12 f f t f i 1 23 "700" _null_ _null_ _null_ hashfloat4 - _null_ ));
+DATA(insert OID = 451 ( hashfloat4 PGNSP PGUID 12 f f t f i 1 23 "700" _null_ _null_ _null_ hashfloat4 - _null_ ));
DESCR("hash");
-DATA(insert OID = 452 ( hashfloat8 PGNSP PGUID 12 f f t f i 1 23 "701" _null_ _null_ _null_ hashfloat8 - _null_ ));
+DATA(insert OID = 452 ( hashfloat8 PGNSP PGUID 12 f f t f i 1 23 "701" _null_ _null_ _null_ hashfloat8 - _null_ ));
DESCR("hash");
-DATA(insert OID = 453 ( hashoid PGNSP PGUID 12 f f t f i 1 23 "26" _null_ _null_ _null_ hashoid - _null_ ));
+DATA(insert OID = 453 ( hashoid PGNSP PGUID 12 f f t f i 1 23 "26" _null_ _null_ _null_ hashoid - _null_ ));
DESCR("hash");
-DATA(insert OID = 454 ( hashchar PGNSP PGUID 12 f f t f i 1 23 "18" _null_ _null_ _null_ hashchar - _null_ ));
+DATA(insert OID = 454 ( hashchar PGNSP PGUID 12 f f t f i 1 23 "18" _null_ _null_ _null_ hashchar - _null_ ));
DESCR("hash");
-DATA(insert OID = 455 ( hashname PGNSP PGUID 12 f f t f i 1 23 "19" _null_ _null_ _null_ hashname - _null_ ));
+DATA(insert OID = 455 ( hashname PGNSP PGUID 12 f f t f i 1 23 "19" _null_ _null_ _null_ hashname - _null_ ));
DESCR("hash");
DATA(insert OID = 400 ( hashtext PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ hashtext - _null_ ));
DESCR("hash");
DATA(insert OID = 456 ( hashvarlena PGNSP PGUID 12 f f t f i 1 23 "2281" _null_ _null_ _null_ hashvarlena - _null_ ));
DESCR("hash any varlena type");
-DATA(insert OID = 457 ( hashoidvector PGNSP PGUID 12 f f t f i 1 23 "30" _null_ _null_ _null_ hashoidvector - _null_ ));
+DATA(insert OID = 457 ( hashoidvector PGNSP PGUID 12 f f t f i 1 23 "30" _null_ _null_ _null_ hashoidvector - _null_ ));
DESCR("hash");
DATA(insert OID = 329 ( hash_aclitem PGNSP PGUID 12 f f t f i 1 23 "1033" _null_ _null_ _null_ hash_aclitem - _null_ ));
DESCR("hash");
-DATA(insert OID = 398 ( hashint2vector PGNSP PGUID 12 f f t f i 1 23 "22" _null_ _null_ _null_ hashint2vector - _null_ ));
+DATA(insert OID = 398 ( hashint2vector PGNSP PGUID 12 f f t f i 1 23 "22" _null_ _null_ _null_ hashint2vector - _null_ ));
DESCR("hash");
-DATA(insert OID = 399 ( hashmacaddr PGNSP PGUID 12 f f t f i 1 23 "829" _null_ _null_ _null_ hashmacaddr - _null_ ));
+DATA(insert OID = 399 ( hashmacaddr PGNSP PGUID 12 f f t f i 1 23 "829" _null_ _null_ _null_ hashmacaddr - _null_ ));
DESCR("hash");
-DATA(insert OID = 422 ( hashinet PGNSP PGUID 12 f f t f i 1 23 "869" _null_ _null_ _null_ hashinet - _null_ ));
+DATA(insert OID = 422 ( hashinet PGNSP PGUID 12 f f t f i 1 23 "869" _null_ _null_ _null_ hashinet - _null_ ));
DESCR("hash");
DATA(insert OID = 458 ( text_larger PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ _null_ _null_ text_larger - _null_ ));
DESCR("larger of two");
DESCR("I/O");
DATA(insert OID = 461 ( int8out PGNSP PGUID 12 f f t f i 1 2275 "20" _null_ _null_ _null_ int8out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 462 ( int8um PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8um - _null_ ));
+DATA(insert OID = 462 ( int8um PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8um - _null_ ));
DESCR("negate");
DATA(insert OID = 463 ( int8pl PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ _null_ _null_ int8pl - _null_ ));
DESCR("add");
DATA(insert OID = 479 ( int84ge PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ _null_ _null_ int84ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 480 ( int4 PGNSP PGUID 12 f f t f i 1 23 "20" _null_ _null_ _null_ int84 - _null_ ));
+DATA(insert OID = 480 ( int4 PGNSP PGUID 12 f f t f i 1 23 "20" _null_ _null_ _null_ int84 - _null_ ));
DESCR("convert int8 to int4");
-DATA(insert OID = 481 ( int8 PGNSP PGUID 12 f f t f i 1 20 "23" _null_ _null_ _null_ int48 - _null_ ));
+DATA(insert OID = 481 ( int8 PGNSP PGUID 12 f f t f i 1 20 "23" _null_ _null_ _null_ int48 - _null_ ));
DESCR("convert int4 to int8");
-DATA(insert OID = 482 ( float8 PGNSP PGUID 12 f f t f i 1 701 "20" _null_ _null_ _null_ i8tod - _null_ ));
+DATA(insert OID = 482 ( float8 PGNSP PGUID 12 f f t f i 1 701 "20" _null_ _null_ _null_ i8tod - _null_ ));
DESCR("convert int8 to float8");
DATA(insert OID = 483 ( int8 PGNSP PGUID 12 f f t f i 1 20 "701" _null_ _null_ _null_ dtoi8 - _null_ ));
DESCR("convert float8 to int8");
/* OIDS 600 - 699 */
-DATA(insert OID = 652 ( float4 PGNSP PGUID 12 f f t f i 1 700 "20" _null_ _null_ _null_ i8tof - _null_ ));
+DATA(insert OID = 652 ( float4 PGNSP PGUID 12 f f t f i 1 700 "20" _null_ _null_ _null_ i8tof - _null_ ));
DESCR("convert int8 to float4");
DATA(insert OID = 653 ( int8 PGNSP PGUID 12 f f t f i 1 20 "700" _null_ _null_ _null_ ftoi8 - _null_ ));
DESCR("convert float4 to int8");
-DATA(insert OID = 714 ( int2 PGNSP PGUID 12 f f t f i 1 21 "20" _null_ _null_ _null_ int82 - _null_ ));
+DATA(insert OID = 714 ( int2 PGNSP PGUID 12 f f t f i 1 21 "20" _null_ _null_ _null_ int82 - _null_ ));
DESCR("convert int8 to int2");
-DATA(insert OID = 754 ( int8 PGNSP PGUID 12 f f t f i 1 20 "21" _null_ _null_ _null_ int28 - _null_ ));
+DATA(insert OID = 754 ( int8 PGNSP PGUID 12 f f t f i 1 20 "21" _null_ _null_ _null_ int28 - _null_ ));
DESCR("convert int2 to int8");
DATA(insert OID = 1285 ( int4notin PGNSP PGUID 12 f f t f s 2 16 "23 25" _null_ _null_ _null_ int4notin - _null_ ));
DATA(insert OID = 717 ( oidle PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ _null_ _null_ oidle - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 720 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "17" _null_ _null_ _null_ byteaoctetlen - _null_ ));
+DATA(insert OID = 720 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "17" _null_ _null_ _null_ byteaoctetlen - _null_ ));
DESCR("octet length");
DATA(insert OID = 721 ( get_byte PGNSP PGUID 12 f f t f i 2 23 "17 23" _null_ _null_ _null_ byteaGetByte - _null_ ));
DESCR("get byte");
DESCR("array dimensions");
DATA(insert OID = 750 ( array_in PGNSP PGUID 12 f f t f s 3 2277 "2275 26 23" _null_ _null_ _null_ array_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 751 ( array_out PGNSP PGUID 12 f f t f s 1 2275 "2277" _null_ _null_ _null_ array_out - _null_ ));
+DATA(insert OID = 751 ( array_out PGNSP PGUID 12 f f t f s 1 2275 "2277" _null_ _null_ _null_ array_out - _null_ ));
DESCR("I/O");
DATA(insert OID = 2091 ( array_lower PGNSP PGUID 12 f f t f i 2 23 "2277 23" _null_ _null_ _null_ array_lower - _null_ ));
DESCR("array lower dimension");
DESCR("I/O");
DATA(insert OID = 761 ( smgrout PGNSP PGUID 12 f f t f s 1 2275 "210" _null_ _null_ _null_ smgrout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 762 ( smgreq PGNSP PGUID 12 f f t f i 2 16 "210 210" _null_ _null_ _null_ smgreq - _null_ ));
+DATA(insert OID = 762 ( smgreq PGNSP PGUID 12 f f t f i 2 16 "210 210" _null_ _null_ _null_ smgreq - _null_ ));
DESCR("storage manager");
-DATA(insert OID = 763 ( smgrne PGNSP PGUID 12 f f t f i 2 16 "210 210" _null_ _null_ _null_ smgrne - _null_ ));
+DATA(insert OID = 763 ( smgrne PGNSP PGUID 12 f f t f i 2 16 "210 210" _null_ _null_ _null_ smgrne - _null_ ));
DESCR("storage manager");
-DATA(insert OID = 764 ( lo_import PGNSP PGUID 12 f f t f v 1 26 "25" _null_ _null_ _null_ lo_import - _null_ ));
+DATA(insert OID = 764 ( lo_import PGNSP PGUID 12 f f t f v 1 26 "25" _null_ _null_ _null_ lo_import - _null_ ));
DESCR("large object import");
DATA(insert OID = 765 ( lo_export PGNSP PGUID 12 f f t f v 2 23 "26 25" _null_ _null_ _null_ lo_export - _null_ ));
DESCR("large object export");
-DATA(insert OID = 766 ( int4inc PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4inc - _null_ ));
+DATA(insert OID = 766 ( int4inc PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4inc - _null_ ));
DESCR("increment");
DATA(insert OID = 768 ( int4larger PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ _null_ _null_ int4larger - _null_ ));
DESCR("larger of two");
DESCR("gist(internal)");
DATA(insert OID = 777 ( gistbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ _null_ _null_ gistbeginscan - _null_ ));
DESCR("gist(internal)");
-DATA(insert OID = 778 ( gistrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ _null_ _null_ gistrescan - _null_ ));
+DATA(insert OID = 778 ( gistrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ _null_ _null_ gistrescan - _null_ ));
DESCR("gist(internal)");
DATA(insert OID = 779 ( gistendscan PGNSP PGUID 12 f f t f v 1 2278 "2281" _null_ _null_ _null_ gistendscan - _null_ ));
DESCR("gist(internal)");
DATA(insert OID = 772 ( gistcostestimate PGNSP PGUID 12 f f t f v 7 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ gistcostestimate - _null_ ));
DESCR("gist(internal)");
-DATA(insert OID = 784 ( tintervaleq PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervaleq - _null_ ));
+DATA(insert OID = 784 ( tintervaleq PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervaleq - _null_ ));
DESCR("equal");
-DATA(insert OID = 785 ( tintervalne PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalne - _null_ ));
+DATA(insert OID = 785 ( tintervalne PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 786 ( tintervallt PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervallt - _null_ ));
+DATA(insert OID = 786 ( tintervallt PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervallt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 787 ( tintervalgt PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalgt - _null_ ));
+DATA(insert OID = 787 ( tintervalgt PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalgt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 788 ( tintervalle PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalle - _null_ ));
+DATA(insert OID = 788 ( tintervalle PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalle - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 789 ( tintervalge PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalge - _null_ ));
+DATA(insert OID = 789 ( tintervalge PGNSP PGUID 12 f f t f i 2 16 "704 704" _null_ _null_ _null_ tintervalge - _null_ ));
DESCR("greater-than-or-equal");
/* OIDS 800 - 899 */
-DATA(insert OID = 817 ( oid PGNSP PGUID 12 f f t f i 1 26 "25" _null_ _null_ _null_ text_oid - _null_ ));
+DATA(insert OID = 817 ( oid PGNSP PGUID 12 f f t f i 1 26 "25" _null_ _null_ _null_ text_oid - _null_ ));
DESCR("convert text to oid");
-DATA(insert OID = 818 ( int2 PGNSP PGUID 12 f f t f i 1 21 "25" _null_ _null_ _null_ text_int2 - _null_ ));
+DATA(insert OID = 818 ( int2 PGNSP PGUID 12 f f t f i 1 21 "25" _null_ _null_ _null_ text_int2 - _null_ ));
DESCR("convert text to int2");
-DATA(insert OID = 819 ( int4 PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ text_int4 - _null_ ));
+DATA(insert OID = 819 ( int4 PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ text_int4 - _null_ ));
DESCR("convert text to int4");
-DATA(insert OID = 838 ( float8 PGNSP PGUID 12 f f t f i 1 701 "25" _null_ _null_ _null_ text_float8 - _null_ ));
+DATA(insert OID = 838 ( float8 PGNSP PGUID 12 f f t f i 1 701 "25" _null_ _null_ _null_ text_float8 - _null_ ));
DESCR("convert text to float8");
-DATA(insert OID = 839 ( float4 PGNSP PGUID 12 f f t f i 1 700 "25" _null_ _null_ _null_ text_float4 - _null_ ));
+DATA(insert OID = 839 ( float4 PGNSP PGUID 12 f f t f i 1 700 "25" _null_ _null_ _null_ text_float4 - _null_ ));
DESCR("convert text to float4");
DATA(insert OID = 840 ( text PGNSP PGUID 12 f f t f i 1 25 "701" _null_ _null_ _null_ float8_text - _null_ ));
DESCR("convert float8 to text");
DATA(insert OID = 861 ( current_database PGNSP PGUID 12 f f t f i 0 19 "" _null_ _null_ _null_ current_database - _null_ ));
DESCR("returns the current database");
-DATA(insert OID = 862 ( int4_mul_cash PGNSP PGUID 12 f f t f i 2 790 "23 790" _null_ _null_ _null_ int4_mul_cash - _null_ ));
+DATA(insert OID = 862 ( int4_mul_cash PGNSP PGUID 12 f f t f i 2 790 "23 790" _null_ _null_ _null_ int4_mul_cash - _null_ ));
DESCR("multiply");
-DATA(insert OID = 863 ( int2_mul_cash PGNSP PGUID 12 f f t f i 2 790 "21 790" _null_ _null_ _null_ int2_mul_cash - _null_ ));
+DATA(insert OID = 863 ( int2_mul_cash PGNSP PGUID 12 f f t f i 2 790 "21 790" _null_ _null_ _null_ int2_mul_cash - _null_ ));
DESCR("multiply");
-DATA(insert OID = 864 ( cash_mul_int4 PGNSP PGUID 12 f f t f i 2 790 "790 23" _null_ _null_ _null_ cash_mul_int4 - _null_ ));
+DATA(insert OID = 864 ( cash_mul_int4 PGNSP PGUID 12 f f t f i 2 790 "790 23" _null_ _null_ _null_ cash_mul_int4 - _null_ ));
DESCR("multiply");
-DATA(insert OID = 865 ( cash_div_int4 PGNSP PGUID 12 f f t f i 2 790 "790 23" _null_ _null_ _null_ cash_div_int4 - _null_ ));
+DATA(insert OID = 865 ( cash_div_int4 PGNSP PGUID 12 f f t f i 2 790 "790 23" _null_ _null_ _null_ cash_div_int4 - _null_ ));
DESCR("divide");
-DATA(insert OID = 866 ( cash_mul_int2 PGNSP PGUID 12 f f t f i 2 790 "790 21" _null_ _null_ _null_ cash_mul_int2 - _null_ ));
+DATA(insert OID = 866 ( cash_mul_int2 PGNSP PGUID 12 f f t f i 2 790 "790 21" _null_ _null_ _null_ cash_mul_int2 - _null_ ));
DESCR("multiply");
-DATA(insert OID = 867 ( cash_div_int2 PGNSP PGUID 12 f f t f i 2 790 "790 21" _null_ _null_ _null_ cash_div_int2 - _null_ ));
+DATA(insert OID = 867 ( cash_div_int2 PGNSP PGUID 12 f f t f i 2 790 "790 21" _null_ _null_ _null_ cash_div_int2 - _null_ ));
DESCR("divide");
DATA(insert OID = 886 ( cash_in PGNSP PGUID 12 f f t f i 1 790 "2275" _null_ _null_ _null_ cash_in - _null_ ));
DATA(insert OID = 947 ( mod PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ _null_ _null_ int8mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 944 ( char PGNSP PGUID 12 f f t f i 1 18 "25" _null_ _null_ _null_ text_char - _null_ ));
+DATA(insert OID = 944 ( char PGNSP PGUID 12 f f t f i 1 18 "25" _null_ _null_ _null_ text_char - _null_ ));
DESCR("convert text to char");
-DATA(insert OID = 946 ( text PGNSP PGUID 12 f f t f i 1 25 "18" _null_ _null_ _null_ char_text - _null_ ));
+DATA(insert OID = 946 ( text PGNSP PGUID 12 f f t f i 1 25 "18" _null_ _null_ _null_ char_text - _null_ ));
DESCR("convert char to text");
-DATA(insert OID = 950 ( istrue PGNSP PGUID 12 f f f f i 1 16 "16" _null_ _null_ _null_ istrue - _null_ ));
+DATA(insert OID = 950 ( istrue PGNSP PGUID 12 f f f f i 1 16 "16" _null_ _null_ _null_ istrue - _null_ ));
DESCR("bool is true (not false or unknown)");
-DATA(insert OID = 951 ( isfalse PGNSP PGUID 12 f f f f i 1 16 "16" _null_ _null_ _null_ isfalse - _null_ ));
+DATA(insert OID = 951 ( isfalse PGNSP PGUID 12 f f f f i 1 16 "16" _null_ _null_ _null_ isfalse - _null_ ));
DESCR("bool is false (not true or unknown)");
DATA(insert OID = 952 ( lo_open PGNSP PGUID 12 f f t f v 2 23 "26 23" _null_ _null_ _null_ lo_open - _null_ ));
DESCR("large object open");
-DATA(insert OID = 953 ( lo_close PGNSP PGUID 12 f f t f v 1 23 "23" _null_ _null_ _null_ lo_close - _null_ ));
+DATA(insert OID = 953 ( lo_close PGNSP PGUID 12 f f t f v 1 23 "23" _null_ _null_ _null_ lo_close - _null_ ));
DESCR("large object close");
DATA(insert OID = 954 ( loread PGNSP PGUID 12 f f t f v 2 17 "23 23" _null_ _null_ _null_ loread - _null_ ));
DESCR("large object read");
DESCR("large object write");
DATA(insert OID = 956 ( lo_lseek PGNSP PGUID 12 f f t f v 3 23 "23 23 23" _null_ _null_ _null_ lo_lseek - _null_ ));
DESCR("large object seek");
-DATA(insert OID = 957 ( lo_creat PGNSP PGUID 12 f f t f v 1 26 "23" _null_ _null_ _null_ lo_creat - _null_ ));
+DATA(insert OID = 957 ( lo_creat PGNSP PGUID 12 f f t f v 1 26 "23" _null_ _null_ _null_ lo_creat - _null_ ));
DESCR("large object create");
-DATA(insert OID = 715 ( lo_create PGNSP PGUID 12 f f t f v 1 26 "26" _null_ _null_ _null_ lo_create - _null_ ));
+DATA(insert OID = 715 ( lo_create PGNSP PGUID 12 f f t f v 1 26 "26" _null_ _null_ _null_ lo_create - _null_ ));
DESCR("large object create");
-DATA(insert OID = 958 ( lo_tell PGNSP PGUID 12 f f t f v 1 23 "23" _null_ _null_ _null_ lo_tell - _null_ ));
+DATA(insert OID = 958 ( lo_tell PGNSP PGUID 12 f f t f v 1 23 "23" _null_ _null_ _null_ lo_tell - _null_ ));
DESCR("large object position");
DATA(insert OID = 959 ( on_pl PGNSP PGUID 12 f f t f i 2 16 "600 628" _null_ _null_ _null_ on_pl - _null_ ));
DATA(insert OID = 963 ( close_lb PGNSP PGUID 12 f f t f i 2 600 "628 603" _null_ _null_ _null_ close_lb - _null_ ));
DESCR("closest point to line on box");
-DATA(insert OID = 964 ( lo_unlink PGNSP PGUID 12 f f t f v 1 23 "26" _null_ _null_ _null_ lo_unlink - _null_ ));
+DATA(insert OID = 964 ( lo_unlink PGNSP PGUID 12 f f t f v 1 23 "26" _null_ _null_ _null_ lo_unlink - _null_ ));
DESCR("large object unlink(delete)");
DATA(insert OID = 973 ( path_inter PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_inter - _null_ ));
DESCR("box intersection (another box)");
DATA(insert OID = 981 ( diagonal PGNSP PGUID 12 f f t f i 1 601 "603" _null_ _null_ _null_ box_diagonal - _null_ ));
DESCR("box diagonal");
-DATA(insert OID = 982 ( path_n_lt PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_lt - _null_ ));
+DATA(insert OID = 982 ( path_n_lt PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 983 ( path_n_gt PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_gt - _null_ ));
+DATA(insert OID = 983 ( path_n_gt PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 984 ( path_n_eq PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_eq - _null_ ));
+DATA(insert OID = 984 ( path_n_eq PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 985 ( path_n_le PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_le - _null_ ));
+DATA(insert OID = 985 ( path_n_le PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 986 ( path_n_ge PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_ge - _null_ ));
+DATA(insert OID = 986 ( path_n_ge PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ _null_ _null_ path_n_ge - _null_ ));
DESCR("greater-than-or-equal");
DATA(insert OID = 987 ( path_length PGNSP PGUID 12 f f t f i 1 701 "602" _null_ _null_ _null_ path_length - _null_ ));
DESCR("sum of path segment lengths");
-DATA(insert OID = 988 ( point_ne PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_ne - _null_ ));
+DATA(insert OID = 988 ( point_ne PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 989 ( point_vert PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_vert - _null_ ));
+DATA(insert OID = 989 ( point_vert PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_vert - _null_ ));
DESCR("vertically aligned?");
-DATA(insert OID = 990 ( point_horiz PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_horiz - _null_ ));
+DATA(insert OID = 990 ( point_horiz PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ _null_ _null_ point_horiz - _null_ ));
DESCR("horizontally aligned?");
DATA(insert OID = 991 ( point_distance PGNSP PGUID 12 f f t f i 2 701 "600 600" _null_ _null_ _null_ point_distance - _null_ ));
DESCR("distance between");
DESCR("slope between points");
DATA(insert OID = 993 ( lseg PGNSP PGUID 12 f f t f i 2 601 "600 600" _null_ _null_ _null_ lseg_construct - _null_ ));
DESCR("convert points to line segment");
-DATA(insert OID = 994 ( lseg_intersect PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ _null_ _null_ lseg_intersect - _null_ ));
+DATA(insert OID = 994 ( lseg_intersect PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ _null_ _null_ lseg_intersect - _null_ ));
DESCR("intersect?");
-DATA(insert OID = 995 ( lseg_parallel PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ _null_ _null_ lseg_parallel - _null_ ));
+DATA(insert OID = 995 ( lseg_parallel PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ _null_ _null_ lseg_parallel - _null_ ));
DESCR("parallel?");
-DATA(insert OID = 996 ( lseg_perp PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ _null_ _null_ lseg_perp - _null_ ));
+DATA(insert OID = 996 ( lseg_perp PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ _null_ _null_ lseg_perp - _null_ ));
DESCR("perpendicular?");
-DATA(insert OID = 997 ( lseg_vertical PGNSP PGUID 12 f f t f i 1 16 "601" _null_ _null_ _null_ lseg_vertical - _null_ ));
+DATA(insert OID = 997 ( lseg_vertical PGNSP PGUID 12 f f t f i 1 16 "601" _null_ _null_ _null_ lseg_vertical - _null_ ));
DESCR("vertical?");
-DATA(insert OID = 998 ( lseg_horizontal PGNSP PGUID 12 f f t f i 1 16 "601" _null_ _null_ _null_ lseg_horizontal - _null_ ));
+DATA(insert OID = 998 ( lseg_horizontal PGNSP PGUID 12 f f t f i 1 16 "601" _null_ _null_ _null_ lseg_horizontal - _null_ ));
DESCR("horizontal?");
-DATA(insert OID = 999 ( lseg_eq PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ _null_ _null_ lseg_eq - _null_ ));
+DATA(insert OID = 999 ( lseg_eq PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ _null_ _null_ lseg_eq - _null_ ));
DESCR("equal");
DATA(insert OID = 748 ( date PGNSP PGUID 12 f f t f s 1 1082 "25" _null_ _null_ _null_ text_date - _null_ ));
/* OIDS 1000 - 1999 */
-DATA(insert OID = 1026 ( timezone PGNSP PGUID 12 f f t f i 2 1114 "1186 1184" _null_ _null_ _null_ timestamptz_izone - _null_ ));
+DATA(insert OID = 1026 ( timezone PGNSP PGUID 12 f f t f i 2 1114 "1186 1184" _null_ _null_ _null_ timestamptz_izone - _null_ ));
DESCR("adjust timestamp to new time zone");
DATA(insert OID = 1029 ( nullvalue PGNSP PGUID 12 f f f f i 1 16 "2276" _null_ _null_ _null_ nullvalue - _null_ ));
DESCR("(internal)");
DATA(insert OID = 1030 ( nonnullvalue PGNSP PGUID 12 f f f f i 1 16 "2276" _null_ _null_ _null_ nonnullvalue - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1031 ( aclitemin PGNSP PGUID 12 f f t f s 1 1033 "2275" _null_ _null_ _null_ aclitemin - _null_ ));
+DATA(insert OID = 1031 ( aclitemin PGNSP PGUID 12 f f t f s 1 1033 "2275" _null_ _null_ _null_ aclitemin - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1032 ( aclitemout PGNSP PGUID 12 f f t f s 1 2275 "1033" _null_ _null_ _null_ aclitemout - _null_ ));
+DATA(insert OID = 1032 ( aclitemout PGNSP PGUID 12 f f t f s 1 2275 "1033" _null_ _null_ _null_ aclitemout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1035 ( aclinsert PGNSP PGUID 12 f f t f i 2 1034 "1034 1033" _null_ _null_ _null_ aclinsert - _null_ ));
+DATA(insert OID = 1035 ( aclinsert PGNSP PGUID 12 f f t f i 2 1034 "1034 1033" _null_ _null_ _null_ aclinsert - _null_ ));
DESCR("add/update ACL item");
-DATA(insert OID = 1036 ( aclremove PGNSP PGUID 12 f f t f i 2 1034 "1034 1033" _null_ _null_ _null_ aclremove - _null_ ));
+DATA(insert OID = 1036 ( aclremove PGNSP PGUID 12 f f t f i 2 1034 "1034 1033" _null_ _null_ _null_ aclremove - _null_ ));
DESCR("remove ACL item");
DATA(insert OID = 1037 ( aclcontains PGNSP PGUID 12 f f t f i 2 16 "1034 1033" _null_ _null_ _null_ aclcontains - _null_ ));
DESCR("does ACL contain item?");
DESCR("greater-than-or-equal");
DATA(insert OID = 1053 ( bpcharne PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ _null_ _null_ bpcharne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1063 ( bpchar_larger PGNSP PGUID 12 f f t f i 2 1042 "1042 1042" _null_ _null_ _null_ bpchar_larger - _null_ ));
+DATA(insert OID = 1063 ( bpchar_larger PGNSP PGUID 12 f f t f i 2 1042 "1042 1042" _null_ _null_ _null_ bpchar_larger - _null_ ));
DESCR("larger of two");
DATA(insert OID = 1064 ( bpchar_smaller PGNSP PGUID 12 f f t f i 2 1042 "1042 1042" _null_ _null_ _null_ bpchar_smaller - _null_ ));
DESCR("smaller of two");
DESCR("not equal");
DATA(insert OID = 1107 ( time_cmp PGNSP PGUID 12 f f t f i 2 23 "1083 1083" _null_ _null_ _null_ time_cmp - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 1138 ( date_larger PGNSP PGUID 12 f f t f i 2 1082 "1082 1082" _null_ _null_ _null_ date_larger - _null_ ));
+DATA(insert OID = 1138 ( date_larger PGNSP PGUID 12 f f t f i 2 1082 "1082 1082" _null_ _null_ _null_ date_larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 1139 ( date_smaller PGNSP PGUID 12 f f t f i 2 1082 "1082 1082" _null_ _null_ _null_ date_smaller - _null_ ));
+DATA(insert OID = 1139 ( date_smaller PGNSP PGUID 12 f f t f i 2 1082 "1082 1082" _null_ _null_ _null_ date_smaller - _null_ ));
DESCR("smaller of two");
DATA(insert OID = 1140 ( date_mi PGNSP PGUID 12 f f t f i 2 23 "1082 1082" _null_ _null_ _null_ date_mi - _null_ ));
DESCR("subtract");
DESCR("greater-than-or-equal");
DATA(insert OID = 1157 ( timestamptz_gt PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ _null_ _null_ timestamp_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1158 ( to_timestamp PGNSP PGUID 14 f f t f i 1 1184 "701" _null_ _null_ _null_ "select (''epoch''::timestamptz + $1 * ''1 second''::interval)" - _null_ ));
+DATA(insert OID = 1158 ( to_timestamp PGNSP PGUID 14 f f t f i 1 1184 "701" _null_ _null_ _null_ "select (''epoch''::timestamptz + $1 * ''1 second''::interval)" - _null_ ));
DESCR("convert UNIX epoch to timestamptz");
DATA(insert OID = 1159 ( timezone PGNSP PGUID 12 f f t f i 2 1114 "25 1184" _null_ _null_ _null_ timestamptz_zone - _null_ ));
DESCR("adjust timestamp to new time zone");
DESCR("greater-than-or-equal");
DATA(insert OID = 1167 ( interval_gt PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ _null_ _null_ interval_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1168 ( interval_um PGNSP PGUID 12 f f t f i 1 1186 "1186" _null_ _null_ _null_ interval_um - _null_ ));
+DATA(insert OID = 1168 ( interval_um PGNSP PGUID 12 f f t f i 1 1186 "1186" _null_ _null_ _null_ interval_um - _null_ ));
DESCR("subtract");
-DATA(insert OID = 1169 ( interval_pl PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ _null_ _null_ interval_pl - _null_ ));
+DATA(insert OID = 1169 ( interval_pl PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ _null_ _null_ interval_pl - _null_ ));
DESCR("add");
-DATA(insert OID = 1170 ( interval_mi PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ _null_ _null_ interval_mi - _null_ ));
+DATA(insert OID = 1170 ( interval_mi PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ _null_ _null_ interval_mi - _null_ ));
DESCR("subtract");
DATA(insert OID = 1171 ( date_part PGNSP PGUID 12 f f t f s 2 701 "25 1184" _null_ _null_ _null_ timestamptz_part - _null_ ));
DESCR("extract field from timestamp with time zone");
DESCR("extract field from interval");
DATA(insert OID = 1173 ( timestamptz PGNSP PGUID 12 f f t f i 1 1184 "702" _null_ _null_ _null_ abstime_timestamptz - _null_ ));
DESCR("convert abstime to timestamp with time zone");
-DATA(insert OID = 1174 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "1082" _null_ _null_ _null_ date_timestamptz - _null_ ));
+DATA(insert OID = 1174 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "1082" _null_ _null_ _null_ date_timestamptz - _null_ ));
DESCR("convert date to timestamp with time zone");
-DATA(insert OID = 1175 ( justify_hours PGNSP PGUID 12 f f t f i 1 1186 "1186" _null_ _null_ _null_ interval_justify_hours - _null_ ));
+DATA(insert OID = 1175 ( justify_hours PGNSP PGUID 12 f f t f i 1 1186 "1186" _null_ _null_ _null_ interval_justify_hours - _null_ ));
DESCR("promote groups of 24 hours to numbers of days");
-DATA(insert OID = 1295 ( justify_days PGNSP PGUID 12 f f t f i 1 1186 "1186" _null_ _null_ _null_ interval_justify_days - _null_ ));
+DATA(insert OID = 1295 ( justify_days PGNSP PGUID 12 f f t f i 1 1186 "1186" _null_ _null_ _null_ interval_justify_days - _null_ ));
DESCR("promote groups of 30 days to numbers of months");
-DATA(insert OID = 1176 ( timestamptz PGNSP PGUID 14 f f t f s 2 1184 "1082 1083" _null_ _null_ _null_ "select cast(($1 + $2) as timestamp with time zone)" - _null_ ));
+DATA(insert OID = 1176 ( timestamptz PGNSP PGUID 14 f f t f s 2 1184 "1082 1083" _null_ _null_ _null_ "select cast(($1 + $2) as timestamp with time zone)" - _null_ ));
DESCR("convert date and time to timestamp with time zone");
DATA(insert OID = 1177 ( interval PGNSP PGUID 12 f f t f i 1 1186 "703" _null_ _null_ _null_ reltime_interval - _null_ ));
DESCR("convert reltime to interval");
-DATA(insert OID = 1178 ( date PGNSP PGUID 12 f f t f s 1 1082 "1184" _null_ _null_ _null_ timestamptz_date - _null_ ));
+DATA(insert OID = 1178 ( date PGNSP PGUID 12 f f t f s 1 1082 "1184" _null_ _null_ _null_ timestamptz_date - _null_ ));
DESCR("convert timestamp with time zone to date");
DATA(insert OID = 1179 ( date PGNSP PGUID 12 f f t f s 1 1082 "702" _null_ _null_ _null_ abstime_date - _null_ ));
DESCR("convert abstime to date");
-DATA(insert OID = 1180 ( abstime PGNSP PGUID 12 f f t f i 1 702 "1184" _null_ _null_ _null_ timestamptz_abstime - _null_ ));
+DATA(insert OID = 1180 ( abstime PGNSP PGUID 12 f f t f i 1 702 "1184" _null_ _null_ _null_ timestamptz_abstime - _null_ ));
DESCR("convert timestamp with time zone to abstime");
-DATA(insert OID = 1181 ( age PGNSP PGUID 12 f f t f s 1 23 "28" _null_ _null_ _null_ xid_age - _null_ ));
+DATA(insert OID = 1181 ( age PGNSP PGUID 12 f f t f s 1 23 "28" _null_ _null_ _null_ xid_age - _null_ ));
DESCR("age of a transaction ID, in transactions before current transaction");
-DATA(insert OID = 1188 ( timestamptz_mi PGNSP PGUID 12 f f t f i 2 1186 "1184 1184" _null_ _null_ _null_ timestamp_mi - _null_ ));
+DATA(insert OID = 1188 ( timestamptz_mi PGNSP PGUID 12 f f t f i 2 1186 "1184 1184" _null_ _null_ _null_ timestamp_mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 1189 ( timestamptz_pl_interval PGNSP PGUID 12 f f t f s 2 1184 "1184 1186" _null_ _null_ _null_ timestamptz_pl_interval - _null_ ));
+DATA(insert OID = 1189 ( timestamptz_pl_interval PGNSP PGUID 12 f f t f s 2 1184 "1184 1186" _null_ _null_ _null_ timestamptz_pl_interval - _null_ ));
DESCR("plus");
-DATA(insert OID = 1190 ( timestamptz_mi_interval PGNSP PGUID 12 f f t f s 2 1184 "1184 1186" _null_ _null_ _null_ timestamptz_mi_interval - _null_ ));
+DATA(insert OID = 1190 ( timestamptz_mi_interval PGNSP PGUID 12 f f t f s 2 1184 "1184 1186" _null_ _null_ _null_ timestamptz_mi_interval - _null_ ));
DESCR("minus");
DATA(insert OID = 1191 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "25" _null_ _null_ _null_ text_timestamptz - _null_ ));
DESCR("convert text to timestamp with time zone");
-DATA(insert OID = 1192 ( text PGNSP PGUID 12 f f t f s 1 25 "1184" _null_ _null_ _null_ timestamptz_text - _null_ ));
+DATA(insert OID = 1192 ( text PGNSP PGUID 12 f f t f s 1 25 "1184" _null_ _null_ _null_ timestamptz_text - _null_ ));
DESCR("convert timestamp with time zone to text");
-DATA(insert OID = 1193 ( text PGNSP PGUID 12 f f t f i 1 25 "1186" _null_ _null_ _null_ interval_text - _null_ ));
+DATA(insert OID = 1193 ( text PGNSP PGUID 12 f f t f i 1 25 "1186" _null_ _null_ _null_ interval_text - _null_ ));
DESCR("convert interval to text");
-DATA(insert OID = 1194 ( reltime PGNSP PGUID 12 f f t f i 1 703 "1186" _null_ _null_ _null_ interval_reltime - _null_ ));
+DATA(insert OID = 1194 ( reltime PGNSP PGUID 12 f f t f i 1 703 "1186" _null_ _null_ _null_ interval_reltime - _null_ ));
DESCR("convert interval to reltime");
-DATA(insert OID = 1195 ( timestamptz_smaller PGNSP PGUID 12 f f t f i 2 1184 "1184 1184" _null_ _null_ _null_ timestamp_smaller - _null_ ));
+DATA(insert OID = 1195 ( timestamptz_smaller PGNSP PGUID 12 f f t f i 2 1184 "1184 1184" _null_ _null_ _null_ timestamp_smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 1196 ( timestamptz_larger PGNSP PGUID 12 f f t f i 2 1184 "1184 1184" _null_ _null_ _null_ timestamp_larger - _null_ ));
+DATA(insert OID = 1196 ( timestamptz_larger PGNSP PGUID 12 f f t f i 2 1184 "1184 1184" _null_ _null_ _null_ timestamp_larger - _null_ ));
DESCR("larger of two");
DATA(insert OID = 1197 ( interval_smaller PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ _null_ _null_ interval_smaller - _null_ ));
DESCR("smaller of two");
DATA(insert OID = 1200 ( interval PGNSP PGUID 12 f f t f i 2 1186 "1186 23" _null_ _null_ _null_ interval_scale - _null_ ));
DESCR("adjust interval precision");
-DATA(insert OID = 1215 ( obj_description PGNSP PGUID 14 f f t f s 2 25 "26 19" _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and classoid = (select oid from pg_catalog.pg_class where relname = $2 and relnamespace = PGNSP) and objsubid = 0" - _null_ ));
+DATA(insert OID = 1215 ( obj_description PGNSP PGUID 14 f f t f s 2 25 "26 19" _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and classoid = (select oid from pg_catalog.pg_class where relname = $2 and relnamespace = PGNSP) and objsubid = 0" - _null_ ));
DESCR("get description for object id and catalog name");
-DATA(insert OID = 1216 ( col_description PGNSP PGUID 14 f f t f s 2 25 "26 23" _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and classoid = ''pg_catalog.pg_class''::regclass and objsubid = $2" - _null_ ));
+DATA(insert OID = 1216 ( col_description PGNSP PGUID 14 f f t f s 2 25 "26 23" _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and classoid = ''pg_catalog.pg_class''::regclass and objsubid = $2" - _null_ ));
DESCR("get description for table column");
DATA(insert OID = 1217 ( date_trunc PGNSP PGUID 12 f f t f s 2 1184 "25 1184" _null_ _null_ _null_ timestamptz_trunc - _null_ ));
DATA(insert OID = 1218 ( date_trunc PGNSP PGUID 12 f f t f i 2 1186 "25 1186" _null_ _null_ _null_ interval_trunc - _null_ ));
DESCR("truncate interval to specified units");
-DATA(insert OID = 1219 ( int8inc PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8inc - _null_ ));
+DATA(insert OID = 1219 ( int8inc PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8inc - _null_ ));
DESCR("increment");
-DATA(insert OID = 1230 ( int8abs PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8abs - _null_ ));
+DATA(insert OID = 1230 ( int8abs PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8abs - _null_ ));
DESCR("absolute value");
DATA(insert OID = 1236 ( int8larger PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ _null_ _null_ int8larger - _null_ ));
DATA(insert OID = 2323 ( pg_tablespace_size PGNSP PGUID 12 f f t f v 1 20 "19" _null_ _null_ _null_ pg_tablespace_size_name - _null_ ));
DESCR("Calculate total disk space usage for the specified tablespace");
-DATA(insert OID = 1251 ( int4abs PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4abs - _null_ ));
+DATA(insert OID = 1251 ( int4abs PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 1253 ( int2abs PGNSP PGUID 12 f f t f i 1 21 "21" _null_ _null_ _null_ int2abs - _null_ ));
+DATA(insert OID = 1253 ( int2abs PGNSP PGUID 12 f f t f i 1 21 "21" _null_ _null_ _null_ int2abs - _null_ ));
DESCR("absolute value");
DATA(insert OID = 1263 ( interval PGNSP PGUID 12 f f t f s 1 1186 "25" _null_ _null_ _null_ text_interval - _null_ ));
DATA(insert OID = 2324 ( pg_database_size PGNSP PGUID 12 f f t f v 1 20 "26" _null_ _null_ _null_ pg_database_size_oid - _null_ ));
DESCR("Calculate total disk space usage for the specified database");
-DATA(insert OID = 1271 ( overlaps PGNSP PGUID 12 f f f f i 4 16 "1266 1266 1266 1266" _null_ _null_ _null_ overlaps_timetz - _null_ ));
+DATA(insert OID = 1271 ( overlaps PGNSP PGUID 12 f f f f i 4 16 "1266 1266 1266 1266" _null_ _null_ _null_ overlaps_timetz - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 1272 ( datetime_pl PGNSP PGUID 12 f f t f i 2 1114 "1082 1083" _null_ _null_ _null_ datetime_timestamp - _null_ ));
+DATA(insert OID = 1272 ( datetime_pl PGNSP PGUID 12 f f t f i 2 1114 "1082 1083" _null_ _null_ _null_ datetime_timestamp - _null_ ));
DESCR("convert date and time to timestamp");
DATA(insert OID = 1273 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1266" _null_ _null_ _null_ timetz_part - _null_ ));
DESCR("extract field from time with time zone");
DATA(insert OID = 1281 ( int48div PGNSP PGUID 12 f f t f i 2 20 "23 20" _null_ _null_ _null_ int48div - _null_ ));
DESCR("divide");
-DATA(insert OID = 1287 ( oid PGNSP PGUID 12 f f t f i 1 26 "20" _null_ _null_ _null_ i8tooid - _null_ ));
+DATA(insert OID = 1287 ( oid PGNSP PGUID 12 f f t f i 1 26 "20" _null_ _null_ _null_ i8tooid - _null_ ));
DESCR("convert int8 to oid");
-DATA(insert OID = 1288 ( int8 PGNSP PGUID 12 f f t f i 1 20 "26" _null_ _null_ _null_ oidtoi8 - _null_ ));
+DATA(insert OID = 1288 ( int8 PGNSP PGUID 12 f f t f i 1 20 "26" _null_ _null_ _null_ oidtoi8 - _null_ ));
DESCR("convert oid to int8");
-DATA(insert OID = 1289 ( text PGNSP PGUID 12 f f t f i 1 25 "20" _null_ _null_ _null_ int8_text - _null_ ));
+DATA(insert OID = 1289 ( text PGNSP PGUID 12 f f t f i 1 25 "20" _null_ _null_ _null_ int8_text - _null_ ));
DESCR("convert int8 to text");
-DATA(insert OID = 1290 ( int8 PGNSP PGUID 12 f f t f i 1 20 "25" _null_ _null_ _null_ text_int8 - _null_ ));
+DATA(insert OID = 1290 ( int8 PGNSP PGUID 12 f f t f i 1 20 "25" _null_ _null_ _null_ text_int8 - _null_ ));
DESCR("convert text to int8");
DATA(insert OID = 1291 ( array_length_coerce PGNSP PGUID 12 f f t f s 3 2277 "2277 23 16" _null_ _null_ _null_ array_length_coerce - _null_ ));
DATA(insert OID = 2168 ( pg_database_size PGNSP PGUID 12 f f t f v 1 20 "19" _null_ _null_ _null_ pg_database_size_name - _null_ ));
DESCR("Calculate total disk space usage for the specified database");
-DATA(insert OID = 1296 ( timedate_pl PGNSP PGUID 14 f f t f i 2 1114 "1083 1082" _null_ _null_ _null_ "select ($2 + $1)" - _null_ ));
+DATA(insert OID = 1296 ( timedate_pl PGNSP PGUID 14 f f t f i 2 1114 "1083 1082" _null_ _null_ _null_ "select ($2 + $1)" - _null_ ));
DESCR("convert time and date to timestamp");
-DATA(insert OID = 1297 ( datetimetz_pl PGNSP PGUID 12 f f t f i 2 1184 "1082 1266" _null_ _null_ _null_ datetimetz_timestamptz - _null_ ));
+DATA(insert OID = 1297 ( datetimetz_pl PGNSP PGUID 12 f f t f i 2 1184 "1082 1266" _null_ _null_ _null_ datetimetz_timestamptz - _null_ ));
DESCR("convert date and time with time zone to timestamp with time zone");
-DATA(insert OID = 1298 ( timetzdate_pl PGNSP PGUID 14 f f t f i 2 1184 "1266 1082" _null_ _null_ _null_ "select ($2 + $1)" - _null_ ));
+DATA(insert OID = 1298 ( timetzdate_pl PGNSP PGUID 14 f f t f i 2 1184 "1266 1082" _null_ _null_ _null_ "select ($2 + $1)" - _null_ ));
DESCR("convert time with time zone and date to timestamp with time zone");
-DATA(insert OID = 1299 ( now PGNSP PGUID 12 f f t f s 0 1184 "" _null_ _null_ _null_ now - _null_ ));
+DATA(insert OID = 1299 ( now PGNSP PGUID 12 f f t f s 0 1184 "" _null_ _null_ _null_ now - _null_ ));
DESCR("current transaction time");
/* OIDS 1300 - 1399 */
DESCR("I/O");
DATA(insert OID = 1313 ( timestamp_out PGNSP PGUID 12 f f t f s 1 2275 "1114" _null_ _null_ _null_ timestamp_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1314 ( timestamptz_cmp PGNSP PGUID 12 f f t f i 2 23 "1184 1184" _null_ _null_ _null_ timestamp_cmp - _null_ ));
+DATA(insert OID = 1314 ( timestamptz_cmp PGNSP PGUID 12 f f t f i 2 23 "1184 1184" _null_ _null_ _null_ timestamp_cmp - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 1315 ( interval_cmp PGNSP PGUID 12 f f t f i 2 23 "1186 1186" _null_ _null_ _null_ interval_cmp - _null_ ));
+DATA(insert OID = 1315 ( interval_cmp PGNSP PGUID 12 f f t f i 2 23 "1186 1186" _null_ _null_ _null_ interval_cmp - _null_ ));
DESCR("less-equal-greater");
DATA(insert OID = 1316 ( time PGNSP PGUID 12 f f t f i 1 1083 "1114" _null_ _null_ _null_ timestamp_time - _null_ ));
DESCR("convert timestamp to time");
DATA(insert OID = 1317 ( length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ textlen - _null_ ));
DESCR("length");
-DATA(insert OID = 1318 ( length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ _null_ _null_ bpcharlen - _null_ ));
+DATA(insert OID = 1318 ( length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ _null_ _null_ bpcharlen - _null_ ));
DESCR("character length");
-DATA(insert OID = 1319 ( xideqint4 PGNSP PGUID 12 f f t f i 2 16 "28 23" _null_ _null_ _null_ xideq - _null_ ));
+DATA(insert OID = 1319 ( xideqint4 PGNSP PGUID 12 f f t f i 2 16 "28 23" _null_ _null_ _null_ xideq - _null_ ));
DESCR("equal");
DATA(insert OID = 1326 ( interval_div PGNSP PGUID 12 f f t f i 2 1186 "1186 701" _null_ _null_ _null_ interval_div - _null_ ));
DESCR("divide");
-DATA(insert OID = 1339 ( dlog10 PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dlog10 - _null_ ));
+DATA(insert OID = 1339 ( dlog10 PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dlog10 - _null_ ));
DESCR("base 10 logarithm");
-DATA(insert OID = 1340 ( log PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dlog10 - _null_ ));
+DATA(insert OID = 1340 ( log PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dlog10 - _null_ ));
DESCR("base 10 logarithm");
-DATA(insert OID = 1341 ( ln PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dlog1 - _null_ ));
+DATA(insert OID = 1341 ( ln PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dlog1 - _null_ ));
DESCR("natural logarithm");
-DATA(insert OID = 1342 ( round PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dround - _null_ ));
+DATA(insert OID = 1342 ( round PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dround - _null_ ));
DESCR("round to nearest integer");
-DATA(insert OID = 1343 ( trunc PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dtrunc - _null_ ));
+DATA(insert OID = 1343 ( trunc PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dtrunc - _null_ ));
DESCR("truncate to integer");
-DATA(insert OID = 1344 ( sqrt PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dsqrt - _null_ ));
+DATA(insert OID = 1344 ( sqrt PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dsqrt - _null_ ));
DESCR("square root");
-DATA(insert OID = 1345 ( cbrt PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dcbrt - _null_ ));
+DATA(insert OID = 1345 ( cbrt PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dcbrt - _null_ ));
DESCR("cube root");
-DATA(insert OID = 1346 ( pow PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ _null_ _null_ dpow - _null_ ));
+DATA(insert OID = 1346 ( pow PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ _null_ _null_ dpow - _null_ ));
DESCR("exponentiation");
-DATA(insert OID = 1368 ( power PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ _null_ _null_ dpow - _null_ ));
+DATA(insert OID = 1368 ( power PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ _null_ _null_ dpow - _null_ ));
DESCR("exponentiation");
-DATA(insert OID = 1347 ( exp PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dexp - _null_ ));
+DATA(insert OID = 1347 ( exp PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ dexp - _null_ ));
DESCR("exponential");
/*
DESCR("greater-than");
DATA(insert OID = 1358 ( timetz_cmp PGNSP PGUID 12 f f t f i 2 23 "1266 1266" _null_ _null_ _null_ timetz_cmp - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 1359 ( timestamptz PGNSP PGUID 12 f f t f i 2 1184 "1082 1266" _null_ _null_ _null_ datetimetz_timestamptz - _null_ ));
+DATA(insert OID = 1359 ( timestamptz PGNSP PGUID 12 f f t f i 2 1184 "1082 1266" _null_ _null_ _null_ datetimetz_timestamptz - _null_ ));
DESCR("convert date and time with time zone to timestamp with time zone");
DATA(insert OID = 1364 ( time PGNSP PGUID 14 f f t f s 1 1083 "702" _null_ _null_ _null_ "select cast(cast($1 as timestamp without time zone) as time)" - _null_ ));
DESCR("convert abstime to time");
-DATA(insert OID = 1367 ( character_length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ _null_ _null_ bpcharlen - _null_ ));
+DATA(insert OID = 1367 ( character_length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ _null_ _null_ bpcharlen - _null_ ));
DESCR("character length");
DATA(insert OID = 1369 ( character_length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ textlen - _null_ ));
DESCR("character length");
DATA(insert OID = 1373 ( array_type_length_coerce PGNSP PGUID 12 f f t f s 3 2277 "2277 23 16" _null_ _null_ _null_ array_type_length_coerce - _null_ ));
DESCR("coerce array to another type and adjust element typmod");
-DATA(insert OID = 1374 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ textoctetlen - _null_ ));
+DATA(insert OID = 1374 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ textoctetlen - _null_ ));
DESCR("octet length");
DATA(insert OID = 1375 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ _null_ _null_ bpcharoctetlen - _null_ ));
DESCR("octet length");
-DATA(insert OID = 1377 ( time_larger PGNSP PGUID 12 f f t f i 2 1083 "1083 1083" _null_ _null_ _null_ time_larger - _null_ ));
+DATA(insert OID = 1377 ( time_larger PGNSP PGUID 12 f f t f i 2 1083 "1083 1083" _null_ _null_ _null_ time_larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 1378 ( time_smaller PGNSP PGUID 12 f f t f i 2 1083 "1083 1083" _null_ _null_ _null_ time_smaller - _null_ ));
+DATA(insert OID = 1378 ( time_smaller PGNSP PGUID 12 f f t f i 2 1083 "1083 1083" _null_ _null_ _null_ time_smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 1379 ( timetz_larger PGNSP PGUID 12 f f t f i 2 1266 "1266 1266" _null_ _null_ _null_ timetz_larger - _null_ ));
+DATA(insert OID = 1379 ( timetz_larger PGNSP PGUID 12 f f t f i 2 1266 "1266 1266" _null_ _null_ _null_ timetz_larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 1380 ( timetz_smaller PGNSP PGUID 12 f f t f i 2 1266 "1266 1266" _null_ _null_ _null_ timetz_smaller - _null_ ));
+DATA(insert OID = 1380 ( timetz_smaller PGNSP PGUID 12 f f t f i 2 1266 "1266 1266" _null_ _null_ _null_ timetz_smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 1381 ( char_length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ textlen - _null_ ));
+DATA(insert OID = 1381 ( char_length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ textlen - _null_ ));
DESCR("character length");
DATA(insert OID = 1382 ( date_part PGNSP PGUID 14 f f t f s 2 701 "25 702" _null_ _null_ _null_ "select pg_catalog.date_part($1, cast($2 as timestamp with time zone))" - _null_ ));
DESCR("extract field from date");
DATA(insert OID = 1385 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1083" _null_ _null_ _null_ time_part - _null_ ));
DESCR("extract field from time");
-DATA(insert OID = 1386 ( age PGNSP PGUID 14 f f t f s 1 1186 "1184" _null_ _null_ _null_ "select pg_catalog.age(cast(current_date as timestamp with time zone), $1)" - _null_ ));
+DATA(insert OID = 1386 ( age PGNSP PGUID 14 f f t f s 1 1186 "1184" _null_ _null_ _null_ "select pg_catalog.age(cast(current_date as timestamp with time zone), $1)" - _null_ ));
DESCR("date difference from today preserving months and years");
-DATA(insert OID = 1388 ( timetz PGNSP PGUID 12 f f t f s 1 1266 "1184" _null_ _null_ _null_ timestamptz_timetz - _null_ ));
+DATA(insert OID = 1388 ( timetz PGNSP PGUID 12 f f t f s 1 1266 "1184" _null_ _null_ _null_ timestamptz_timetz - _null_ ));
DESCR("convert timestamptz to timetz");
DATA(insert OID = 1389 ( isfinite PGNSP PGUID 12 f f t f i 1 16 "1184" _null_ _null_ _null_ timestamp_finite - _null_ ));
DESCR("absolute value");
DATA(insert OID = 1395 ( abs PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ float8abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 1396 ( abs PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8abs - _null_ ));
+DATA(insert OID = 1396 ( abs PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 1397 ( abs PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4abs - _null_ ));
+DATA(insert OID = 1397 ( abs PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 1398 ( abs PGNSP PGUID 12 f f t f i 1 21 "21" _null_ _null_ _null_ int2abs - _null_ ));
+DATA(insert OID = 1398 ( abs PGNSP PGUID 12 f f t f i 1 21 "21" _null_ _null_ _null_ int2abs - _null_ ));
DESCR("absolute value");
/* OIDS 1400 - 1499 */
DATA(insert OID = 1416 ( point PGNSP PGUID 12 f f t f i 1 600 "718" _null_ _null_ _null_ circle_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 1417 ( isnottrue PGNSP PGUID 12 f f f f i 1 16 "16" _null_ _null_ _null_ isnottrue - _null_ ));
+DATA(insert OID = 1417 ( isnottrue PGNSP PGUID 12 f f f f i 1 16 "16" _null_ _null_ _null_ isnottrue - _null_ ));
DESCR("bool is not true (ie, false or unknown)");
-DATA(insert OID = 1418 ( isnotfalse PGNSP PGUID 12 f f f f i 1 16 "16" _null_ _null_ _null_ isnotfalse - _null_ ));
+DATA(insert OID = 1418 ( isnotfalse PGNSP PGUID 12 f f f f i 1 16 "16" _null_ _null_ _null_ isnotfalse - _null_ ));
DESCR("bool is not false (ie, true or unknown)");
-DATA(insert OID = 1419 ( time PGNSP PGUID 12 f f t f i 1 1083 "1186" _null_ _null_ _null_ interval_time - _null_ ));
+DATA(insert OID = 1419 ( time PGNSP PGUID 12 f f t f i 1 1083 "1186" _null_ _null_ _null_ interval_time - _null_ ));
DESCR("convert interval to time");
DATA(insert OID = 1421 ( box PGNSP PGUID 12 f f t f i 2 603 "600 600" _null_ _null_ _null_ points_box - _null_ ));
DATA(insert OID = 1449 ( polygon PGNSP PGUID 12 f f t f i 1 604 "602" _null_ _null_ _null_ path_poly - _null_ ));
DESCR("convert path to polygon");
-DATA(insert OID = 1450 ( circle_in PGNSP PGUID 12 f f t f i 1 718 "2275" _null_ _null_ _null_ circle_in - _null_ ));
+DATA(insert OID = 1450 ( circle_in PGNSP PGUID 12 f f t f i 1 718 "2275" _null_ _null_ _null_ circle_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1451 ( circle_out PGNSP PGUID 12 f f t f i 1 2275 "718" _null_ _null_ _null_ circle_out - _null_ ));
+DATA(insert OID = 1451 ( circle_out PGNSP PGUID 12 f f t f i 1 2275 "718" _null_ _null_ _null_ circle_out - _null_ ));
DESCR("I/O");
DATA(insert OID = 1452 ( circle_same PGNSP PGUID 12 f f t f i 2 16 "718 718" _null_ _null_ _null_ circle_same - _null_ ));
DESCR("same as?");
DATA(insert OID = 1489 ( close_lseg PGNSP PGUID 12 f f t f i 2 600 "601 601" _null_ _null_ _null_ close_lseg - _null_ ));
DESCR("closest point to line segment on line segment");
-DATA(insert OID = 1490 ( line_in PGNSP PGUID 12 f f t f i 1 628 "2275" _null_ _null_ _null_ line_in - _null_ ));
+DATA(insert OID = 1490 ( line_in PGNSP PGUID 12 f f t f i 1 628 "2275" _null_ _null_ _null_ line_in - _null_ ));
DESCR("I/O");
DATA(insert OID = 1491 ( line_out PGNSP PGUID 12 f f t f i 1 2275 "628" _null_ _null_ _null_ line_out - _null_ ));
DESCR("I/O");
DATA(insert OID = 1564 ( bit_in PGNSP PGUID 12 f f t f i 3 1560 "2275 26 23" _null_ _null_ _null_ bit_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1565 ( bit_out PGNSP PGUID 12 f f t f i 1 2275 "1560" _null_ _null_ _null_ bit_out - _null_ ));
+DATA(insert OID = 1565 ( bit_out PGNSP PGUID 12 f f t f i 1 2275 "1560" _null_ _null_ _null_ bit_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1569 ( like PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ _null_ _null_ textlike - _null_ ));
+DATA(insert OID = 1569 ( like PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ _null_ _null_ textlike - _null_ ));
DESCR("matches LIKE expression");
-DATA(insert OID = 1570 ( notlike PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ _null_ _null_ textnlike - _null_ ));
+DATA(insert OID = 1570 ( notlike PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ _null_ _null_ textnlike - _null_ ));
DESCR("does not match LIKE expression");
-DATA(insert OID = 1571 ( like PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ _null_ _null_ namelike - _null_ ));
+DATA(insert OID = 1571 ( like PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ _null_ _null_ namelike - _null_ ));
DESCR("matches LIKE expression");
-DATA(insert OID = 1572 ( notlike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ _null_ _null_ namenlike - _null_ ));
+DATA(insert OID = 1572 ( notlike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ _null_ _null_ namenlike - _null_ ));
DESCR("does not match LIKE expression");
DATA(insert OID = 1579 ( varbit_in PGNSP PGUID 12 f f t f i 3 1562 "2275 26 23" _null_ _null_ _null_ varbit_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1580 ( varbit_out PGNSP PGUID 12 f f t f i 1 2275 "1562" _null_ _null_ _null_ varbit_out - _null_ ));
+DATA(insert OID = 1580 ( varbit_out PGNSP PGUID 12 f f t f i 1 2275 "1562" _null_ _null_ _null_ varbit_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1581 ( biteq PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ biteq - _null_ ));
+DATA(insert OID = 1581 ( biteq PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ biteq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1582 ( bitne PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitne - _null_ ));
+DATA(insert OID = 1582 ( bitne PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1592 ( bitge PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitge - _null_ ));
+DATA(insert OID = 1592 ( bitge PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitge - _null_ ));
DESCR("greater than or equal");
-DATA(insert OID = 1593 ( bitgt PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitgt - _null_ ));
+DATA(insert OID = 1593 ( bitgt PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitgt - _null_ ));
DESCR("greater than");
-DATA(insert OID = 1594 ( bitle PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitle - _null_ ));
+DATA(insert OID = 1594 ( bitle PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitle - _null_ ));
DESCR("less than or equal");
-DATA(insert OID = 1595 ( bitlt PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitlt - _null_ ));
+DATA(insert OID = 1595 ( bitlt PGNSP PGUID 12 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ bitlt - _null_ ));
DESCR("less than");
-DATA(insert OID = 1596 ( bitcmp PGNSP PGUID 12 f f t f i 2 23 "1560 1560" _null_ _null_ _null_ bitcmp - _null_ ));
+DATA(insert OID = 1596 ( bitcmp PGNSP PGUID 12 f f t f i 2 23 "1560 1560" _null_ _null_ _null_ bitcmp - _null_ ));
DESCR("compare");
-DATA(insert OID = 1598 ( random PGNSP PGUID 12 f f t f v 0 701 "" _null_ _null_ _null_ drandom - _null_ ));
+DATA(insert OID = 1598 ( random PGNSP PGUID 12 f f t f v 0 701 "" _null_ _null_ _null_ drandom - _null_ ));
DESCR("random value");
DATA(insert OID = 1599 ( setseed PGNSP PGUID 12 f f t f v 1 23 "701" _null_ _null_ _null_ setseed - _null_ ));
DESCR("set random seed");
DESCR("radians to degrees");
DATA(insert OID = 1609 ( radians PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ radians - _null_ ));
DESCR("degrees to radians");
-DATA(insert OID = 1610 ( pi PGNSP PGUID 12 f f t f i 0 701 "" _null_ _null_ _null_ dpi - _null_ ));
+DATA(insert OID = 1610 ( pi PGNSP PGUID 12 f f t f i 0 701 "" _null_ _null_ _null_ dpi - _null_ ));
DESCR("PI");
-DATA(insert OID = 1618 ( interval_mul PGNSP PGUID 12 f f t f i 2 1186 "1186 701" _null_ _null_ _null_ interval_mul - _null_ ));
+DATA(insert OID = 1618 ( interval_mul PGNSP PGUID 12 f f t f i 2 1186 "1186 701" _null_ _null_ _null_ interval_mul - _null_ ));
DESCR("multiply interval");
-DATA(insert OID = 1620 ( ascii PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ ascii - _null_ ));
+DATA(insert OID = 1620 ( ascii PGNSP PGUID 12 f f t f i 1 23 "25" _null_ _null_ _null_ ascii - _null_ ));
DESCR("convert first char to int4");
-DATA(insert OID = 1621 ( chr PGNSP PGUID 12 f f t f i 1 25 "23" _null_ _null_ _null_ chr - _null_ ));
+DATA(insert OID = 1621 ( chr PGNSP PGUID 12 f f t f i 1 25 "23" _null_ _null_ _null_ chr - _null_ ));
DESCR("convert int4 to char");
-DATA(insert OID = 1622 ( repeat PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ _null_ _null_ repeat - _null_ ));
+DATA(insert OID = 1622 ( repeat PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ _null_ _null_ repeat - _null_ ));
DESCR("replicate string int4 times");
DATA(insert OID = 1623 ( similar_escape PGNSP PGUID 12 f f f f i 2 25 "25 25" _null_ _null_ _null_ similar_escape - _null_ ));
DESCR("convert SQL99 regexp pattern to POSIX style");
-DATA(insert OID = 1624 ( mul_d_interval PGNSP PGUID 12 f f t f i 2 1186 "701 1186" _null_ _null_ _null_ mul_d_interval - _null_ ));
+DATA(insert OID = 1624 ( mul_d_interval PGNSP PGUID 12 f f t f i 2 1186 "701 1186" _null_ _null_ _null_ mul_d_interval - _null_ ));
DATA(insert OID = 1631 ( bpcharlike PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ _null_ _null_ textlike - _null_ ));
DESCR("matches LIKE expression");
DESCR("matches LIKE expression, case-insensitive");
DATA(insert OID = 1634 ( texticnlike PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ _null_ _null_ texticnlike - _null_ ));
DESCR("does not match LIKE expression, case-insensitive");
-DATA(insert OID = 1635 ( nameiclike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ _null_ _null_ nameiclike - _null_ ));
+DATA(insert OID = 1635 ( nameiclike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ _null_ _null_ nameiclike - _null_ ));
DESCR("matches LIKE expression, case-insensitive");
-DATA(insert OID = 1636 ( nameicnlike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ _null_ _null_ nameicnlike - _null_ ));
+DATA(insert OID = 1636 ( nameicnlike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ _null_ _null_ nameicnlike - _null_ ));
DESCR("does not match LIKE expression, case-insensitive");
DATA(insert OID = 1637 ( like_escape PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ _null_ _null_ like_escape - _null_ ));
DESCR("convert LIKE pattern to use backslash escapes");
DESCR("matches regex., case-insensitive");
DATA(insert OID = 1657 ( bpcharicregexne PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ _null_ _null_ texticregexne - _null_ ));
DESCR("does not match regex., case-insensitive");
-DATA(insert OID = 1658 ( bpcharregexeq PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ _null_ _null_ textregexeq - _null_ ));
+DATA(insert OID = 1658 ( bpcharregexeq PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ _null_ _null_ textregexeq - _null_ ));
DESCR("matches regex., case-sensitive");
-DATA(insert OID = 1659 ( bpcharregexne PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ _null_ _null_ textregexne - _null_ ));
+DATA(insert OID = 1659 ( bpcharregexne PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ _null_ _null_ textregexne - _null_ ));
DESCR("does not match regex., case-sensitive");
DATA(insert OID = 1660 ( bpchariclike PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ _null_ _null_ texticlike - _null_ ));
DESCR("matches LIKE expression, case-insensitive");
DATA(insert OID = 1661 ( bpcharicnlike PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ _null_ _null_ texticnlike - _null_ ));
DESCR("does not match LIKE expression, case-insensitive");
-DATA(insert OID = 1689 ( flatfile_update_trigger PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ flatfile_update_trigger - _null_ ));
+DATA(insert OID = 1689 ( flatfile_update_trigger PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ flatfile_update_trigger - _null_ ));
DESCR("update flat-file copy of a shared catalog");
/* Oracle Compatibility Related Functions - By Edmund Mergl */
DATA(insert OID = 868 ( strpos PGNSP PGUID 12 f f t f i 2 23 "25 25" _null_ _null_ _null_ textpos - _null_ ));
DESCR("find position of substring");
-DATA(insert OID = 870 ( lower PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ lower - _null_ ));
+DATA(insert OID = 870 ( lower PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ lower - _null_ ));
DESCR("lowercase");
-DATA(insert OID = 871 ( upper PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ upper - _null_ ));
+DATA(insert OID = 871 ( upper PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ upper - _null_ ));
DESCR("uppercase");
-DATA(insert OID = 872 ( initcap PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ initcap - _null_ ));
+DATA(insert OID = 872 ( initcap PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ initcap - _null_ ));
DESCR("capitalize each word");
DATA(insert OID = 873 ( lpad PGNSP PGUID 12 f f t f i 3 25 "25 23 25" _null_ _null_ _null_ lpad - _null_ ));
DESCR("left-pad string to length");
DESCR("left-pad string to length");
DATA(insert OID = 880 ( rpad PGNSP PGUID 14 f f t f i 2 25 "25 23" _null_ _null_ _null_ "select pg_catalog.rpad($1, $2, '' '')" - _null_ ));
DESCR("right-pad string to length");
-DATA(insert OID = 881 ( ltrim PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ ltrim1 - _null_ ));
+DATA(insert OID = 881 ( ltrim PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ ltrim1 - _null_ ));
DESCR("trim spaces from left end of string");
-DATA(insert OID = 882 ( rtrim PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ rtrim1 - _null_ ));
+DATA(insert OID = 882 ( rtrim PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ rtrim1 - _null_ ));
DESCR("trim spaces from right end of string");
DATA(insert OID = 883 ( substr PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ _null_ _null_ text_substr_no_len - _null_ ));
DESCR("return portion of string");
DATA(insert OID = 884 ( btrim PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ _null_ _null_ btrim - _null_ ));
DESCR("trim selected characters from both ends of string");
-DATA(insert OID = 885 ( btrim PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ btrim1 - _null_ ));
+DATA(insert OID = 885 ( btrim PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ btrim1 - _null_ ));
DESCR("trim spaces from both ends of string");
DATA(insert OID = 936 ( substring PGNSP PGUID 12 f f t f i 3 25 "25 23 23" _null_ _null_ _null_ text_substr - _null_ ));
DESCR("replace all occurrences of old_substr with new_substr in string");
DATA(insert OID = 2284 ( regexp_replace PGNSP PGUID 12 f f t f i 3 25 "25 25 25" _null_ _null_ _null_ textregexreplace_noopt - _null_ ));
DESCR("replace text using regexp");
-DATA(insert OID = 2285 ( regexp_replace PGNSP PGUID 12 f f t f i 4 25 "25 25 25 25" _null_ _null_ _null_ textregexreplace - _null_ ));
+DATA(insert OID = 2285 ( regexp_replace PGNSP PGUID 12 f f t f i 4 25 "25 25 25 25" _null_ _null_ _null_ textregexreplace - _null_ ));
DESCR("replace text using regexp");
DATA(insert OID = 2088 ( split_part PGNSP PGUID 12 f f t f i 3 25 "25 25 23" _null_ _null_ _null_ split_text - _null_ ));
DESCR("split string by field_sep and return field_num");
-DATA(insert OID = 2089 ( to_hex PGNSP PGUID 12 f f t f i 1 25 "23" _null_ _null_ _null_ to_hex32 - _null_ ));
+DATA(insert OID = 2089 ( to_hex PGNSP PGUID 12 f f t f i 1 25 "23" _null_ _null_ _null_ to_hex32 - _null_ ));
DESCR("convert int4 number to hex");
-DATA(insert OID = 2090 ( to_hex PGNSP PGUID 12 f f t f i 1 25 "20" _null_ _null_ _null_ to_hex64 - _null_ ));
+DATA(insert OID = 2090 ( to_hex PGNSP PGUID 12 f f t f i 1 25 "20" _null_ _null_ _null_ to_hex64 - _null_ ));
DESCR("convert int8 number to hex");
/* for character set encoding support */
DATA(insert OID = 1619 ( convert_using PGNSP PGUID 12 f f t f s 2 25 "25 25" _null_ _null_ _null_ pg_convert_using - _null_ ));
DESCR("convert string with specified conversion name");
-DATA(insert OID = 1264 ( pg_char_to_encoding PGNSP PGUID 12 f f t f s 1 23 "19" _null_ _null_ _null_ PG_char_to_encoding - _null_ ));
+DATA(insert OID = 1264 ( pg_char_to_encoding PGNSP PGUID 12 f f t f s 1 23 "19" _null_ _null_ _null_ PG_char_to_encoding - _null_ ));
DESCR("convert encoding name to encoding id");
-DATA(insert OID = 1597 ( pg_encoding_to_char PGNSP PGUID 12 f f t f s 1 19 "23" _null_ _null_ _null_ PG_encoding_to_char - _null_ ));
+DATA(insert OID = 1597 ( pg_encoding_to_char PGNSP PGUID 12 f f t f s 1 19 "23" _null_ _null_ _null_ PG_encoding_to_char - _null_ ));
DESCR("convert encoding id to encoding name");
DATA(insert OID = 1638 ( oidgt PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ _null_ _null_ oidgt - _null_ ));
DESCR("greater-than-or-equal");
/* System-view support functions */
-DATA(insert OID = 1573 ( pg_get_ruledef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_ruledef - _null_ ));
+DATA(insert OID = 1573 ( pg_get_ruledef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_ruledef - _null_ ));
DESCR("source text of a rule");
-DATA(insert OID = 1640 ( pg_get_viewdef PGNSP PGUID 12 f f t f s 1 25 "25" _null_ _null_ _null_ pg_get_viewdef_name - _null_ ));
+DATA(insert OID = 1640 ( pg_get_viewdef PGNSP PGUID 12 f f t f s 1 25 "25" _null_ _null_ _null_ pg_get_viewdef_name - _null_ ));
DESCR("select statement of a view");
-DATA(insert OID = 1641 ( pg_get_viewdef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_viewdef - _null_ ));
+DATA(insert OID = 1641 ( pg_get_viewdef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_viewdef - _null_ ));
DESCR("select statement of a view");
-DATA(insert OID = 1642 ( pg_get_userbyid PGNSP PGUID 12 f f t f s 1 19 "26" _null_ _null_ _null_ pg_get_userbyid - _null_ ));
+DATA(insert OID = 1642 ( pg_get_userbyid PGNSP PGUID 12 f f t f s 1 19 "26" _null_ _null_ _null_ pg_get_userbyid - _null_ ));
DESCR("role name by OID (with fallback)");
-DATA(insert OID = 1643 ( pg_get_indexdef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_indexdef - _null_ ));
+DATA(insert OID = 1643 ( pg_get_indexdef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_indexdef - _null_ ));
DESCR("index description");
-DATA(insert OID = 1662 ( pg_get_triggerdef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_triggerdef - _null_ ));
+DATA(insert OID = 1662 ( pg_get_triggerdef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_triggerdef - _null_ ));
DESCR("trigger description");
-DATA(insert OID = 1387 ( pg_get_constraintdef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_constraintdef - _null_ ));
+DATA(insert OID = 1387 ( pg_get_constraintdef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ _null_ _null_ pg_get_constraintdef - _null_ ));
DESCR("constraint description");
DATA(insert OID = 1716 ( pg_get_expr PGNSP PGUID 12 f f t f s 2 25 "25 26" _null_ _null_ _null_ pg_get_expr - _null_ ));
DESCR("deparse an encoded expression");
-DATA(insert OID = 1665 ( pg_get_serial_sequence PGNSP PGUID 12 f f t f s 2 25 "25 25" _null_ _null_ _null_ pg_get_serial_sequence - _null_ ));
+DATA(insert OID = 1665 ( pg_get_serial_sequence PGNSP PGUID 12 f f t f s 2 25 "25 25" _null_ _null_ _null_ pg_get_serial_sequence - _null_ ));
DESCR("name of sequence for a serial column");
/* Generic referential integrity constraint triggers */
-DATA(insert OID = 1644 ( RI_FKey_check_ins PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_check_ins - _null_ ));
+DATA(insert OID = 1644 ( RI_FKey_check_ins PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_check_ins - _null_ ));
DESCR("referential integrity FOREIGN KEY ... REFERENCES");
-DATA(insert OID = 1645 ( RI_FKey_check_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_check_upd - _null_ ));
+DATA(insert OID = 1645 ( RI_FKey_check_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_check_upd - _null_ ));
DESCR("referential integrity FOREIGN KEY ... REFERENCES");
-DATA(insert OID = 1646 ( RI_FKey_cascade_del PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_cascade_del - _null_ ));
+DATA(insert OID = 1646 ( RI_FKey_cascade_del PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_cascade_del - _null_ ));
DESCR("referential integrity ON DELETE CASCADE");
-DATA(insert OID = 1647 ( RI_FKey_cascade_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_cascade_upd - _null_ ));
+DATA(insert OID = 1647 ( RI_FKey_cascade_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_cascade_upd - _null_ ));
DESCR("referential integrity ON UPDATE CASCADE");
-DATA(insert OID = 1648 ( RI_FKey_restrict_del PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_restrict_del - _null_ ));
+DATA(insert OID = 1648 ( RI_FKey_restrict_del PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_restrict_del - _null_ ));
DESCR("referential integrity ON DELETE RESTRICT");
-DATA(insert OID = 1649 ( RI_FKey_restrict_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_restrict_upd - _null_ ));
+DATA(insert OID = 1649 ( RI_FKey_restrict_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_restrict_upd - _null_ ));
DESCR("referential integrity ON UPDATE RESTRICT");
-DATA(insert OID = 1650 ( RI_FKey_setnull_del PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_setnull_del - _null_ ));
+DATA(insert OID = 1650 ( RI_FKey_setnull_del PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_setnull_del - _null_ ));
DESCR("referential integrity ON DELETE SET NULL");
-DATA(insert OID = 1651 ( RI_FKey_setnull_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_setnull_upd - _null_ ));
+DATA(insert OID = 1651 ( RI_FKey_setnull_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_setnull_upd - _null_ ));
DESCR("referential integrity ON UPDATE SET NULL");
DATA(insert OID = 1652 ( RI_FKey_setdefault_del PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_setdefault_del - _null_ ));
DESCR("referential integrity ON DELETE SET DEFAULT");
DATA(insert OID = 1653 ( RI_FKey_setdefault_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_setdefault_upd - _null_ ));
DESCR("referential integrity ON UPDATE SET DEFAULT");
-DATA(insert OID = 1654 ( RI_FKey_noaction_del PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_noaction_del - _null_ ));
+DATA(insert OID = 1654 ( RI_FKey_noaction_del PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_noaction_del - _null_ ));
DESCR("referential integrity ON DELETE NO ACTION");
-DATA(insert OID = 1655 ( RI_FKey_noaction_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_noaction_upd - _null_ ));
+DATA(insert OID = 1655 ( RI_FKey_noaction_upd PGNSP PGUID 12 f f t f v 0 2279 "" _null_ _null_ _null_ RI_FKey_noaction_upd - _null_ ));
DESCR("referential integrity ON UPDATE NO ACTION");
-DATA(insert OID = 1666 ( varbiteq PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ biteq - _null_ ));
+DATA(insert OID = 1666 ( varbiteq PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ biteq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1667 ( varbitne PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitne - _null_ ));
+DATA(insert OID = 1667 ( varbitne PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1668 ( varbitge PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitge - _null_ ));
+DATA(insert OID = 1668 ( varbitge PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitge - _null_ ));
DESCR("greater than or equal");
-DATA(insert OID = 1669 ( varbitgt PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitgt - _null_ ));
+DATA(insert OID = 1669 ( varbitgt PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitgt - _null_ ));
DESCR("greater than");
-DATA(insert OID = 1670 ( varbitle PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitle - _null_ ));
+DATA(insert OID = 1670 ( varbitle PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitle - _null_ ));
DESCR("less than or equal");
-DATA(insert OID = 1671 ( varbitlt PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitlt - _null_ ));
+DATA(insert OID = 1671 ( varbitlt PGNSP PGUID 12 f f t f i 2 16 "1562 1562" _null_ _null_ _null_ bitlt - _null_ ));
DESCR("less than");
-DATA(insert OID = 1672 ( varbitcmp PGNSP PGUID 12 f f t f i 2 23 "1562 1562" _null_ _null_ _null_ bitcmp - _null_ ));
+DATA(insert OID = 1672 ( varbitcmp PGNSP PGUID 12 f f t f i 2 23 "1562 1562" _null_ _null_ _null_ bitcmp - _null_ ));
DESCR("compare");
DATA(insert OID = 1673 ( bitand PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ _null_ _null_ bitand - _null_ ));
DESCR("bitwise or");
DATA(insert OID = 1675 ( bitxor PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ _null_ _null_ bitxor - _null_ ));
DESCR("bitwise exclusive or");
-DATA(insert OID = 1676 ( bitnot PGNSP PGUID 12 f f t f i 1 1560 "1560" _null_ _null_ _null_ bitnot - _null_ ));
+DATA(insert OID = 1676 ( bitnot PGNSP PGUID 12 f f t f i 1 1560 "1560" _null_ _null_ _null_ bitnot - _null_ ));
DESCR("bitwise negation");
-DATA(insert OID = 1677 ( bitshiftleft PGNSP PGUID 12 f f t f i 2 1560 "1560 23" _null_ _null_ _null_ bitshiftleft - _null_ ));
+DATA(insert OID = 1677 ( bitshiftleft PGNSP PGUID 12 f f t f i 2 1560 "1560 23" _null_ _null_ _null_ bitshiftleft - _null_ ));
DESCR("bitwise left shift");
-DATA(insert OID = 1678 ( bitshiftright PGNSP PGUID 12 f f t f i 2 1560 "1560 23" _null_ _null_ _null_ bitshiftright - _null_ ));
+DATA(insert OID = 1678 ( bitshiftright PGNSP PGUID 12 f f t f i 2 1560 "1560 23" _null_ _null_ _null_ bitshiftright - _null_ ));
DESCR("bitwise right shift");
DATA(insert OID = 1679 ( bitcat PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ _null_ _null_ bitcat - _null_ ));
DESCR("bitwise concatenation");
DATA(insert OID = 1698 ( position PGNSP PGUID 12 f f t f i 2 23 "1560 1560" _null_ _null_ _null_ bitposition - _null_ ));
DESCR("return position of sub-bitstring");
-DATA(insert OID = 1699 ( substring PGNSP PGUID 14 f f t f i 2 1560 "1560 23" _null_ _null_ _null_ "select pg_catalog.substring($1, $2, -1)" - _null_ ));
+DATA(insert OID = 1699 ( substring PGNSP PGUID 14 f f t f i 2 1560 "1560 23" _null_ _null_ _null_ "select pg_catalog.substring($1, $2, -1)" - _null_ ));
DESCR("return portion of bitstring");
/* for mac type support */
-DATA(insert OID = 436 ( macaddr_in PGNSP PGUID 12 f f t f i 1 829 "2275" _null_ _null_ _null_ macaddr_in - _null_ ));
+DATA(insert OID = 436 ( macaddr_in PGNSP PGUID 12 f f t f i 1 829 "2275" _null_ _null_ _null_ macaddr_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 437 ( macaddr_out PGNSP PGUID 12 f f t f i 1 2275 "829" _null_ _null_ _null_ macaddr_out - _null_ ));
+DATA(insert OID = 437 ( macaddr_out PGNSP PGUID 12 f f t f i 1 2275 "829" _null_ _null_ _null_ macaddr_out - _null_ ));
DESCR("I/O");
DATA(insert OID = 752 ( text PGNSP PGUID 12 f f t f i 1 25 "829" _null_ _null_ _null_ macaddr_text - _null_ ));
DESCR("less-equal-greater");
/* for inet type support */
-DATA(insert OID = 910 ( inet_in PGNSP PGUID 12 f f t f i 1 869 "2275" _null_ _null_ _null_ inet_in - _null_ ));
+DATA(insert OID = 910 ( inet_in PGNSP PGUID 12 f f t f i 1 869 "2275" _null_ _null_ _null_ inet_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 911 ( inet_out PGNSP PGUID 12 f f t f i 1 2275 "869" _null_ _null_ _null_ inet_out - _null_ ));
+DATA(insert OID = 911 ( inet_out PGNSP PGUID 12 f f t f i 1 2275 "869" _null_ _null_ _null_ inet_out - _null_ ));
DESCR("I/O");
/* for cidr type support */
-DATA(insert OID = 1267 ( cidr_in PGNSP PGUID 12 f f t f i 1 650 "2275" _null_ _null_ _null_ cidr_in - _null_ ));
+DATA(insert OID = 1267 ( cidr_in PGNSP PGUID 12 f f t f i 1 650 "2275" _null_ _null_ _null_ cidr_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1427 ( cidr_out PGNSP PGUID 12 f f t f i 1 2275 "650" _null_ _null_ _null_ cidr_out - _null_ ));
+DATA(insert OID = 1427 ( cidr_out PGNSP PGUID 12 f f t f i 1 2275 "650" _null_ _null_ _null_ cidr_out - _null_ ));
DESCR("I/O");
/* these are used for both inet and cidr */
DATA(insert OID = 1715 ( set_masklen PGNSP PGUID 12 f f t f i 2 869 "869 23" _null_ _null_ _null_ inet_set_masklen - _null_ ));
DESCR("change the netmask of an inet");
-DATA(insert OID = 2196 ( inet_client_addr PGNSP PGUID 12 f f f f s 0 869 "" _null_ _null_ _null_ inet_client_addr - _null_ ));
+DATA(insert OID = 2196 ( inet_client_addr PGNSP PGUID 12 f f f f s 0 869 "" _null_ _null_ _null_ inet_client_addr - _null_ ));
DESCR("INET address of the client");
DATA(insert OID = 2197 ( inet_client_port PGNSP PGUID 12 f f f f s 0 23 "" _null_ _null_ _null_ inet_client_port - _null_ ));
DESCR("client's port number for this connection");
-DATA(insert OID = 2198 ( inet_server_addr PGNSP PGUID 12 f f f f s 0 869 "" _null_ _null_ _null_ inet_server_addr - _null_ ));
+DATA(insert OID = 2198 ( inet_server_addr PGNSP PGUID 12 f f f f s 0 869 "" _null_ _null_ _null_ inet_server_addr - _null_ ));
DESCR("INET address of the server");
DATA(insert OID = 2199 ( inet_server_port PGNSP PGUID 12 f f f f s 0 23 "" _null_ _null_ _null_ inet_server_port - _null_ ));
DESCR("server's port number for this connection");
DATA(insert OID = 1690 ( time_mi_time PGNSP PGUID 12 f f t f i 2 1186 "1083 1083" _null_ _null_ _null_ time_mi_time - _null_ ));
DESCR("minus");
-DATA(insert OID = 1691 ( boolle PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ _null_ _null_ boolle - _null_ ));
+DATA(insert OID = 1691 ( boolle PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ _null_ _null_ boolle - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1692 ( boolge PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ _null_ _null_ boolge - _null_ ));
+DATA(insert OID = 1692 ( boolge PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ _null_ _null_ boolge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1693 ( btboolcmp PGNSP PGUID 12 f f t f i 2 23 "16 16" _null_ _null_ _null_ btboolcmp - _null_ ));
+DATA(insert OID = 1693 ( btboolcmp PGNSP PGUID 12 f f t f i 2 23 "16 16" _null_ _null_ _null_ btboolcmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 1696 ( timetz_hash PGNSP PGUID 12 f f t f i 1 23 "1266" _null_ _null_ _null_ timetz_hash - _null_ ));
/* OID's 1700 - 1799 NUMERIC data type */
DATA(insert OID = 1701 ( numeric_in PGNSP PGUID 12 f f t f i 3 1700 "2275 26 23" _null_ _null_ _null_ numeric_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1702 ( numeric_out PGNSP PGUID 12 f f t f i 1 2275 "1700" _null_ _null_ _null_ numeric_out - _null_ ));
+DATA(insert OID = 1702 ( numeric_out PGNSP PGUID 12 f f t f i 1 2275 "1700" _null_ _null_ _null_ numeric_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1703 ( numeric PGNSP PGUID 12 f f t f i 2 1700 "1700 23" _null_ _null_ _null_ numeric - _null_ ));
+DATA(insert OID = 1703 ( numeric PGNSP PGUID 12 f f t f i 2 1700 "1700 23" _null_ _null_ _null_ numeric - _null_ ));
DESCR("adjust numeric to typmod precision/scale");
-DATA(insert OID = 1704 ( numeric_abs PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_abs - _null_ ));
+DATA(insert OID = 1704 ( numeric_abs PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 1705 ( abs PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_abs - _null_ ));
+DATA(insert OID = 1705 ( abs PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 1706 ( sign PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_sign - _null_ ));
+DATA(insert OID = 1706 ( sign PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_sign - _null_ ));
DESCR("sign of value");
-DATA(insert OID = 1707 ( round PGNSP PGUID 12 f f t f i 2 1700 "1700 23" _null_ _null_ _null_ numeric_round - _null_ ));
+DATA(insert OID = 1707 ( round PGNSP PGUID 12 f f t f i 2 1700 "1700 23" _null_ _null_ _null_ numeric_round - _null_ ));
DESCR("value rounded to 'scale'");
-DATA(insert OID = 1708 ( round PGNSP PGUID 14 f f t f i 1 1700 "1700" _null_ _null_ _null_ "select pg_catalog.round($1,0)" - _null_ ));
+DATA(insert OID = 1708 ( round PGNSP PGUID 14 f f t f i 1 1700 "1700" _null_ _null_ _null_ "select pg_catalog.round($1,0)" - _null_ ));
DESCR("value rounded to 'scale' of zero");
-DATA(insert OID = 1709 ( trunc PGNSP PGUID 12 f f t f i 2 1700 "1700 23" _null_ _null_ _null_ numeric_trunc - _null_ ));
+DATA(insert OID = 1709 ( trunc PGNSP PGUID 12 f f t f i 2 1700 "1700 23" _null_ _null_ _null_ numeric_trunc - _null_ ));
DESCR("value truncated to 'scale'");
-DATA(insert OID = 1710 ( trunc PGNSP PGUID 14 f f t f i 1 1700 "1700" _null_ _null_ _null_ "select pg_catalog.trunc($1,0)" - _null_ ));
+DATA(insert OID = 1710 ( trunc PGNSP PGUID 14 f f t f i 1 1700 "1700" _null_ _null_ _null_ "select pg_catalog.trunc($1,0)" - _null_ ));
DESCR("value truncated to 'scale' of zero");
-DATA(insert OID = 1711 ( ceil PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_ceil - _null_ ));
+DATA(insert OID = 1711 ( ceil PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_ceil - _null_ ));
DESCR("smallest integer >= value");
-DATA(insert OID = 2167 ( ceiling PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_ceil - _null_ ));
+DATA(insert OID = 2167 ( ceiling PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_ceil - _null_ ));
DESCR("smallest integer >= value");
-DATA(insert OID = 1712 ( floor PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_floor - _null_ ));
+DATA(insert OID = 1712 ( floor PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_floor - _null_ ));
DESCR("largest integer <= value");
-DATA(insert OID = 1718 ( numeric_eq PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_eq - _null_ ));
+DATA(insert OID = 1718 ( numeric_eq PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1719 ( numeric_ne PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_ne - _null_ ));
+DATA(insert OID = 1719 ( numeric_ne PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1720 ( numeric_gt PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_gt - _null_ ));
+DATA(insert OID = 1720 ( numeric_gt PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1721 ( numeric_ge PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_ge - _null_ ));
+DATA(insert OID = 1721 ( numeric_ge PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1722 ( numeric_lt PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_lt - _null_ ));
+DATA(insert OID = 1722 ( numeric_lt PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1723 ( numeric_le PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_le - _null_ ));
+DATA(insert OID = 1723 ( numeric_le PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ _null_ _null_ numeric_le - _null_ ));
DESCR("less-than-or-equal");
DATA(insert OID = 1724 ( numeric_add PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ _null_ _null_ numeric_add - _null_ ));
DESCR("add");
DESCR("modulus");
DATA(insert OID = 1729 ( numeric_mod PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ _null_ _null_ numeric_mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 1730 ( sqrt PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_sqrt - _null_ ));
+DATA(insert OID = 1730 ( sqrt PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_sqrt - _null_ ));
DESCR("square root");
-DATA(insert OID = 1731 ( numeric_sqrt PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_sqrt - _null_ ));
+DATA(insert OID = 1731 ( numeric_sqrt PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_sqrt - _null_ ));
DESCR("square root");
-DATA(insert OID = 1732 ( exp PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_exp - _null_ ));
+DATA(insert OID = 1732 ( exp PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_exp - _null_ ));
DESCR("e raised to the power of n");
-DATA(insert OID = 1733 ( numeric_exp PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_exp - _null_ ));
+DATA(insert OID = 1733 ( numeric_exp PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_exp - _null_ ));
DESCR("e raised to the power of n");
-DATA(insert OID = 1734 ( ln PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_ln - _null_ ));
+DATA(insert OID = 1734 ( ln PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_ln - _null_ ));
DESCR("natural logarithm of n");
-DATA(insert OID = 1735 ( numeric_ln PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_ln - _null_ ));
+DATA(insert OID = 1735 ( numeric_ln PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_ln - _null_ ));
DESCR("natural logarithm of n");
DATA(insert OID = 1736 ( log PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ _null_ _null_ numeric_log - _null_ ));
DESCR("logarithm base m of n");
DESCR("m raised to the power of n");
DATA(insert OID = 1740 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "23" _null_ _null_ _null_ int4_numeric - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1741 ( log PGNSP PGUID 14 f f t f i 1 1700 "1700" _null_ _null_ _null_ "select pg_catalog.log(10, $1)" - _null_ ));
+DATA(insert OID = 1741 ( log PGNSP PGUID 14 f f t f i 1 1700 "1700" _null_ _null_ _null_ "select pg_catalog.log(10, $1)" - _null_ ));
DESCR("logarithm base 10 of n");
-DATA(insert OID = 1742 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "700" _null_ _null_ _null_ float4_numeric - _null_ ));
+DATA(insert OID = 1742 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "700" _null_ _null_ _null_ float4_numeric - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1743 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "701" _null_ _null_ _null_ float8_numeric - _null_ ));
+DATA(insert OID = 1743 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "701" _null_ _null_ _null_ float8_numeric - _null_ ));
DESCR("(internal)");
DATA(insert OID = 1744 ( int4 PGNSP PGUID 12 f f t f i 1 23 "1700" _null_ _null_ _null_ numeric_int4 - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1745 ( float4 PGNSP PGUID 12 f f t f i 1 700 "1700" _null_ _null_ _null_ numeric_float4 - _null_ ));
+DATA(insert OID = 1745 ( float4 PGNSP PGUID 12 f f t f i 1 700 "1700" _null_ _null_ _null_ numeric_float4 - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1746 ( float8 PGNSP PGUID 12 f f t f i 1 701 "1700" _null_ _null_ _null_ numeric_float8 - _null_ ));
+DATA(insert OID = 1746 ( float8 PGNSP PGUID 12 f f t f i 1 701 "1700" _null_ _null_ _null_ numeric_float8 - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 2170 ( width_bucket PGNSP PGUID 12 f f t f i 4 23 "1700 1700 1700 23" _null_ _null_ _null_ width_bucket_numeric - _null_ ));
+DATA(insert OID = 2170 ( width_bucket PGNSP PGUID 12 f f t f i 4 23 "1700 1700 1700 23" _null_ _null_ _null_ width_bucket_numeric - _null_ ));
DESCR("bucket number of operand in equidepth histogram");
DATA(insert OID = 1747 ( time_pl_interval PGNSP PGUID 12 f f t f i 2 1083 "1083 1186" _null_ _null_ _null_ time_pl_interval - _null_ ));
DATA(insert OID = 1750 ( timetz_mi_interval PGNSP PGUID 12 f f t f i 2 1266 "1266 1186" _null_ _null_ _null_ timetz_mi_interval - _null_ ));
DESCR("minus");
-DATA(insert OID = 1764 ( numeric_inc PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_inc - _null_ ));
+DATA(insert OID = 1764 ( numeric_inc PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_inc - _null_ ));
DESCR("increment by one");
DATA(insert OID = 1766 ( numeric_smaller PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ _null_ _null_ numeric_smaller - _null_ ));
DESCR("smaller of two numbers");
DATA(insert OID = 1767 ( numeric_larger PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ _null_ _null_ numeric_larger - _null_ ));
DESCR("larger of two numbers");
-DATA(insert OID = 1769 ( numeric_cmp PGNSP PGUID 12 f f t f i 2 23 "1700 1700" _null_ _null_ _null_ numeric_cmp - _null_ ));
+DATA(insert OID = 1769 ( numeric_cmp PGNSP PGUID 12 f f t f i 2 23 "1700 1700" _null_ _null_ _null_ numeric_cmp - _null_ ));
DESCR("compare two numbers");
-DATA(insert OID = 1771 ( numeric_uminus PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_uminus - _null_ ));
+DATA(insert OID = 1771 ( numeric_uminus PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_uminus - _null_ ));
DESCR("negate");
DATA(insert OID = 1779 ( int8 PGNSP PGUID 12 f f t f i 1 20 "1700" _null_ _null_ _null_ numeric_int8 - _null_ ));
DESCR("(internal)");
DESCR("format timestamp with time zone to text");
DATA(insert OID = 1772 ( to_char PGNSP PGUID 12 f f t f i 2 25 "1700 25" _null_ _null_ _null_ numeric_to_char - _null_ ));
DESCR("format numeric to text");
-DATA(insert OID = 1773 ( to_char PGNSP PGUID 12 f f t f i 2 25 "23 25" _null_ _null_ _null_ int4_to_char - _null_ ));
+DATA(insert OID = 1773 ( to_char PGNSP PGUID 12 f f t f i 2 25 "23 25" _null_ _null_ _null_ int4_to_char - _null_ ));
DESCR("format int4 to text");
-DATA(insert OID = 1774 ( to_char PGNSP PGUID 12 f f t f i 2 25 "20 25" _null_ _null_ _null_ int8_to_char - _null_ ));
+DATA(insert OID = 1774 ( to_char PGNSP PGUID 12 f f t f i 2 25 "20 25" _null_ _null_ _null_ int8_to_char - _null_ ));
DESCR("format int8 to text");
DATA(insert OID = 1775 ( to_char PGNSP PGUID 12 f f t f i 2 25 "700 25" _null_ _null_ _null_ float4_to_char - _null_ ));
DESCR("format float4 to text");
DESCR("VARIANCE aggregate final function");
DATA(insert OID = 1832 ( float8_stddev PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ _null_ _null_ float8_stddev - _null_ ));
DESCR("STDDEV aggregate final function");
-DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 1700" _null_ _null_ _null_ numeric_accum - _null_ ));
+DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 1700" _null_ _null_ _null_ numeric_accum - _null_ ));
DESCR("aggregate transition function");
DATA(insert OID = 1834 ( int2_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 21" _null_ _null_ _null_ int2_accum - _null_ ));
DESCR("aggregate transition function");
DESCR("aggregate transition function");
DATA(insert OID = 1836 ( int8_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 20" _null_ _null_ _null_ int8_accum - _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 1837 ( numeric_avg PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ _null_ _null_ numeric_avg - _null_ ));
+DATA(insert OID = 1837 ( numeric_avg PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ _null_ _null_ numeric_avg - _null_ ));
DESCR("AVG aggregate final function");
-DATA(insert OID = 1838 ( numeric_variance PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ _null_ _null_ numeric_variance - _null_ ));
+DATA(insert OID = 1838 ( numeric_variance PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ _null_ _null_ numeric_variance - _null_ ));
DESCR("VARIANCE aggregate final function");
-DATA(insert OID = 1839 ( numeric_stddev PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ _null_ _null_ numeric_stddev - _null_ ));
+DATA(insert OID = 1839 ( numeric_stddev PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ _null_ _null_ numeric_stddev - _null_ ));
DESCR("STDDEV aggregate final function");
DATA(insert OID = 1840 ( int2_sum PGNSP PGUID 12 f f f f i 2 20 "20 21" _null_ _null_ _null_ int2_sum - _null_ ));
DESCR("SUM(int2) transition function");
DESCR("SUM(int4) transition function");
DATA(insert OID = 1842 ( int8_sum PGNSP PGUID 12 f f f f i 2 1700 "1700 20" _null_ _null_ _null_ int8_sum - _null_ ));
DESCR("SUM(int8) transition function");
-DATA(insert OID = 1843 ( interval_accum PGNSP PGUID 12 f f t f i 2 1187 "1187 1186" _null_ _null_ _null_ interval_accum - _null_ ));
+DATA(insert OID = 1843 ( interval_accum PGNSP PGUID 12 f f t f i 2 1187 "1187 1186" _null_ _null_ _null_ interval_accum - _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 1844 ( interval_avg PGNSP PGUID 12 f f t f i 1 1186 "1187" _null_ _null_ _null_ interval_avg - _null_ ));
+DATA(insert OID = 1844 ( interval_avg PGNSP PGUID 12 f f t f i 1 1186 "1187" _null_ _null_ _null_ interval_avg - _null_ ));
DESCR("AVG aggregate final function");
DATA(insert OID = 1962 ( int2_avg_accum PGNSP PGUID 12 f f t f i 2 1016 "1016 21" _null_ _null_ _null_ int2_avg_accum - _null_ ));
DESCR("AVG(int2) transition function");
DATA(insert OID = 1963 ( int4_avg_accum PGNSP PGUID 12 f f t f i 2 1016 "1016 23" _null_ _null_ _null_ int4_avg_accum - _null_ ));
DESCR("AVG(int4) transition function");
-DATA(insert OID = 1964 ( int8_avg PGNSP PGUID 12 f f t f i 1 1700 "1016" _null_ _null_ _null_ int8_avg - _null_ ));
+DATA(insert OID = 1964 ( int8_avg PGNSP PGUID 12 f f t f i 1 1700 "1016" _null_ _null_ _null_ int8_avg - _null_ ));
DESCR("AVG(int) aggregate final function");
/* To ASCII conversion */
DATA(insert OID = 1845 ( to_ascii PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ to_ascii_default - _null_ ));
DESCR("encode text from DB encoding to ASCII text");
-DATA(insert OID = 1846 ( to_ascii PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ _null_ _null_ to_ascii_enc - _null_ ));
+DATA(insert OID = 1846 ( to_ascii PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ _null_ _null_ to_ascii_enc - _null_ ));
DESCR("encode text from encoding to ASCII text");
-DATA(insert OID = 1847 ( to_ascii PGNSP PGUID 12 f f t f i 2 25 "25 19" _null_ _null_ _null_ to_ascii_encname - _null_ ));
+DATA(insert OID = 1847 ( to_ascii PGNSP PGUID 12 f f t f i 2 25 "25 19" _null_ _null_ _null_ to_ascii_encname - _null_ ));
DESCR("encode text from encoding to ASCII text");
DATA(insert OID = 1848 ( interval_pl_time PGNSP PGUID 14 f f t f i 2 1083 "1186 1083" _null_ _null_ _null_ "select $2 + $1" - _null_ ));
DESCR("binary or");
DATA(insert OID = 1894 ( int2xor PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ _null_ _null_ int2xor - _null_ ));
DESCR("binary xor");
-DATA(insert OID = 1895 ( int2not PGNSP PGUID 12 f f t f i 1 21 "21" _null_ _null_ _null_ int2not - _null_ ));
+DATA(insert OID = 1895 ( int2not PGNSP PGUID 12 f f t f i 1 21 "21" _null_ _null_ _null_ int2not - _null_ ));
DESCR("binary not");
DATA(insert OID = 1896 ( int2shl PGNSP PGUID 12 f f t f i 2 21 "21 23" _null_ _null_ _null_ int2shl - _null_ ));
DESCR("binary shift left");
DESCR("binary or");
DATA(insert OID = 1900 ( int4xor PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ _null_ _null_ int4xor - _null_ ));
DESCR("binary xor");
-DATA(insert OID = 1901 ( int4not PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4not - _null_ ));
+DATA(insert OID = 1901 ( int4not PGNSP PGUID 12 f f t f i 1 23 "23" _null_ _null_ _null_ int4not - _null_ ));
DESCR("binary not");
DATA(insert OID = 1902 ( int4shl PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ _null_ _null_ int4shl - _null_ ));
DESCR("binary shift left");
DESCR("binary or");
DATA(insert OID = 1906 ( int8xor PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ _null_ _null_ int8xor - _null_ ));
DESCR("binary xor");
-DATA(insert OID = 1907 ( int8not PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8not - _null_ ));
+DATA(insert OID = 1907 ( int8not PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8not - _null_ ));
DESCR("binary not");
DATA(insert OID = 1908 ( int8shl PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ _null_ _null_ int8shl - _null_ ));
DESCR("binary shift left");
DESCR("unary plus");
DATA(insert OID = 1914 ( float8up PGNSP PGUID 12 f f t f i 1 701 "701" _null_ _null_ _null_ float8up - _null_ ));
DESCR("unary plus");
-DATA(insert OID = 1915 ( numeric_uplus PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_uplus - _null_ ));
+DATA(insert OID = 1915 ( numeric_uplus PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ _null_ _null_ numeric_uplus - _null_ ));
DESCR("unary plus");
DATA(insert OID = 1922 ( has_table_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ _null_ _null_ has_table_privilege_name_name - _null_ ));
DESCR("current user privilege on relation by rel oid");
-DATA(insert OID = 1928 ( pg_stat_get_numscans PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_numscans - _null_ ));
+DATA(insert OID = 1928 ( pg_stat_get_numscans PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_numscans - _null_ ));
DESCR("Statistics: Number of scans done for table/index");
-DATA(insert OID = 1929 ( pg_stat_get_tuples_returned PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_returned - _null_ ));
+DATA(insert OID = 1929 ( pg_stat_get_tuples_returned PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_returned - _null_ ));
DESCR("Statistics: Number of tuples read by seqscan");
-DATA(insert OID = 1930 ( pg_stat_get_tuples_fetched PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_fetched - _null_ ));
+DATA(insert OID = 1930 ( pg_stat_get_tuples_fetched PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_fetched - _null_ ));
DESCR("Statistics: Number of tuples fetched by idxscan");
-DATA(insert OID = 1931 ( pg_stat_get_tuples_inserted PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_inserted - _null_ ));
+DATA(insert OID = 1931 ( pg_stat_get_tuples_inserted PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_inserted - _null_ ));
DESCR("Statistics: Number of tuples inserted");
-DATA(insert OID = 1932 ( pg_stat_get_tuples_updated PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_updated - _null_ ));
+DATA(insert OID = 1932 ( pg_stat_get_tuples_updated PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_updated - _null_ ));
DESCR("Statistics: Number of tuples updated");
-DATA(insert OID = 1933 ( pg_stat_get_tuples_deleted PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_deleted - _null_ ));
+DATA(insert OID = 1933 ( pg_stat_get_tuples_deleted PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_tuples_deleted - _null_ ));
DESCR("Statistics: Number of tuples deleted");
-DATA(insert OID = 1934 ( pg_stat_get_blocks_fetched PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_blocks_fetched - _null_ ));
+DATA(insert OID = 1934 ( pg_stat_get_blocks_fetched PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_blocks_fetched - _null_ ));
DESCR("Statistics: Number of blocks fetched");
-DATA(insert OID = 1935 ( pg_stat_get_blocks_hit PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_blocks_hit - _null_ ));
+DATA(insert OID = 1935 ( pg_stat_get_blocks_hit PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_blocks_hit - _null_ ));
DESCR("Statistics: Number of blocks found in cache");
DATA(insert OID = 1936 ( pg_stat_get_backend_idset PGNSP PGUID 12 f f t t s 0 23 "" _null_ _null_ _null_ pg_stat_get_backend_idset - _null_ ));
DESCR("Statistics: Currently active backend IDs");
DESCR("Statistics: Current backend PID");
DATA(insert OID = 2274 ( pg_stat_reset PGNSP PGUID 12 f f f f v 0 16 "" _null_ _null_ _null_ pg_stat_reset - _null_ ));
DESCR("Statistics: Reset collected statistics");
-DATA(insert OID = 1937 ( pg_stat_get_backend_pid PGNSP PGUID 12 f f t f s 1 23 "23" _null_ _null_ _null_ pg_stat_get_backend_pid - _null_ ));
+DATA(insert OID = 1937 ( pg_stat_get_backend_pid PGNSP PGUID 12 f f t f s 1 23 "23" _null_ _null_ _null_ pg_stat_get_backend_pid - _null_ ));
DESCR("Statistics: PID of backend");
-DATA(insert OID = 1938 ( pg_stat_get_backend_dbid PGNSP PGUID 12 f f t f s 1 26 "23" _null_ _null_ _null_ pg_stat_get_backend_dbid - _null_ ));
+DATA(insert OID = 1938 ( pg_stat_get_backend_dbid PGNSP PGUID 12 f f t f s 1 26 "23" _null_ _null_ _null_ pg_stat_get_backend_dbid - _null_ ));
DESCR("Statistics: Database ID of backend");
-DATA(insert OID = 1939 ( pg_stat_get_backend_userid PGNSP PGUID 12 f f t f s 1 26 "23" _null_ _null_ _null_ pg_stat_get_backend_userid - _null_ ));
+DATA(insert OID = 1939 ( pg_stat_get_backend_userid PGNSP PGUID 12 f f t f s 1 26 "23" _null_ _null_ _null_ pg_stat_get_backend_userid - _null_ ));
DESCR("Statistics: User ID of backend");
-DATA(insert OID = 1940 ( pg_stat_get_backend_activity PGNSP PGUID 12 f f t f s 1 25 "23" _null_ _null_ _null_ pg_stat_get_backend_activity - _null_ ));
+DATA(insert OID = 1940 ( pg_stat_get_backend_activity PGNSP PGUID 12 f f t f s 1 25 "23" _null_ _null_ _null_ pg_stat_get_backend_activity - _null_ ));
DESCR("Statistics: Current query of backend");
-DATA(insert OID = 2094 ( pg_stat_get_backend_activity_start PGNSP PGUID 12 f f t f s 1 1184 "23" _null_ _null_ _null_ pg_stat_get_backend_activity_start - _null_));
+DATA(insert OID = 2094 ( pg_stat_get_backend_activity_start PGNSP PGUID 12 f f t f s 1 1184 "23" _null_ _null_ _null_ pg_stat_get_backend_activity_start - _null_));
DESCR("Statistics: Start time for current query of backend");
DATA(insert OID = 1391 ( pg_stat_get_backend_start PGNSP PGUID 12 f f t f s 1 1184 "23" _null_ _null_ _null_ pg_stat_get_backend_start - _null_));
DESCR("Statistics: Start time for current backend session");
DESCR("Statistics: Address of client connected to backend");
DATA(insert OID = 1393 ( pg_stat_get_backend_client_port PGNSP PGUID 12 f f t f s 1 23 "23" _null_ _null_ _null_ pg_stat_get_backend_client_port - _null_));
DESCR("Statistics: Port number of client connected to backend");
-DATA(insert OID = 1941 ( pg_stat_get_db_numbackends PGNSP PGUID 12 f f t f s 1 23 "26" _null_ _null_ _null_ pg_stat_get_db_numbackends - _null_ ));
+DATA(insert OID = 1941 ( pg_stat_get_db_numbackends PGNSP PGUID 12 f f t f s 1 23 "26" _null_ _null_ _null_ pg_stat_get_db_numbackends - _null_ ));
DESCR("Statistics: Number of backends in database");
-DATA(insert OID = 1942 ( pg_stat_get_db_xact_commit PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_db_xact_commit - _null_ ));
+DATA(insert OID = 1942 ( pg_stat_get_db_xact_commit PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_db_xact_commit - _null_ ));
DESCR("Statistics: Transactions committed");
-DATA(insert OID = 1943 ( pg_stat_get_db_xact_rollback PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_db_xact_rollback - _null_ ));
+DATA(insert OID = 1943 ( pg_stat_get_db_xact_rollback PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_db_xact_rollback - _null_ ));
DESCR("Statistics: Transactions rolled back");
-DATA(insert OID = 1944 ( pg_stat_get_db_blocks_fetched PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_db_blocks_fetched - _null_ ));
+DATA(insert OID = 1944 ( pg_stat_get_db_blocks_fetched PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_db_blocks_fetched - _null_ ));
DESCR("Statistics: Blocks fetched for database");
-DATA(insert OID = 1945 ( pg_stat_get_db_blocks_hit PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_db_blocks_hit - _null_ ));
+DATA(insert OID = 1945 ( pg_stat_get_db_blocks_hit PGNSP PGUID 12 f f t f s 1 20 "26" _null_ _null_ _null_ pg_stat_get_db_blocks_hit - _null_ ));
DESCR("Statistics: Blocks found in cache for database");
-DATA(insert OID = 1946 ( encode PGNSP PGUID 12 f f t f i 2 25 "17 25" _null_ _null_ _null_ binary_encode - _null_ ));
+DATA(insert OID = 1946 ( encode PGNSP PGUID 12 f f t f i 2 25 "17 25" _null_ _null_ _null_ binary_encode - _null_ ));
DESCR("Convert bytea value into some ascii-only text string");
-DATA(insert OID = 1947 ( decode PGNSP PGUID 12 f f t f i 2 17 "25 25" _null_ _null_ _null_ binary_decode - _null_ ));
+DATA(insert OID = 1947 ( decode PGNSP PGUID 12 f f t f i 2 17 "25 25" _null_ _null_ _null_ binary_decode - _null_ ));
DESCR("Convert ascii-encoded text string into bytea value");
DATA(insert OID = 1948 ( byteaeq PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ _null_ _null_ byteaeq - _null_ ));
DESCR("does not match LIKE expression");
DATA(insert OID = 2009 ( like_escape PGNSP PGUID 12 f f t f i 2 17 "17 17" _null_ _null_ _null_ like_escape_bytea - _null_ ));
DESCR("convert LIKE pattern to use backslash escapes");
-DATA(insert OID = 2010 ( length PGNSP PGUID 12 f f t f i 1 23 "17" _null_ _null_ _null_ byteaoctetlen - _null_ ));
+DATA(insert OID = 2010 ( length PGNSP PGUID 12 f f t f i 1 23 "17" _null_ _null_ _null_ byteaoctetlen - _null_ ));
DESCR("octet length");
DATA(insert OID = 2011 ( byteacat PGNSP PGUID 12 f f t f i 2 17 "17 17" _null_ _null_ _null_ byteacat - _null_ ));
DESCR("concatenate");
DATA(insert OID = 2015 ( btrim PGNSP PGUID 12 f f t f i 2 17 "17 17" _null_ _null_ _null_ byteatrim - _null_ ));
DESCR("trim both ends of string");
-DATA(insert OID = 2019 ( time PGNSP PGUID 12 f f t f s 1 1083 "1184" _null_ _null_ _null_ timestamptz_time - _null_ ));
+DATA(insert OID = 2019 ( time PGNSP PGUID 12 f f t f s 1 1083 "1184" _null_ _null_ _null_ timestamptz_time - _null_ ));
DESCR("convert timestamptz to time");
-DATA(insert OID = 2020 ( date_trunc PGNSP PGUID 12 f f t f i 2 1114 "25 1114" _null_ _null_ _null_ timestamp_trunc - _null_ ));
+DATA(insert OID = 2020 ( date_trunc PGNSP PGUID 12 f f t f i 2 1114 "25 1114" _null_ _null_ _null_ timestamp_trunc - _null_ ));
DESCR("truncate timestamp to specified units");
-DATA(insert OID = 2021 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1114" _null_ _null_ _null_ timestamp_part - _null_ ));
+DATA(insert OID = 2021 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1114" _null_ _null_ _null_ timestamp_part - _null_ ));
DESCR("extract field from timestamp");
DATA(insert OID = 2022 ( timestamp PGNSP PGUID 12 f f t f s 1 1114 "25" _null_ _null_ _null_ text_timestamp - _null_ ));
DESCR("convert text to timestamp");
-DATA(insert OID = 2023 ( timestamp PGNSP PGUID 12 f f t f s 1 1114 "702" _null_ _null_ _null_ abstime_timestamp - _null_ ));
+DATA(insert OID = 2023 ( timestamp PGNSP PGUID 12 f f t f s 1 1114 "702" _null_ _null_ _null_ abstime_timestamp - _null_ ));
DESCR("convert abstime to timestamp");
-DATA(insert OID = 2024 ( timestamp PGNSP PGUID 12 f f t f i 1 1114 "1082" _null_ _null_ _null_ date_timestamp - _null_ ));
+DATA(insert OID = 2024 ( timestamp PGNSP PGUID 12 f f t f i 1 1114 "1082" _null_ _null_ _null_ date_timestamp - _null_ ));
DESCR("convert date to timestamp");
DATA(insert OID = 2025 ( timestamp PGNSP PGUID 12 f f t f i 2 1114 "1082 1083" _null_ _null_ _null_ datetime_timestamp - _null_ ));
DESCR("convert date and time to timestamp");
-DATA(insert OID = 2027 ( timestamp PGNSP PGUID 12 f f t f s 1 1114 "1184" _null_ _null_ _null_ timestamptz_timestamp - _null_ ));
+DATA(insert OID = 2027 ( timestamp PGNSP PGUID 12 f f t f s 1 1114 "1184" _null_ _null_ _null_ timestamptz_timestamp - _null_ ));
DESCR("convert timestamp with time zone to timestamp");
-DATA(insert OID = 2028 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "1114" _null_ _null_ _null_ timestamp_timestamptz - _null_ ));
+DATA(insert OID = 2028 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "1114" _null_ _null_ _null_ timestamp_timestamptz - _null_ ));
DESCR("convert timestamp to timestamp with time zone");
-DATA(insert OID = 2029 ( date PGNSP PGUID 12 f f t f i 1 1082 "1114" _null_ _null_ _null_ timestamp_date - _null_ ));
+DATA(insert OID = 2029 ( date PGNSP PGUID 12 f f t f i 1 1082 "1114" _null_ _null_ _null_ timestamp_date - _null_ ));
DESCR("convert timestamp to date");
-DATA(insert OID = 2030 ( abstime PGNSP PGUID 12 f f t f s 1 702 "1114" _null_ _null_ _null_ timestamp_abstime - _null_ ));
+DATA(insert OID = 2030 ( abstime PGNSP PGUID 12 f f t f s 1 702 "1114" _null_ _null_ _null_ timestamp_abstime - _null_ ));
DESCR("convert timestamp to abstime");
DATA(insert OID = 2031 ( timestamp_mi PGNSP PGUID 12 f f t f i 2 1186 "1114 1114" _null_ _null_ _null_ timestamp_mi - _null_ ));
DESCR("subtract");
DESCR("plus");
DATA(insert OID = 2033 ( timestamp_mi_interval PGNSP PGUID 12 f f t f i 2 1114 "1114 1186" _null_ _null_ _null_ timestamp_mi_interval - _null_ ));
DESCR("minus");
-DATA(insert OID = 2034 ( text PGNSP PGUID 12 f f t f s 1 25 "1114" _null_ _null_ _null_ timestamp_text - _null_ ));
+DATA(insert OID = 2034 ( text PGNSP PGUID 12 f f t f s 1 25 "1114" _null_ _null_ _null_ timestamp_text - _null_ ));
DESCR("convert timestamp to text");
DATA(insert OID = 2035 ( timestamp_smaller PGNSP PGUID 12 f f t f i 2 1114 "1114 1114" _null_ _null_ _null_ timestamp_smaller - _null_ ));
DESCR("smaller of two");
DATA(insert OID = 2036 ( timestamp_larger PGNSP PGUID 12 f f t f i 2 1114 "1114 1114" _null_ _null_ _null_ timestamp_larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 2037 ( timezone PGNSP PGUID 12 f f t f v 2 1266 "25 1266" _null_ _null_ _null_ timetz_zone - _null_ ));
+DATA(insert OID = 2037 ( timezone PGNSP PGUID 12 f f t f v 2 1266 "25 1266" _null_ _null_ _null_ timetz_zone - _null_ ));
DESCR("adjust time with time zone to new zone");
DATA(insert OID = 2038 ( timezone PGNSP PGUID 12 f f t f i 2 1266 "1186 1266" _null_ _null_ _null_ timetz_izone - _null_ ));
DESCR("adjust time with time zone to new zone");
DESCR("SQL92 interval comparison");
DATA(insert OID = 2044 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1114 1186 1114 1114" _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 2045 ( timestamp_cmp PGNSP PGUID 12 f f t f i 2 23 "1114 1114" _null_ _null_ _null_ timestamp_cmp - _null_ ));
+DATA(insert OID = 2045 ( timestamp_cmp PGNSP PGUID 12 f f t f i 2 23 "1114 1114" _null_ _null_ _null_ timestamp_cmp - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 2046 ( time PGNSP PGUID 12 f f t f i 1 1083 "1266" _null_ _null_ _null_ timetz_time - _null_ ));
+DATA(insert OID = 2046 ( time PGNSP PGUID 12 f f t f i 1 1083 "1266" _null_ _null_ _null_ timetz_time - _null_ ));
DESCR("convert time with time zone to time");
-DATA(insert OID = 2047 ( timetz PGNSP PGUID 12 f f t f s 1 1266 "1083" _null_ _null_ _null_ time_timetz - _null_ ));
+DATA(insert OID = 2047 ( timetz PGNSP PGUID 12 f f t f s 1 1266 "1083" _null_ _null_ _null_ time_timetz - _null_ ));
DESCR("convert time to timetz");
-DATA(insert OID = 2048 ( isfinite PGNSP PGUID 12 f f t f i 1 16 "1114" _null_ _null_ _null_ timestamp_finite - _null_ ));
+DATA(insert OID = 2048 ( isfinite PGNSP PGUID 12 f f t f i 1 16 "1114" _null_ _null_ _null_ timestamp_finite - _null_ ));
DESCR("finite timestamp?");
DATA(insert OID = 2049 ( to_char PGNSP PGUID 12 f f t f i 2 25 "1114 25" _null_ _null_ _null_ timestamp_to_char - _null_ ));
DESCR("format timestamp to text");
-DATA(insert OID = 2052 ( timestamp_eq PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_eq - _null_ ));
+DATA(insert OID = 2052 ( timestamp_eq PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 2053 ( timestamp_ne PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_ne - _null_ ));
+DATA(insert OID = 2053 ( timestamp_ne PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 2054 ( timestamp_lt PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_lt - _null_ ));
+DATA(insert OID = 2054 ( timestamp_lt PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 2055 ( timestamp_le PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_le - _null_ ));
+DATA(insert OID = 2055 ( timestamp_le PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 2056 ( timestamp_ge PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_ge - _null_ ));
+DATA(insert OID = 2056 ( timestamp_ge PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 2057 ( timestamp_gt PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_gt - _null_ ));
+DATA(insert OID = 2057 ( timestamp_gt PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ _null_ _null_ timestamp_gt - _null_ ));
DESCR("greater-than");
DATA(insert OID = 2058 ( age PGNSP PGUID 12 f f t f i 2 1186 "1114 1114" _null_ _null_ _null_ timestamp_age - _null_ ));
DESCR("date difference preserving months and years");
-DATA(insert OID = 2059 ( age PGNSP PGUID 14 f f t f s 1 1186 "1114" _null_ _null_ _null_ "select pg_catalog.age(cast(current_date as timestamp without time zone), $1)" - _null_ ));
+DATA(insert OID = 2059 ( age PGNSP PGUID 14 f f t f s 1 1186 "1114" _null_ _null_ _null_ "select pg_catalog.age(cast(current_date as timestamp without time zone), $1)" - _null_ ));
DESCR("date difference from today preserving months and years");
-DATA(insert OID = 2069 ( timezone PGNSP PGUID 12 f f t f i 2 1184 "25 1114" _null_ _null_ _null_ timestamp_zone - _null_ ));
+DATA(insert OID = 2069 ( timezone PGNSP PGUID 12 f f t f i 2 1184 "25 1114" _null_ _null_ _null_ timestamp_zone - _null_ ));
DESCR("adjust timestamp to new time zone");
DATA(insert OID = 2070 ( timezone PGNSP PGUID 12 f f t f i 2 1184 "1186 1114" _null_ _null_ _null_ timestamp_izone - _null_ ));
DESCR("adjust timestamp to new time zone");
DATA(insert OID = 1065 ( pg_prepared_xact PGNSP PGUID 12 f f t t v 0 2249 "" _null_ _null_ _null_ pg_prepared_xact - _null_ ));
DESCR("view two-phase transactions");
-DATA(insert OID = 2079 ( pg_table_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_table_is_visible - _null_ ));
+DATA(insert OID = 2079 ( pg_table_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_table_is_visible - _null_ ));
DESCR("is table visible in search path?");
-DATA(insert OID = 2080 ( pg_type_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_type_is_visible - _null_ ));
+DATA(insert OID = 2080 ( pg_type_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_type_is_visible - _null_ ));
DESCR("is type visible in search path?");
-DATA(insert OID = 2081 ( pg_function_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_function_is_visible - _null_ ));
+DATA(insert OID = 2081 ( pg_function_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_function_is_visible - _null_ ));
DESCR("is function visible in search path?");
-DATA(insert OID = 2082 ( pg_operator_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_operator_is_visible - _null_ ));
+DATA(insert OID = 2082 ( pg_operator_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_operator_is_visible - _null_ ));
DESCR("is operator visible in search path?");
-DATA(insert OID = 2083 ( pg_opclass_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_opclass_is_visible - _null_ ));
+DATA(insert OID = 2083 ( pg_opclass_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_opclass_is_visible - _null_ ));
DESCR("is opclass visible in search path?");
-DATA(insert OID = 2093 ( pg_conversion_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_conversion_is_visible - _null_ ));
+DATA(insert OID = 2093 ( pg_conversion_is_visible PGNSP PGUID 12 f f t f s 1 16 "26" _null_ _null_ _null_ pg_conversion_is_visible - _null_ ));
DESCR("is conversion visible in search path?");
DATA(insert OID = 2173 ( pg_stop_backup PGNSP PGUID 12 f f t f v 0 25 "" _null_ _null_ _null_ pg_stop_backup - _null_ ));
DESCR("Finish taking an online backup");
-DATA(insert OID = 2621 ( pg_reload_conf PGNSP PGUID 12 f f t f v 0 16 "" _null_ _null_ _null_ pg_reload_conf - _null_ ));
+DATA(insert OID = 2621 ( pg_reload_conf PGNSP PGUID 12 f f t f v 0 16 "" _null_ _null_ _null_ pg_reload_conf - _null_ ));
DESCR("Reload configuration files");
DATA(insert OID = 2622 ( pg_rotate_logfile PGNSP PGUID 12 f f t f v 0 16 "" _null_ _null_ _null_ pg_rotate_logfile - _null_ ));
DESCR("Rotate log file");
-DATA(insert OID = 2623 ( pg_stat_file PGNSP PGUID 12 f f t f v 1 2249 "25" _null_ _null_ _null_ pg_stat_file - _null_ ));
+DATA(insert OID = 2623 ( pg_stat_file PGNSP PGUID 12 f f t f v 1 2249 "25" _null_ _null_ _null_ pg_stat_file - _null_ ));
DESCR("Return file information");
-DATA(insert OID = 2624 ( pg_read_file PGNSP PGUID 12 f f t f v 3 25 "25 20 20" _null_ _null_ _null_ pg_read_file - _null_ ));
+DATA(insert OID = 2624 ( pg_read_file PGNSP PGUID 12 f f t f v 3 25 "25 20 20" _null_ _null_ _null_ pg_read_file - _null_ ));
DESCR("Read text from a file");
-DATA(insert OID = 2625 ( pg_ls_dir PGNSP PGUID 12 f f t t v 1 25 "25" _null_ _null_ _null_ pg_ls_dir - _null_ ));
+DATA(insert OID = 2625 ( pg_ls_dir PGNSP PGUID 12 f f t t v 1 25 "25" _null_ _null_ _null_ pg_ls_dir - _null_ ));
DESCR("List all files in a directory");
-
+
/* Aggregates (moved here from pg_aggregate for 7.3) */
DATA(insert OID = 2100 ( avg PGNSP PGUID 12 t f f f i 1 1700 "20" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2101 ( avg PGNSP PGUID 12 t f f f i 1 1700 "23" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2102 ( avg PGNSP PGUID 12 t f f f i 1 1700 "21" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2103 ( avg PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2103 ( avg PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2104 ( avg PGNSP PGUID 12 t f f f i 1 701 "700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2105 ( avg PGNSP PGUID 12 t f f f i 1 701 "701" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2106 ( avg PGNSP PGUID 12 t f f f i 1 1186 "1186" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2106 ( avg PGNSP PGUID 12 t f f f i 1 1186 "1186" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2107 ( sum PGNSP PGUID 12 t f f f i 1 1700 "20" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2108 ( sum PGNSP PGUID 12 t f f f i 1 20 "23" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2109 ( sum PGNSP PGUID 12 t f f f i 1 20 "21" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2108 ( sum PGNSP PGUID 12 t f f f i 1 20 "23" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2109 ( sum PGNSP PGUID 12 t f f f i 1 20 "21" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2110 ( sum PGNSP PGUID 12 t f f f i 1 700 "700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2111 ( sum PGNSP PGUID 12 t f f f i 1 701 "701" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2112 ( sum PGNSP PGUID 12 t f f f i 1 790 "790" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2113 ( sum PGNSP PGUID 12 t f f f i 1 1186 "1186" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2114 ( sum PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2113 ( sum PGNSP PGUID 12 t f f f i 1 1186 "1186" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2114 ( sum PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2115 ( max PGNSP PGUID 12 t f f f i 1 20 "20" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2116 ( max PGNSP PGUID 12 t f f f i 1 23 "23" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2117 ( max PGNSP PGUID 12 t f f f i 1 21 "21" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2118 ( max PGNSP PGUID 12 t f f f i 1 26 "26" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2115 ( max PGNSP PGUID 12 t f f f i 1 20 "20" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2116 ( max PGNSP PGUID 12 t f f f i 1 23 "23" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2117 ( max PGNSP PGUID 12 t f f f i 1 21 "21" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2118 ( max PGNSP PGUID 12 t f f f i 1 26 "26" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2119 ( max PGNSP PGUID 12 t f f f i 1 700 "700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2120 ( max PGNSP PGUID 12 t f f f i 1 701 "701" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2121 ( max PGNSP PGUID 12 t f f f i 1 702 "702" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2122 ( max PGNSP PGUID 12 t f f f i 1 1082 "1082" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2123 ( max PGNSP PGUID 12 t f f f i 1 1083 "1083" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2124 ( max PGNSP PGUID 12 t f f f i 1 1266 "1266" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2122 ( max PGNSP PGUID 12 t f f f i 1 1082 "1082" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2123 ( max PGNSP PGUID 12 t f f f i 1 1083 "1083" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2124 ( max PGNSP PGUID 12 t f f f i 1 1266 "1266" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2125 ( max PGNSP PGUID 12 t f f f i 1 790 "790" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2126 ( max PGNSP PGUID 12 t f f f i 1 1114 "1114" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2127 ( max PGNSP PGUID 12 t f f f i 1 1184 "1184" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2128 ( max PGNSP PGUID 12 t f f f i 1 1186 "1186" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2129 ( max PGNSP PGUID 12 t f f f i 1 25 "25" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2130 ( max PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2050 ( max PGNSP PGUID 12 t f f f i 1 2277 "2277" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2244 ( max PGNSP PGUID 12 t f f f i 1 1042 "1042" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-
-DATA(insert OID = 2131 ( min PGNSP PGUID 12 t f f f i 1 20 "20" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2132 ( min PGNSP PGUID 12 t f f f i 1 23 "23" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2133 ( min PGNSP PGUID 12 t f f f i 1 21 "21" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2134 ( min PGNSP PGUID 12 t f f f i 1 26 "26" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2126 ( max PGNSP PGUID 12 t f f f i 1 1114 "1114" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2127 ( max PGNSP PGUID 12 t f f f i 1 1184 "1184" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2128 ( max PGNSP PGUID 12 t f f f i 1 1186 "1186" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2129 ( max PGNSP PGUID 12 t f f f i 1 25 "25" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2130 ( max PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2050 ( max PGNSP PGUID 12 t f f f i 1 2277 "2277" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2244 ( max PGNSP PGUID 12 t f f f i 1 1042 "1042" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+
+DATA(insert OID = 2131 ( min PGNSP PGUID 12 t f f f i 1 20 "20" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2132 ( min PGNSP PGUID 12 t f f f i 1 23 "23" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2133 ( min PGNSP PGUID 12 t f f f i 1 21 "21" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2134 ( min PGNSP PGUID 12 t f f f i 1 26 "26" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2135 ( min PGNSP PGUID 12 t f f f i 1 700 "700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2136 ( min PGNSP PGUID 12 t f f f i 1 701 "701" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2137 ( min PGNSP PGUID 12 t f f f i 1 702 "702" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2138 ( min PGNSP PGUID 12 t f f f i 1 1082 "1082" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2139 ( min PGNSP PGUID 12 t f f f i 1 1083 "1083" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2140 ( min PGNSP PGUID 12 t f f f i 1 1266 "1266" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2138 ( min PGNSP PGUID 12 t f f f i 1 1082 "1082" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2139 ( min PGNSP PGUID 12 t f f f i 1 1083 "1083" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2140 ( min PGNSP PGUID 12 t f f f i 1 1266 "1266" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2141 ( min PGNSP PGUID 12 t f f f i 1 790 "790" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2142 ( min PGNSP PGUID 12 t f f f i 1 1114 "1114" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2143 ( min PGNSP PGUID 12 t f f f i 1 1184 "1184" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2144 ( min PGNSP PGUID 12 t f f f i 1 1186 "1186" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2145 ( min PGNSP PGUID 12 t f f f i 1 25 "25" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2146 ( min PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2051 ( min PGNSP PGUID 12 t f f f i 1 2277 "2277" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2245 ( min PGNSP PGUID 12 t f f f i 1 1042 "1042" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2142 ( min PGNSP PGUID 12 t f f f i 1 1114 "1114" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2143 ( min PGNSP PGUID 12 t f f f i 1 1184 "1184" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2144 ( min PGNSP PGUID 12 t f f f i 1 1186 "1186" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2145 ( min PGNSP PGUID 12 t f f f i 1 25 "25" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2146 ( min PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2051 ( min PGNSP PGUID 12 t f f f i 1 2277 "2277" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2245 ( min PGNSP PGUID 12 t f f f i 1 1042 "1042" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2147 ( count PGNSP PGUID 12 t f f f i 1 20 "2276" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2150 ( variance PGNSP PGUID 12 t f f f i 1 1700 "21" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2151 ( variance PGNSP PGUID 12 t f f f i 1 701 "700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2152 ( variance PGNSP PGUID 12 t f f f i 1 701 "701" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2153 ( variance PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2153 ( variance PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2154 ( stddev PGNSP PGUID 12 t f f f i 1 1700 "20" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2155 ( stddev PGNSP PGUID 12 t f f f i 1 1700 "23" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2156 ( stddev PGNSP PGUID 12 t f f f i 1 1700 "21" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2157 ( stddev PGNSP PGUID 12 t f f f i 1 701 "700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2158 ( stddev PGNSP PGUID 12 t f f f i 1 701 "701" _null_ _null_ _null_ aggregate_dummy - _null_ ));
-DATA(insert OID = 2159 ( stddev PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2159 ( stddev PGNSP PGUID 12 t f f f i 1 1700 "1700" _null_ _null_ _null_ aggregate_dummy - _null_ ));
DATA(insert OID = 2160 ( text_pattern_lt PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ _null_ _null_ text_pattern_lt - _null_ ));
DATA(insert OID = 2161 ( text_pattern_le PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ _null_ _null_ text_pattern_le - _null_ ));
DATA(insert OID = 2195 ( btfloat84cmp PGNSP PGUID 12 f f t f i 2 23 "701 700" _null_ _null_ _null_ btfloat84cmp - _null_ ));
-DATA(insert OID = 2212 ( regprocedurein PGNSP PGUID 12 f f t f s 1 2202 "2275" _null_ _null_ _null_ regprocedurein - _null_ ));
+DATA(insert OID = 2212 ( regprocedurein PGNSP PGUID 12 f f t f s 1 2202 "2275" _null_ _null_ _null_ regprocedurein - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2213 ( regprocedureout PGNSP PGUID 12 f f t f s 1 2275 "2202" _null_ _null_ _null_ regprocedureout - _null_ ));
+DATA(insert OID = 2213 ( regprocedureout PGNSP PGUID 12 f f t f s 1 2275 "2202" _null_ _null_ _null_ regprocedureout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2214 ( regoperin PGNSP PGUID 12 f f t f s 1 2203 "2275" _null_ _null_ _null_ regoperin - _null_ ));
+DATA(insert OID = 2214 ( regoperin PGNSP PGUID 12 f f t f s 1 2203 "2275" _null_ _null_ _null_ regoperin - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2215 ( regoperout PGNSP PGUID 12 f f t f s 1 2275 "2203" _null_ _null_ _null_ regoperout - _null_ ));
+DATA(insert OID = 2215 ( regoperout PGNSP PGUID 12 f f t f s 1 2275 "2203" _null_ _null_ _null_ regoperout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2216 ( regoperatorin PGNSP PGUID 12 f f t f s 1 2204 "2275" _null_ _null_ _null_ regoperatorin - _null_ ));
+DATA(insert OID = 2216 ( regoperatorin PGNSP PGUID 12 f f t f s 1 2204 "2275" _null_ _null_ _null_ regoperatorin - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2217 ( regoperatorout PGNSP PGUID 12 f f t f s 1 2275 "2204" _null_ _null_ _null_ regoperatorout - _null_ ));
+DATA(insert OID = 2217 ( regoperatorout PGNSP PGUID 12 f f t f s 1 2275 "2204" _null_ _null_ _null_ regoperatorout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2218 ( regclassin PGNSP PGUID 12 f f t f s 1 2205 "2275" _null_ _null_ _null_ regclassin - _null_ ));
+DATA(insert OID = 2218 ( regclassin PGNSP PGUID 12 f f t f s 1 2205 "2275" _null_ _null_ _null_ regclassin - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2219 ( regclassout PGNSP PGUID 12 f f t f s 1 2275 "2205" _null_ _null_ _null_ regclassout - _null_ ));
+DATA(insert OID = 2219 ( regclassout PGNSP PGUID 12 f f t f s 1 2275 "2205" _null_ _null_ _null_ regclassout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2220 ( regtypein PGNSP PGUID 12 f f t f s 1 2206 "2275" _null_ _null_ _null_ regtypein - _null_ ));
+DATA(insert OID = 2220 ( regtypein PGNSP PGUID 12 f f t f s 1 2206 "2275" _null_ _null_ _null_ regtypein - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2221 ( regtypeout PGNSP PGUID 12 f f t f s 1 2275 "2206" _null_ _null_ _null_ regtypeout - _null_ ));
+DATA(insert OID = 2221 ( regtypeout PGNSP PGUID 12 f f t f s 1 2275 "2206" _null_ _null_ _null_ regtypeout - _null_ ));
DESCR("I/O");
DATA(insert OID = 1079 ( regclass PGNSP PGUID 12 f f t f s 1 2205 "25" _null_ _null_ _null_ text_regclass - _null_ ));
DESCR("convert text to regclass");
DATA(insert OID = 2246 ( fmgr_internal_validator PGNSP PGUID 12 f f t f s 1 2278 "26" _null_ _null_ _null_ fmgr_internal_validator - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 2247 ( fmgr_c_validator PGNSP PGUID 12 f f t f s 1 2278 "26" _null_ _null_ _null_ fmgr_c_validator - _null_ ));
+DATA(insert OID = 2247 ( fmgr_c_validator PGNSP PGUID 12 f f t f s 1 2278 "26" _null_ _null_ _null_ fmgr_c_validator - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 2248 ( fmgr_sql_validator PGNSP PGUID 12 f f t f s 1 2278 "26" _null_ _null_ _null_ fmgr_sql_validator - _null_ ));
+DATA(insert OID = 2248 ( fmgr_sql_validator PGNSP PGUID 12 f f t f s 1 2278 "26" _null_ _null_ _null_ fmgr_sql_validator - _null_ ));
DESCR("(internal)");
DATA(insert OID = 2250 ( has_database_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ _null_ _null_ has_database_privilege_name_name - _null_ ));
DESCR("Calculate total disk space usage for the specified table and associated indexes and toast tables");
DATA(insert OID = 2287 ( pg_total_relation_size PGNSP PGUID 12 f f t f v 1 20 "25" _null_ _null_ _null_ pg_total_relation_size_name - _null_ ));
DESCR("Calculate total disk space usage for the specified table and associated indexes and toast tables");
-DATA(insert OID = 2288 ( pg_size_pretty PGNSP PGUID 12 f f t f v 1 25 "20" _null_ _null_ _null_ pg_size_pretty - _null_ ));
+DATA(insert OID = 2288 ( pg_size_pretty PGNSP PGUID 12 f f t f v 1 25 "20" _null_ _null_ _null_ pg_size_pretty - _null_ ));
DESCR("Convert a long int to a human readable text using size units");
DATA(insert OID = 2390 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ _null_ _null_ has_tablespace_privilege_name_name - _null_ ));
DATA(insert OID = 2290 ( record_in PGNSP PGUID 12 f f t f v 3 2249 "2275 26 23" _null_ _null_ _null_ record_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2291 ( record_out PGNSP PGUID 12 f f t f v 1 2275 "2249" _null_ _null_ _null_ record_out - _null_ ));
+DATA(insert OID = 2291 ( record_out PGNSP PGUID 12 f f t f v 1 2275 "2249" _null_ _null_ _null_ record_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2292 ( cstring_in PGNSP PGUID 12 f f t f i 1 2275 "2275" _null_ _null_ _null_ cstring_in - _null_ ));
+DATA(insert OID = 2292 ( cstring_in PGNSP PGUID 12 f f t f i 1 2275 "2275" _null_ _null_ _null_ cstring_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2293 ( cstring_out PGNSP PGUID 12 f f t f i 1 2275 "2275" _null_ _null_ _null_ cstring_out - _null_ ));
+DATA(insert OID = 2293 ( cstring_out PGNSP PGUID 12 f f t f i 1 2275 "2275" _null_ _null_ _null_ cstring_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2294 ( any_in PGNSP PGUID 12 f f t f i 1 2276 "2275" _null_ _null_ _null_ any_in - _null_ ));
+DATA(insert OID = 2294 ( any_in PGNSP PGUID 12 f f t f i 1 2276 "2275" _null_ _null_ _null_ any_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2295 ( any_out PGNSP PGUID 12 f f t f i 1 2275 "2276" _null_ _null_ _null_ any_out - _null_ ));
+DATA(insert OID = 2295 ( any_out PGNSP PGUID 12 f f t f i 1 2275 "2276" _null_ _null_ _null_ any_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2296 ( anyarray_in PGNSP PGUID 12 f f t f i 1 2277 "2275" _null_ _null_ _null_ anyarray_in - _null_ ));
+DATA(insert OID = 2296 ( anyarray_in PGNSP PGUID 12 f f t f i 1 2277 "2275" _null_ _null_ _null_ anyarray_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2297 ( anyarray_out PGNSP PGUID 12 f f t f s 1 2275 "2277" _null_ _null_ _null_ anyarray_out - _null_ ));
+DATA(insert OID = 2297 ( anyarray_out PGNSP PGUID 12 f f t f s 1 2275 "2277" _null_ _null_ _null_ anyarray_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2298 ( void_in PGNSP PGUID 12 f f t f i 1 2278 "2275" _null_ _null_ _null_ void_in - _null_ ));
+DATA(insert OID = 2298 ( void_in PGNSP PGUID 12 f f t f i 1 2278 "2275" _null_ _null_ _null_ void_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2299 ( void_out PGNSP PGUID 12 f f t f i 1 2275 "2278" _null_ _null_ _null_ void_out - _null_ ));
+DATA(insert OID = 2299 ( void_out PGNSP PGUID 12 f f t f i 1 2275 "2278" _null_ _null_ _null_ void_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2300 ( trigger_in PGNSP PGUID 12 f f t f i 1 2279 "2275" _null_ _null_ _null_ trigger_in - _null_ ));
+DATA(insert OID = 2300 ( trigger_in PGNSP PGUID 12 f f t f i 1 2279 "2275" _null_ _null_ _null_ trigger_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2301 ( trigger_out PGNSP PGUID 12 f f t f i 1 2275 "2279" _null_ _null_ _null_ trigger_out - _null_ ));
+DATA(insert OID = 2301 ( trigger_out PGNSP PGUID 12 f f t f i 1 2275 "2279" _null_ _null_ _null_ trigger_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2302 ( language_handler_in PGNSP PGUID 12 f f t f i 1 2280 "2275" _null_ _null_ _null_ language_handler_in - _null_ ));
+DATA(insert OID = 2302 ( language_handler_in PGNSP PGUID 12 f f t f i 1 2280 "2275" _null_ _null_ _null_ language_handler_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2303 ( language_handler_out PGNSP PGUID 12 f f t f i 1 2275 "2280" _null_ _null_ _null_ language_handler_out - _null_ ));
+DATA(insert OID = 2303 ( language_handler_out PGNSP PGUID 12 f f t f i 1 2275 "2280" _null_ _null_ _null_ language_handler_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2304 ( internal_in PGNSP PGUID 12 f f t f i 1 2281 "2275" _null_ _null_ _null_ internal_in - _null_ ));
+DATA(insert OID = 2304 ( internal_in PGNSP PGUID 12 f f t f i 1 2281 "2275" _null_ _null_ _null_ internal_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2305 ( internal_out PGNSP PGUID 12 f f t f i 1 2275 "2281" _null_ _null_ _null_ internal_out - _null_ ));
+DATA(insert OID = 2305 ( internal_out PGNSP PGUID 12 f f t f i 1 2275 "2281" _null_ _null_ _null_ internal_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2306 ( opaque_in PGNSP PGUID 12 f f t f i 1 2282 "2275" _null_ _null_ _null_ opaque_in - _null_ ));
+DATA(insert OID = 2306 ( opaque_in PGNSP PGUID 12 f f t f i 1 2282 "2275" _null_ _null_ _null_ opaque_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2307 ( opaque_out PGNSP PGUID 12 f f t f i 1 2275 "2282" _null_ _null_ _null_ opaque_out - _null_ ));
+DATA(insert OID = 2307 ( opaque_out PGNSP PGUID 12 f f t f i 1 2275 "2282" _null_ _null_ _null_ opaque_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2312 ( anyelement_in PGNSP PGUID 12 f f t f i 1 2283 "2275" _null_ _null_ _null_ anyelement_in - _null_ ));
+DATA(insert OID = 2312 ( anyelement_in PGNSP PGUID 12 f f t f i 1 2283 "2275" _null_ _null_ _null_ anyelement_in - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2313 ( anyelement_out PGNSP PGUID 12 f f t f i 1 2275 "2283" _null_ _null_ _null_ anyelement_out - _null_ ));
+DATA(insert OID = 2313 ( anyelement_out PGNSP PGUID 12 f f t f i 1 2275 "2283" _null_ _null_ _null_ anyelement_out - _null_ ));
DESCR("I/O");
/* cryptographic */
-DATA(insert OID = 2311 ( md5 PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ md5_text - _null_ ));
+DATA(insert OID = 2311 ( md5 PGNSP PGUID 12 f f t f i 1 25 "25" _null_ _null_ _null_ md5_text - _null_ ));
DESCR("calculates md5 hash");
-DATA(insert OID = 2321 ( md5 PGNSP PGUID 12 f f t f i 1 25 "17" _null_ _null_ _null_ md5_bytea - _null_ ));
+DATA(insert OID = 2321 ( md5 PGNSP PGUID 12 f f t f i 1 25 "17" _null_ _null_ _null_ md5_bytea - _null_ ));
DESCR("calculates md5 hash");
/* crosstype operations for date vs. timestamp and timestamptz */
DESCR("I/O");
DATA(insert OID = 2404 ( int2recv PGNSP PGUID 12 f f t f i 1 21 "2281" _null_ _null_ _null_ int2recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2405 ( int2send PGNSP PGUID 12 f f t f i 1 17 "21" _null_ _null_ _null_ int2send - _null_ ));
+DATA(insert OID = 2405 ( int2send PGNSP PGUID 12 f f t f i 1 17 "21" _null_ _null_ _null_ int2send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2406 ( int4recv PGNSP PGUID 12 f f t f i 1 23 "2281" _null_ _null_ _null_ int4recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2407 ( int4send PGNSP PGUID 12 f f t f i 1 17 "23" _null_ _null_ _null_ int4send - _null_ ));
+DATA(insert OID = 2407 ( int4send PGNSP PGUID 12 f f t f i 1 17 "23" _null_ _null_ _null_ int4send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2408 ( int8recv PGNSP PGUID 12 f f t f i 1 20 "2281" _null_ _null_ _null_ int8recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2409 ( int8send PGNSP PGUID 12 f f t f i 1 17 "20" _null_ _null_ _null_ int8send - _null_ ));
+DATA(insert OID = 2409 ( int8send PGNSP PGUID 12 f f t f i 1 17 "20" _null_ _null_ _null_ int8send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2410 ( int2vectorrecv PGNSP PGUID 12 f f t f i 1 22 "2281" _null_ _null_ _null_ int2vectorrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2411 ( int2vectorsend PGNSP PGUID 12 f f t f i 1 17 "22" _null_ _null_ _null_ int2vectorsend - _null_ ));
+DATA(insert OID = 2411 ( int2vectorsend PGNSP PGUID 12 f f t f i 1 17 "22" _null_ _null_ _null_ int2vectorsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2412 ( bytearecv PGNSP PGUID 12 f f t f i 1 17 "2281" _null_ _null_ _null_ bytearecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2413 ( byteasend PGNSP PGUID 12 f f t f i 1 17 "17" _null_ _null_ _null_ byteasend - _null_ ));
+DATA(insert OID = 2413 ( byteasend PGNSP PGUID 12 f f t f i 1 17 "17" _null_ _null_ _null_ byteasend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2414 ( textrecv PGNSP PGUID 12 f f t f s 1 25 "2281" _null_ _null_ _null_ textrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2415 ( textsend PGNSP PGUID 12 f f t f s 1 17 "25" _null_ _null_ _null_ textsend - _null_ ));
+DATA(insert OID = 2415 ( textsend PGNSP PGUID 12 f f t f s 1 17 "25" _null_ _null_ _null_ textsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2416 ( unknownrecv PGNSP PGUID 12 f f t f i 1 705 "2281" _null_ _null_ _null_ unknownrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2417 ( unknownsend PGNSP PGUID 12 f f t f i 1 17 "705" _null_ _null_ _null_ unknownsend - _null_ ));
+DATA(insert OID = 2417 ( unknownsend PGNSP PGUID 12 f f t f i 1 17 "705" _null_ _null_ _null_ unknownsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2418 ( oidrecv PGNSP PGUID 12 f f t f i 1 26 "2281" _null_ _null_ _null_ oidrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2419 ( oidsend PGNSP PGUID 12 f f t f i 1 17 "26" _null_ _null_ _null_ oidsend - _null_ ));
+DATA(insert OID = 2419 ( oidsend PGNSP PGUID 12 f f t f i 1 17 "26" _null_ _null_ _null_ oidsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2420 ( oidvectorrecv PGNSP PGUID 12 f f t f i 1 30 "2281" _null_ _null_ _null_ oidvectorrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2421 ( oidvectorsend PGNSP PGUID 12 f f t f i 1 17 "30" _null_ _null_ _null_ oidvectorsend - _null_ ));
+DATA(insert OID = 2421 ( oidvectorsend PGNSP PGUID 12 f f t f i 1 17 "30" _null_ _null_ _null_ oidvectorsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2422 ( namerecv PGNSP PGUID 12 f f t f s 1 19 "2281" _null_ _null_ _null_ namerecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2423 ( namesend PGNSP PGUID 12 f f t f s 1 17 "19" _null_ _null_ _null_ namesend - _null_ ));
+DATA(insert OID = 2423 ( namesend PGNSP PGUID 12 f f t f s 1 17 "19" _null_ _null_ _null_ namesend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2424 ( float4recv PGNSP PGUID 12 f f t f i 1 700 "2281" _null_ _null_ _null_ float4recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2425 ( float4send PGNSP PGUID 12 f f t f i 1 17 "700" _null_ _null_ _null_ float4send - _null_ ));
+DATA(insert OID = 2425 ( float4send PGNSP PGUID 12 f f t f i 1 17 "700" _null_ _null_ _null_ float4send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2426 ( float8recv PGNSP PGUID 12 f f t f i 1 701 "2281" _null_ _null_ _null_ float8recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2427 ( float8send PGNSP PGUID 12 f f t f i 1 17 "701" _null_ _null_ _null_ float8send - _null_ ));
+DATA(insert OID = 2427 ( float8send PGNSP PGUID 12 f f t f i 1 17 "701" _null_ _null_ _null_ float8send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2428 ( point_recv PGNSP PGUID 12 f f t f i 1 600 "2281" _null_ _null_ _null_ point_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2429 ( point_send PGNSP PGUID 12 f f t f i 1 17 "600" _null_ _null_ _null_ point_send - _null_ ));
+DATA(insert OID = 2429 ( point_send PGNSP PGUID 12 f f t f i 1 17 "600" _null_ _null_ _null_ point_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2430 ( bpcharrecv PGNSP PGUID 12 f f t f s 3 1042 "2281 26 23" _null_ _null_ _null_ bpcharrecv - _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2434 ( charrecv PGNSP PGUID 12 f f t f i 1 18 "2281" _null_ _null_ _null_ charrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2435 ( charsend PGNSP PGUID 12 f f t f i 1 17 "18" _null_ _null_ _null_ charsend - _null_ ));
+DATA(insert OID = 2435 ( charsend PGNSP PGUID 12 f f t f i 1 17 "18" _null_ _null_ _null_ charsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2436 ( boolrecv PGNSP PGUID 12 f f t f i 1 16 "2281" _null_ _null_ _null_ boolrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2437 ( boolsend PGNSP PGUID 12 f f t f i 1 17 "16" _null_ _null_ _null_ boolsend - _null_ ));
+DATA(insert OID = 2437 ( boolsend PGNSP PGUID 12 f f t f i 1 17 "16" _null_ _null_ _null_ boolsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2438 ( tidrecv PGNSP PGUID 12 f f t f i 1 27 "2281" _null_ _null_ _null_ tidrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2439 ( tidsend PGNSP PGUID 12 f f t f i 1 17 "27" _null_ _null_ _null_ tidsend - _null_ ));
+DATA(insert OID = 2439 ( tidsend PGNSP PGUID 12 f f t f i 1 17 "27" _null_ _null_ _null_ tidsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2440 ( xidrecv PGNSP PGUID 12 f f t f i 1 28 "2281" _null_ _null_ _null_ xidrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2441 ( xidsend PGNSP PGUID 12 f f t f i 1 17 "28" _null_ _null_ _null_ xidsend - _null_ ));
+DATA(insert OID = 2441 ( xidsend PGNSP PGUID 12 f f t f i 1 17 "28" _null_ _null_ _null_ xidsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2442 ( cidrecv PGNSP PGUID 12 f f t f i 1 29 "2281" _null_ _null_ _null_ cidrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2443 ( cidsend PGNSP PGUID 12 f f t f i 1 17 "29" _null_ _null_ _null_ cidsend - _null_ ));
+DATA(insert OID = 2443 ( cidsend PGNSP PGUID 12 f f t f i 1 17 "29" _null_ _null_ _null_ cidsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2444 ( regprocrecv PGNSP PGUID 12 f f t f i 1 24 "2281" _null_ _null_ _null_ regprocrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2445 ( regprocsend PGNSP PGUID 12 f f t f i 1 17 "24" _null_ _null_ _null_ regprocsend - _null_ ));
+DATA(insert OID = 2445 ( regprocsend PGNSP PGUID 12 f f t f i 1 17 "24" _null_ _null_ _null_ regprocsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2446 ( regprocedurerecv PGNSP PGUID 12 f f t f i 1 2202 "2281" _null_ _null_ _null_ regprocedurerecv - _null_ ));
+DATA(insert OID = 2446 ( regprocedurerecv PGNSP PGUID 12 f f t f i 1 2202 "2281" _null_ _null_ _null_ regprocedurerecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2447 ( regproceduresend PGNSP PGUID 12 f f t f i 1 17 "2202" _null_ _null_ _null_ regproceduresend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2448 ( regoperrecv PGNSP PGUID 12 f f t f i 1 2203 "2281" _null_ _null_ _null_ regoperrecv - _null_ ));
+DATA(insert OID = 2448 ( regoperrecv PGNSP PGUID 12 f f t f i 1 2203 "2281" _null_ _null_ _null_ regoperrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2449 ( regopersend PGNSP PGUID 12 f f t f i 1 17 "2203" _null_ _null_ _null_ regopersend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2450 ( regoperatorrecv PGNSP PGUID 12 f f t f i 1 2204 "2281" _null_ _null_ _null_ regoperatorrecv - _null_ ));
+DATA(insert OID = 2450 ( regoperatorrecv PGNSP PGUID 12 f f t f i 1 2204 "2281" _null_ _null_ _null_ regoperatorrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2451 ( regoperatorsend PGNSP PGUID 12 f f t f i 1 17 "2204" _null_ _null_ _null_ regoperatorsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2452 ( regclassrecv PGNSP PGUID 12 f f t f i 1 2205 "2281" _null_ _null_ _null_ regclassrecv - _null_ ));
+DATA(insert OID = 2452 ( regclassrecv PGNSP PGUID 12 f f t f i 1 2205 "2281" _null_ _null_ _null_ regclassrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2453 ( regclasssend PGNSP PGUID 12 f f t f i 1 17 "2205" _null_ _null_ _null_ regclasssend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2454 ( regtyperecv PGNSP PGUID 12 f f t f i 1 2206 "2281" _null_ _null_ _null_ regtyperecv - _null_ ));
+DATA(insert OID = 2454 ( regtyperecv PGNSP PGUID 12 f f t f i 1 2206 "2281" _null_ _null_ _null_ regtyperecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2455 ( regtypesend PGNSP PGUID 12 f f t f i 1 17 "2206" _null_ _null_ _null_ regtypesend - _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2462 ( abstimerecv PGNSP PGUID 12 f f t f i 1 702 "2281" _null_ _null_ _null_ abstimerecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2463 ( abstimesend PGNSP PGUID 12 f f t f i 1 17 "702" _null_ _null_ _null_ abstimesend - _null_ ));
+DATA(insert OID = 2463 ( abstimesend PGNSP PGUID 12 f f t f i 1 17 "702" _null_ _null_ _null_ abstimesend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2464 ( reltimerecv PGNSP PGUID 12 f f t f i 1 703 "2281" _null_ _null_ _null_ reltimerecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2465 ( reltimesend PGNSP PGUID 12 f f t f i 1 17 "703" _null_ _null_ _null_ reltimesend - _null_ ));
+DATA(insert OID = 2465 ( reltimesend PGNSP PGUID 12 f f t f i 1 17 "703" _null_ _null_ _null_ reltimesend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2466 ( tintervalrecv PGNSP PGUID 12 f f t f i 1 704 "2281" _null_ _null_ _null_ tintervalrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2467 ( tintervalsend PGNSP PGUID 12 f f t f i 1 17 "704" _null_ _null_ _null_ tintervalsend - _null_ ));
+DATA(insert OID = 2467 ( tintervalsend PGNSP PGUID 12 f f t f i 1 17 "704" _null_ _null_ _null_ tintervalsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2468 ( date_recv PGNSP PGUID 12 f f t f i 1 1082 "2281" _null_ _null_ _null_ date_recv - _null_ ));
+DATA(insert OID = 2468 ( date_recv PGNSP PGUID 12 f f t f i 1 1082 "2281" _null_ _null_ _null_ date_recv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2469 ( date_send PGNSP PGUID 12 f f t f i 1 17 "1082" _null_ _null_ _null_ date_send - _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2480 ( lseg_recv PGNSP PGUID 12 f f t f i 1 601 "2281" _null_ _null_ _null_ lseg_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2481 ( lseg_send PGNSP PGUID 12 f f t f i 1 17 "601" _null_ _null_ _null_ lseg_send - _null_ ));
+DATA(insert OID = 2481 ( lseg_send PGNSP PGUID 12 f f t f i 1 17 "601" _null_ _null_ _null_ lseg_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2482 ( path_recv PGNSP PGUID 12 f f t f i 1 602 "2281" _null_ _null_ _null_ path_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2483 ( path_send PGNSP PGUID 12 f f t f i 1 17 "602" _null_ _null_ _null_ path_send - _null_ ));
+DATA(insert OID = 2483 ( path_send PGNSP PGUID 12 f f t f i 1 17 "602" _null_ _null_ _null_ path_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2484 ( box_recv PGNSP PGUID 12 f f t f i 1 603 "2281" _null_ _null_ _null_ box_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2485 ( box_send PGNSP PGUID 12 f f t f i 1 17 "603" _null_ _null_ _null_ box_send - _null_ ));
+DATA(insert OID = 2485 ( box_send PGNSP PGUID 12 f f t f i 1 17 "603" _null_ _null_ _null_ box_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2486 ( poly_recv PGNSP PGUID 12 f f t f i 1 604 "2281" _null_ _null_ _null_ poly_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2487 ( poly_send PGNSP PGUID 12 f f t f i 1 17 "604" _null_ _null_ _null_ poly_send - _null_ ));
+DATA(insert OID = 2487 ( poly_send PGNSP PGUID 12 f f t f i 1 17 "604" _null_ _null_ _null_ poly_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2488 ( line_recv PGNSP PGUID 12 f f t f i 1 628 "2281" _null_ _null_ _null_ line_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2489 ( line_send PGNSP PGUID 12 f f t f i 1 17 "628" _null_ _null_ _null_ line_send - _null_ ));
+DATA(insert OID = 2489 ( line_send PGNSP PGUID 12 f f t f i 1 17 "628" _null_ _null_ _null_ line_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2490 ( circle_recv PGNSP PGUID 12 f f t f i 1 718 "2281" _null_ _null_ _null_ circle_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2491 ( circle_send PGNSP PGUID 12 f f t f i 1 17 "718" _null_ _null_ _null_ circle_send - _null_ ));
+DATA(insert OID = 2491 ( circle_send PGNSP PGUID 12 f f t f i 1 17 "718" _null_ _null_ _null_ circle_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2492 ( cash_recv PGNSP PGUID 12 f f t f i 1 790 "2281" _null_ _null_ _null_ cash_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2493 ( cash_send PGNSP PGUID 12 f f t f i 1 17 "790" _null_ _null_ _null_ cash_send - _null_ ));
+DATA(insert OID = 2493 ( cash_send PGNSP PGUID 12 f f t f i 1 17 "790" _null_ _null_ _null_ cash_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2494 ( macaddr_recv PGNSP PGUID 12 f f t f i 1 829 "2281" _null_ _null_ _null_ macaddr_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2495 ( macaddr_send PGNSP PGUID 12 f f t f i 1 17 "829" _null_ _null_ _null_ macaddr_send - _null_ ));
+DATA(insert OID = 2495 ( macaddr_send PGNSP PGUID 12 f f t f i 1 17 "829" _null_ _null_ _null_ macaddr_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2496 ( inet_recv PGNSP PGUID 12 f f t f i 1 869 "2281" _null_ _null_ _null_ inet_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2497 ( inet_send PGNSP PGUID 12 f f t f i 1 17 "869" _null_ _null_ _null_ inet_send - _null_ ));
+DATA(insert OID = 2497 ( inet_send PGNSP PGUID 12 f f t f i 1 17 "869" _null_ _null_ _null_ inet_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2498 ( cidr_recv PGNSP PGUID 12 f f t f i 1 650 "2281" _null_ _null_ _null_ cidr_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2499 ( cidr_send PGNSP PGUID 12 f f t f i 1 17 "650" _null_ _null_ _null_ cidr_send - _null_ ));
+DATA(insert OID = 2499 ( cidr_send PGNSP PGUID 12 f f t f i 1 17 "650" _null_ _null_ _null_ cidr_send - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2500 ( cstring_recv PGNSP PGUID 12 f f t f s 1 2275 "2281" _null_ _null_ _null_ cstring_recv - _null_ ));
+DATA(insert OID = 2500 ( cstring_recv PGNSP PGUID 12 f f t f s 1 2275 "2281" _null_ _null_ _null_ cstring_recv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2501 ( cstring_send PGNSP PGUID 12 f f t f s 1 17 "2275" _null_ _null_ _null_ cstring_send - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2502 ( anyarray_recv PGNSP PGUID 12 f f t f s 1 2277 "2281" _null_ _null_ _null_ anyarray_recv - _null_ ));
+DATA(insert OID = 2502 ( anyarray_recv PGNSP PGUID 12 f f t f s 1 2277 "2281" _null_ _null_ _null_ anyarray_recv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2503 ( anyarray_send PGNSP PGUID 12 f f t f s 1 17 "2277" _null_ _null_ _null_ anyarray_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2547 ( interval_pl_timetz PGNSP PGUID 14 f f t f i 2 1266 "1186 1266" _null_ _null_ _null_ "select $2 + $1" - _null_ ));
DATA(insert OID = 2548 ( interval_pl_timestamp PGNSP PGUID 14 f f t f i 2 1114 "1186 1114" _null_ _null_ _null_ "select $2 + $1" - _null_ ));
DATA(insert OID = 2549 ( interval_pl_timestamptz PGNSP PGUID 14 f f t f s 2 1184 "1186 1184" _null_ _null_ _null_ "select $2 + $1" - _null_ ));
-DATA(insert OID = 2550 ( integer_pl_date PGNSP PGUID 14 f f t f i 2 1082 "23 1082" _null_ _null_ _null_ "select $2 + $1" - _null_ ));
+DATA(insert OID = 2550 ( integer_pl_date PGNSP PGUID 14 f f t f i 2 1082 "23 1082" _null_ _null_ _null_ "select $2 + $1" - _null_ ));
DATA(insert OID = 2556 ( pg_tablespace_databases PGNSP PGUID 12 f f t t s 1 26 "26" _null_ _null_ _null_ pg_tablespace_databases - _null_));
DESCR("returns database oids in a tablespace");
-DATA(insert OID = 2557 ( bool PGNSP PGUID 12 f f t f i 1 16 "23" _null_ _null_ _null_ int4_bool - _null_ ));
+DATA(insert OID = 2557 ( bool PGNSP PGUID 12 f f t f i 1 16 "23" _null_ _null_ _null_ int4_bool - _null_ ));
DESCR("convert int4 to boolean");
-DATA(insert OID = 2558 ( int4 PGNSP PGUID 12 f f t f i 1 23 "16" _null_ _null_ _null_ bool_int4 - _null_ ));
+DATA(insert OID = 2558 ( int4 PGNSP PGUID 12 f f t f i 1 23 "16" _null_ _null_ _null_ bool_int4 - _null_ ));
DESCR("convert boolean to int4");
DATA(insert OID = 2559 ( lastval PGNSP PGUID 12 f f t f v 0 20 "" _null_ _null_ _null_ lastval - _null_ ));
DESCR("current value from last used sequence");
DESCR("bytes required to store the value, perhaps with compression");
/* new functions for Y-direction rtree opclasses */
-DATA(insert OID = 2562 ( box_below PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_below - _null_ ));
+DATA(insert OID = 2562 ( box_below PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_below - _null_ ));
DESCR("is below");
-DATA(insert OID = 2563 ( box_overbelow PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overbelow - _null_ ));
+DATA(insert OID = 2563 ( box_overbelow PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overbelow - _null_ ));
DESCR("overlaps or is below");
-DATA(insert OID = 2564 ( box_overabove PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overabove - _null_ ));
+DATA(insert OID = 2564 ( box_overabove PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_overabove - _null_ ));
DESCR("overlaps or is above");
-DATA(insert OID = 2565 ( box_above PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_above - _null_ ));
+DATA(insert OID = 2565 ( box_above PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ _null_ _null_ box_above - _null_ ));
DESCR("is above");
-DATA(insert OID = 2566 ( poly_below PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_below - _null_ ));
+DATA(insert OID = 2566 ( poly_below PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_below - _null_ ));
DESCR("is below");
-DATA(insert OID = 2567 ( poly_overbelow PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overbelow - _null_ ));
+DATA(insert OID = 2567 ( poly_overbelow PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overbelow - _null_ ));
DESCR("overlaps or is below");
-DATA(insert OID = 2568 ( poly_overabove PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overabove - _null_ ));
+DATA(insert OID = 2568 ( poly_overabove PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_overabove - _null_ ));
DESCR("overlaps or is above");
-DATA(insert OID = 2569 ( poly_above PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_above - _null_ ));
+DATA(insert OID = 2569 ( poly_above PGNSP PGUID 12 f f t f i 2 16 "604 604" _null_ _null_ _null_ poly_above - _null_ ));
DESCR("is above");
DATA(insert OID = 2587 ( circle_overbelow PGNSP PGUID 12 f f t f i 2 16 "718 718" _null_ _null_ _null_ circle_overbelow - _null_ ));
DESCR("overlaps or is below");
/* support functions for GiST r-tree emulation */
DATA(insert OID = 2578 ( gist_box_consistent PGNSP PGUID 12 f f t f i 3 16 "2281 603 23" _null_ _null_ _null_ gist_box_consistent - _null_ ));
DESCR("GiST support");
-DATA(insert OID = 2579 ( gist_box_compress PGNSP PGUID 12 f f t f i 1 2281 "2281" _null_ _null_ _null_ gist_box_compress - _null_ ));
+DATA(insert OID = 2579 ( gist_box_compress PGNSP PGUID 12 f f t f i 1 2281 "2281" _null_ _null_ _null_ gist_box_compress - _null_ ));
DESCR("GiST support");
-DATA(insert OID = 2580 ( gist_box_decompress PGNSP PGUID 12 f f t f i 1 2281 "2281" _null_ _null_ _null_ gist_box_decompress - _null_ ));
+DATA(insert OID = 2580 ( gist_box_decompress PGNSP PGUID 12 f f t f i 1 2281 "2281" _null_ _null_ _null_ gist_box_decompress - _null_ ));
DESCR("GiST support");
DATA(insert OID = 2581 ( gist_box_penalty PGNSP PGUID 12 f f t f i 3 2281 "2281 2281 2281" _null_ _null_ _null_ gist_box_penalty - _null_ ));
DESCR("GiST support");
DATA(insert OID = 2582 ( gist_box_picksplit PGNSP PGUID 12 f f t f i 2 2281 "2281 2281" _null_ _null_ _null_ gist_box_picksplit - _null_ ));
DESCR("GiST support");
-DATA(insert OID = 2583 ( gist_box_union PGNSP PGUID 12 f f t f i 2 603 "2281 2281" _null_ _null_ _null_ gist_box_union - _null_ ));
+DATA(insert OID = 2583 ( gist_box_union PGNSP PGUID 12 f f t f i 2 603 "2281 2281" _null_ _null_ _null_ gist_box_union - _null_ ));
DESCR("GiST support");
-DATA(insert OID = 2584 ( gist_box_same PGNSP PGUID 12 f f t f i 3 2281 "603 603 2281" _null_ _null_ _null_ gist_box_same - _null_ ));
+DATA(insert OID = 2584 ( gist_box_same PGNSP PGUID 12 f f t f i 3 2281 "603 603 2281" _null_ _null_ _null_ gist_box_same - _null_ ));
DESCR("GiST support");
DATA(insert OID = 2585 ( gist_poly_consistent PGNSP PGUID 12 f f t f i 3 16 "2281 604 23" _null_ _null_ _null_ gist_poly_consistent - _null_ ));
DESCR("GiST support");
-DATA(insert OID = 2586 ( gist_poly_compress PGNSP PGUID 12 f f t f i 1 2281 "2281" _null_ _null_ _null_ gist_poly_compress - _null_ ));
+DATA(insert OID = 2586 ( gist_poly_compress PGNSP PGUID 12 f f t f i 1 2281 "2281" _null_ _null_ _null_ gist_poly_compress - _null_ ));
DESCR("GiST support");
DATA(insert OID = 2591 ( gist_circle_consistent PGNSP PGUID 12 f f t f i 3 16 "2281 718 23" _null_ _null_ _null_ gist_circle_consistent - _null_ ));
DESCR("GiST support");
-DATA(insert OID = 2592 ( gist_circle_compress PGNSP PGUID 12 f f t f i 1 2281 "2281" _null_ _null_ _null_ gist_circle_compress - _null_ ));
+DATA(insert OID = 2592 ( gist_circle_compress PGNSP PGUID 12 f f t f i 1 2281 "2281" _null_ _null_ _null_ gist_circle_compress - _null_ ));
DESCR("GiST support");
#define PROVOLATILE_VOLATILE 'v' /* can change even within a scan */
/*
- * Symbolic values for proargmodes column. Note that these must agree with
+ * Symbolic values for proargmodes column. Note that these must agree with
* the FunctionParameterMode enum in parsenodes.h; we declare them here to
* be accessible from either header.
*/
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_shdepend.h,v 1.1 2005/07/07 20:39:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_shdepend.h,v 1.2 2005/10/15 02:49:44 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
/*
* Identification of the dependent (referencing) object.
*
- * These fields are all zeroes for a DEPENDENCY_PIN entry. Also,
- * dbid can be zero to denote a shared object.
+ * These fields are all zeroes for a DEPENDENCY_PIN entry. Also, dbid can be
+ * zero to denote a shared object.
*/
Oid dbid; /* OID of database containing object */
Oid classid; /* OID of table containing object */
Oid objid; /* OID of object itself */
/*
- * Identification of the independent (referenced) object. This is
- * always a shared object, so we need no database ID field.
+ * Identification of the independent (referenced) object. This is always
+ * a shared object, so we need no database ID field.
*/
Oid refclassid; /* OID of table containing object */
Oid refobjid; /* OID of object itself */
#define Anum_pg_shdepend_dbid 1
#define Anum_pg_shdepend_classid 2
#define Anum_pg_shdepend_objid 3
-#define Anum_pg_shdepend_refclassid 4
+#define Anum_pg_shdepend_refclassid 4
#define Anum_pg_shdepend_refobjid 5
#define Anum_pg_shdepend_deptype 6
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_statistic.h,v 1.29 2005/04/14 01:38:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_statistic.h,v 1.30 2005/10/15 02:49:44 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
/*
* stawidth is the average width in bytes of non-null entries. For
- * fixed-width datatypes this is of course the same as the typlen, but
- * for var-width types it is more useful. Note that this is the
- * average width of the data as actually stored, post-TOASTing (eg,
- * for a moved-out-of-line value, only the size of the pointer object
- * is counted). This is the appropriate definition for the primary
- * use of the statistic, which is to estimate sizes of in-memory hash
- * tables of tuples.
+ * fixed-width datatypes this is of course the same as the typlen, but for
+ * var-width types it is more useful. Note that this is the average width
+ * of the data as actually stored, post-TOASTing (eg, for a
+ * moved-out-of-line value, only the size of the pointer object is
+ * counted). This is the appropriate definition for the primary use of
+ * the statistic, which is to estimate sizes of in-memory hash tables of
+ * tuples.
*/
int4 stawidth;
Oid staop4;
/*
- * THE REST OF THESE ARE VARIABLE LENGTH FIELDS, and may even be
- * absent (NULL). They cannot be accessed as C struct entries; you
- * have to use the full field access machinery (heap_getattr) for
- * them. We declare them here for the catalog machinery.
+ * THE REST OF THESE ARE VARIABLE LENGTH FIELDS, and may even be absent
+ * (NULL). They cannot be accessed as C struct entries; you have to use
+ * the full field access machinery (heap_getattr) for them. We declare
+ * them here for the catalog machinery.
*/
float4 stanumbers1[1];
/*
* Values in these arrays are values of the column's data type. We
- * presently have to cheat quite a bit to allow polymorphic arrays of
- * this kind, but perhaps someday it'll be a less bogus facility.
+ * presently have to cheat quite a bit to allow polymorphic arrays of this
+ * kind, but perhaps someday it'll be a less bogus facility.
*/
anyarray stavalues1;
anyarray stavalues2;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.165 2005/08/12 01:36:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.166 2005/10/15 02:49:44 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* See struct FormData_pg_attribute for details.
* ----------------
*/
-#define TypeRelationId 1247
+#define TypeRelationId 1247
CATALOG(pg_type,1247) BKI_BOOTSTRAP
{
int2 typlen;
/*
- * typbyval determines whether internal Postgres routines pass a value
- * of this type by value or by reference. typbyval had better be
- * FALSE if the length is not 1, 2, or 4 (or 8 on 8-byte-Datum
- * machines). Variable-length types are always passed by reference.
- * Note that typbyval can be false even if the length would allow
- * pass-by-value; this is currently true for type float4, for example.
+ * typbyval determines whether internal Postgres routines pass a value of
+ * this type by value or by reference. typbyval had better be FALSE if
+ * the length is not 1, 2, or 4 (or 8 on 8-byte-Datum machines).
+ * Variable-length types are always passed by reference. Note that
+ * typbyval can be false even if the length would allow pass-by-value;
+ * this is currently true for type float4, for example.
*/
bool typbyval;
/*
- * typtype is 'b' for a basic type, 'c' for a complex type (ie a
- * table's rowtype), 'd' for a domain type, or 'p' for a pseudo type.
+ * typtype is 'b' for a basic type, 'c' for a complex type (ie a table's
+ * rowtype), 'd' for a domain type, or 'p' for a pseudo type.
*
- * If typtype is 'c', typrelid is the OID of the class' entry in
- * pg_class.
+ * If typtype is 'c', typrelid is the OID of the class' entry in pg_class.
*/
char typtype;
/*
* If typisdefined is false, the entry is only a placeholder (forward
- * reference). We know the type name, but not yet anything else about
- * it.
+ * reference). We know the type name, but not yet anything else about it.
*/
bool typisdefined;
/*
* If typelem is not 0 then it identifies another row in pg_type. The
- * current type can then be subscripted like an array yielding values
- * of type typelem. A non-zero typelem does not guarantee this type to
- * be a "real" array type; some ordinary fixed-length types can also
- * be subscripted (e.g., name, point). Variable-length types can *not*
- * be turned into pseudo-arrays like that. Hence, the way to determine
+ * current type can then be subscripted like an array yielding values of
+ * type typelem. A non-zero typelem does not guarantee this type to be a
+ * "real" array type; some ordinary fixed-length types can also be
+ * subscripted (e.g., name, point). Variable-length types can *not* be
+ * turned into pseudo-arrays like that. Hence, the way to determine
* whether a type is a "true" array type is if:
*
* typelem != 0 and typlen == -1.
/*
* This flag represents a "NOT NULL" constraint against this datatype.
*
- * If true, the attnotnull column for a corresponding table column using
- * this datatype will always enforce the NOT NULL constraint.
+ * If true, the attnotnull column for a corresponding table column using this
+ * datatype will always enforce the NOT NULL constraint.
*
* Used primarily for domain types.
*/
Oid typbasetype;
/*
- * Domains use typtypmod to record the typmod to be applied to their
- * base type (-1 if base type does not use a typmod). -1 if this type
- * is not a domain.
+ * Domains use typtypmod to record the typmod to be applied to their base
+ * type (-1 if base type does not use a typmod). -1 if this type is not a
+ * domain.
*/
int4 typtypmod;
/*
- * typndims is the declared number of dimensions for an array domain
- * type (i.e., typbasetype is an array type; the domain's typelem will
- * match the base type's typelem). Otherwise zero.
+ * typndims is the declared number of dimensions for an array domain type
+ * (i.e., typbasetype is an array type; the domain's typelem will match
+ * the base type's typelem). Otherwise zero.
*/
int4 typndims;
/*
- * If typdefaultbin is not NULL, it is the nodeToString representation
- * of a default expression for the type. Currently this is only used
- * for domains.
+ * If typdefaultbin is not NULL, it is the nodeToString representation of
+ * a default expression for the type. Currently this is only used for
+ * domains.
*/
text typdefaultbin; /* VARIABLE LENGTH FIELD */
* typdefaultbin is not NULL, typdefault must contain a human-readable
* version of the default expression represented by typdefaultbin. If
* typdefaultbin is NULL and typdefault is not, then typdefault is the
- * external representation of the type's default value, which may be
- * fed to the type's input converter to produce a constant.
+ * external representation of the type's default value, which may be fed
+ * to the type's input converter to produce a constant.
*/
text typdefault; /* VARIABLE LENGTH FIELD */
DESCR("-32 thousand to 32 thousand, 2-byte storage");
#define INT2OID 21
-DATA(insert OID = 22 ( int2vector PGNSP PGUID -1 f b t \054 0 21 int2vectorin int2vectorout int2vectorrecv int2vectorsend - i p f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 22 ( int2vector PGNSP PGUID -1 f b t \054 0 21 int2vectorin int2vectorout int2vectorrecv int2vectorsend - i p f 0 -1 0 _null_ _null_ ));
DESCR("array of int2, used in system tables");
#define INT2VECTOROID 22
DESCR("command identifier type, sequence in transaction id");
#define CIDOID 29
-DATA(insert OID = 30 ( oidvector PGNSP PGUID -1 f b t \054 0 26 oidvectorin oidvectorout oidvectorrecv oidvectorsend - i p f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 30 ( oidvector PGNSP PGUID -1 f b t \054 0 26 oidvectorin oidvectorout oidvectorrecv oidvectorsend - i p f 0 -1 0 _null_ _null_ ));
DESCR("array of oids, used in system tables");
#define OIDVECTOROID 30
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/async.h,v 1.29 2005/10/06 21:30:39 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/commands/async.h,v 1.30 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern bool DisableNotifyInterrupt(void);
extern void notify_twophase_postcommit(TransactionId xid, uint16 info,
- void *recdata, uint32 len);
+ void *recdata, uint32 len);
#endif /* ASYNC_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/cluster.h,v 1.28 2005/05/10 13:16:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/cluster.h,v 1.29 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void cluster(ClusterStmt *stmt);
extern void check_index_is_clusterable(Relation OldHeap, Oid indexOid,
- bool recheck);
+ bool recheck);
extern void mark_index_clustered(Relation rel, Oid indexOid);
extern Oid make_new_heap(Oid OIDOldHeap, const char *NewName,
Oid NewTableSpace);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/dbcommands.h,v 1.41 2005/07/31 17:19:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/dbcommands.h,v 1.42 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Oid db_id;
char src_path[1]; /* VARIABLE LENGTH STRING */
/* dst_path follows src_path */
-} xl_dbase_create_rec_old;
+} xl_dbase_create_rec_old;
typedef struct xl_dbase_drop_rec_old
{
/* Records dropping of a single subdirectory incl. contents */
Oid db_id;
char dir_path[1]; /* VARIABLE LENGTH STRING */
-} xl_dbase_drop_rec_old;
+} xl_dbase_drop_rec_old;
typedef struct xl_dbase_create_rec
{
Oid tablespace_id;
Oid src_db_id;
Oid src_tablespace_id;
-} xl_dbase_create_rec;
+} xl_dbase_create_rec;
typedef struct xl_dbase_drop_rec
{
/* Records dropping of a single subdirectory incl. contents */
Oid db_id;
Oid tablespace_id;
-} xl_dbase_drop_rec;
+} xl_dbase_drop_rec;
extern void createdb(const CreatedbStmt *stmt);
extern void dropdb(const char *dbname);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.67 2005/08/01 04:03:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.68 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void ReindexIndex(RangeVar *indexRelation);
extern void ReindexTable(RangeVar *relation);
extern void ReindexDatabase(const char *databaseName,
- bool do_system, bool do_user);
+ bool do_system, bool do_user);
extern char *makeObjectName(const char *name1, const char *name2,
const char *label);
extern char *ChooseRelationName(const char *name1, const char *name2,
extern void CreateCast(CreateCastStmt *stmt);
extern void DropCast(DropCastStmt *stmt);
extern void DropCastById(Oid castOid);
-extern void AlterFunctionNamespace(List *name, List *argtypes,
- const char *newschema);
+extern void AlterFunctionNamespace(List *name, List *argtypes,
+ const char *newschema);
/* commands/operatorcmds.c */
extern void DefineOperator(List *names, List *parameters);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/tablecmds.h,v 1.23 2005/08/01 04:03:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/tablecmds.h,v 1.24 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void AlterTableNamespace(RangeVar *relation, const char *newschema);
extern void AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
- Oid oldNspOid, Oid newNspOid,
- bool hasDependEntry);
+ Oid oldNspOid, Oid newNspOid,
+ bool hasDependEntry);
extern void ExecuteTruncate(List *relations);
extern void PreCommit_on_commit_actions(void);
extern void AtEOXact_on_commit_actions(bool isCommit);
extern void AtEOSubXact_on_commit_actions(bool isCommit,
- SubTransactionId mySubid,
- SubTransactionId parentSubid);
+ SubTransactionId mySubid,
+ SubTransactionId parentSubid);
#endif /* TABLECMDS_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/tablespace.h,v 1.10 2005/06/28 05:09:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/tablespace.h,v 1.11 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
Oid ts_id;
char ts_path[1]; /* VARIABLE LENGTH STRING */
-} xl_tblspc_create_rec;
+} xl_tblspc_create_rec;
typedef struct xl_tblspc_drop_rec
{
Oid ts_id;
-} xl_tblspc_drop_rec;
+} xl_tblspc_drop_rec;
extern void CreateTableSpace(CreateTableSpaceStmt *stmt);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/trigger.h,v 1.55 2005/08/23 22:40:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/trigger.h,v 1.56 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define RI_FK_RELNAME_ARGNO 1
#define RI_PK_RELNAME_ARGNO 2
#define RI_MATCH_TYPE_ARGNO 3
-#define RI_FIRST_ATTNAME_ARGNO 4 /* first attname pair
- * starts here */
+#define RI_FIRST_ATTNAME_ARGNO 4 /* first attname pair starts
+ * here */
#define RI_KEYPAIR_FK_IDX 0
#define RI_KEYPAIR_PK_IDX 1
extern void renametrig(Oid relid, const char *oldname, const char *newname);
extern void EnableDisableTrigger(Relation rel, const char *tgname,
- bool enable, bool skip_system);
+ bool enable, bool skip_system);
extern void RelationBuildTriggers(Relation relation);
* in utils/adt/ri_triggers.c
*/
extern bool RI_FKey_keyequal_upd_pk(Trigger *trigger, Relation pk_rel,
- HeapTuple old_row, HeapTuple new_row);
+ HeapTuple old_row, HeapTuple new_row);
extern bool RI_FKey_keyequal_upd_fk(Trigger *trigger, Relation fk_rel,
- HeapTuple old_row, HeapTuple new_row);
+ HeapTuple old_row, HeapTuple new_row);
extern bool RI_Initial_Check(FkConstraint *fkconstraint,
Relation rel,
Relation pkrel);
#define RI_TRIGGER_FK 2 /* is a trigger on the FK relation */
#define RI_TRIGGER_NONE 0 /* is not an RI trigger function */
-extern int RI_FKey_trigger_type(Oid tgfoid);
+extern int RI_FKey_trigger_type(Oid tgfoid);
#endif /* TRIGGER_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/typecmds.h,v 1.13 2005/08/04 01:09:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/typecmds.h,v 1.14 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId);
extern void AlterTypeNamespace(List *names, const char *newschema);
extern void AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid,
- bool errorOnTableType);
+ bool errorOnTableType);
#endif /* TYPECMDS_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/vacuum.h,v 1.61 2005/10/03 22:52:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/vacuum.h,v 1.62 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct VacAttrStats
{
/*
- * These fields are set up by the main ANALYZE code before invoking
- * the type-specific typanalyze function.
+ * These fields are set up by the main ANALYZE code before invoking the
+ * type-specific typanalyze function.
*/
Form_pg_attribute attr; /* copy of pg_attribute row for column */
Form_pg_type attrtype; /* copy of pg_type row for column */
* returns FALSE.
*/
void (*compute_stats) (VacAttrStatsP stats,
- AnalyzeAttrFetchFunc fetchfunc,
+ AnalyzeAttrFetchFunc fetchfunc,
int samplerows,
double totalrows);
int minrows; /* Minimum # of rows wanted for stats */
void *extra_data; /* for extra type-specific data */
/*
- * These fields are to be filled in by the compute_stats routine.
- * (They are initialized to zero when the struct is created.)
+ * These fields are to be filled in by the compute_stats routine. (They
+ * are initialized to zero when the struct is created.)
*/
bool stats_valid;
float4 stanullfrac; /* fraction of entries that are NULL */
/* in commands/vacuum.c */
extern void vacuum(VacuumStmt *vacstmt, List *relids);
extern void vac_open_indexes(Relation relation, LOCKMODE lockmode,
- int *nindexes, Relation **Irel);
+ int *nindexes, Relation **Irel);
extern void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode);
extern void vac_update_relstats(Oid relid,
BlockNumber num_pages,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/variable.h,v 1.26 2005/07/25 22:12:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/variable.h,v 1.27 2005/10/15 02:49:44 momjian Exp $
*/
#ifndef VARIABLE_H
#define VARIABLE_H
extern const char *assign_client_encoding(const char *value,
bool doit, GucSource source);
extern const char *assign_role(const char *value,
- bool doit, GucSource source);
+ bool doit, GucSource source);
extern const char *show_role(void);
extern const char *assign_session_authorization(const char *value,
bool doit, GucSource source);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/execdebug.h,v 1.27 2005/05/13 21:20:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/execdebug.h,v 1.28 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define MJ_DEBUG_PROC_NODE(slot) \
MJ2_printf(" %s = ExecProcNode(...) returns %s\n", \
CppAsString(slot), NULL_OR_TUPLE(slot))
-
#else
#define MJ_nodeDisplay(l)
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.119 2005/08/20 00:40:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.120 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* prototypes from functions in execGrouping.c
*/
extern bool execTuplesMatch(TupleTableSlot *slot1,
- TupleTableSlot *slot2,
- int numCols,
- AttrNumber *matchColIdx,
- FmgrInfo *eqfunctions,
- MemoryContext evalContext);
+ TupleTableSlot *slot2,
+ int numCols,
+ AttrNumber *matchColIdx,
+ FmgrInfo *eqfunctions,
+ MemoryContext evalContext);
extern bool execTuplesUnequal(TupleTableSlot *slot1,
- TupleTableSlot *slot2,
- int numCols,
- AttrNumber *matchColIdx,
- FmgrInfo *eqfunctions,
- MemoryContext evalContext);
+ TupleTableSlot *slot2,
+ int numCols,
+ AttrNumber *matchColIdx,
+ FmgrInfo *eqfunctions,
+ MemoryContext evalContext);
extern FmgrInfo *execTuplesMatchPrepare(TupleDesc tupdesc,
int numCols,
AttrNumber *matchColIdx);
extern JunkFilter *ExecInitJunkFilter(List *targetList, bool hasoid,
TupleTableSlot *slot);
extern JunkFilter *ExecInitJunkFilterConversion(List *targetList,
- TupleDesc cleanTupType,
- TupleTableSlot *slot);
+ TupleDesc cleanTupType,
+ TupleTableSlot *slot);
extern bool ExecGetJunkAttribute(JunkFilter *junkfilter, TupleTableSlot *slot,
char *attrName, Datum *value, bool *isNull);
extern TupleTableSlot *ExecFilterJunk(JunkFilter *junkfilter,
- TupleTableSlot *slot);
+ TupleTableSlot *slot);
extern HeapTuple ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot);
extern void ExecConstraints(ResultRelInfo *resultRelInfo,
TupleTableSlot *slot, EState *estate);
extern TupleTableSlot *EvalPlanQual(EState *estate, Index rti,
- ItemPointer tid, TransactionId priorXmax);
+ ItemPointer tid, TransactionId priorXmax);
/*
* prototypes from functions in execProcnode.c
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/functions.h,v 1.25 2005/03/31 22:46:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/functions.h,v 1.26 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Datum fmgr_sql(PG_FUNCTION_ARGS);
extern bool check_sql_fn_retval(Oid func_id, Oid rettype,
- List *queryTreeList,
- JunkFilter **junkFilter);
+ List *queryTreeList,
+ JunkFilter **junkFilter);
#endif /* FUNCTIONS_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/hashjoin.h,v 1.36 2005/04/16 20:07:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/hashjoin.h,v 1.37 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* If nbatch > 1 then tuples that don't belong in first batch get saved
* into inner-batch temp files. The same statements apply for the
* first scan of the outer relation, except we write tuples to outer-batch
- * temp files. After finishing the first scan, we do the following for
+ * temp files. After finishing the first scan, we do the following for
* each remaining batch:
* 1. Read tuples from inner batch file, load into hash buckets.
* 2. Read tuples from outer batch file, match to hash buckets and output.
typedef struct HashJoinTupleData
{
- struct HashJoinTupleData *next; /* link to next tuple in same bucket */
+ struct HashJoinTupleData *next; /* link to next tuple in same bucket */
uint32 hashvalue; /* tuple's hash code */
HeapTupleData htup; /* tuple header */
} HashJoinTupleData;
double totalTuples; /* # tuples obtained from inner plan */
/*
- * These arrays are allocated for the life of the hash join, but
- * only if nbatch > 1. A file is opened only when we first write
- * a tuple into it (otherwise its pointer remains NULL). Note that
- * the zero'th array elements never get used, since we will process
- * rather than dump out any tuples of batch zero.
+ * These arrays are allocated for the life of the hash join, but only if
+ * nbatch > 1. A file is opened only when we first write a tuple into it
+ * (otherwise its pointer remains NULL). Note that the zero'th array
+ * elements never get used, since we will process rather than dump out any
+ * tuples of batch zero.
*/
BufFile **innerBatchFile; /* buffered virtual temp file per batch */
BufFile **outerBatchFile; /* buffered virtual temp file per batch */
/*
- * Info about the datatype-specific hash functions for the datatypes
- * being hashed. We assume that the inner and outer sides of each
- * hashclause are the same type, or at least share the same hash
- * function. This is an array of the same length as the number of hash
- * keys.
+ * Info about the datatype-specific hash functions for the datatypes being
+ * hashed. We assume that the inner and outer sides of each hashclause
+ * are the same type, or at least share the same hash function. This is an
+ * array of the same length as the number of hash keys.
*/
FmgrInfo *hashfunctions; /* lookup data for hash functions */
*
* Copyright (c) 2001-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/executor/instrument.h,v 1.11 2005/04/16 20:07:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/instrument.h,v 1.12 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* gettimeofday() does not have sufficient resolution on Windows,
* so we must use QueryPerformanceCounter() instead. These macros
* also give some breathing room to use other high-precision-timing APIs
- * on yet other platforms. (The macro-ization is not complete, however;
+ * on yet other platforms. (The macro-ization is not complete, however;
* see subtraction code in instrument.c and explain.c.)
*/
#ifndef WIN32
#define INSTR_TIME_SET_CURRENT(t) gettimeofday(&(t), NULL)
#define INSTR_TIME_GET_DOUBLE(t) \
(((double) (t).tv_sec) + ((double) (t).tv_usec) / 1000000.0)
-
-#else /* WIN32 */
+#else /* WIN32 */
typedef LARGE_INTEGER instr_time;
QueryPerformanceFrequency(&f);
return (double) f.QuadPart;
}
-
-#endif /* WIN32 */
+#endif /* WIN32 */
typedef struct Instrumentation
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/nodeHash.h,v 1.37 2005/04/16 20:07:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/nodeHash.h,v 1.38 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern HashJoinTable ExecHashTableCreate(Hash *node, List *hashOperators);
extern void ExecHashTableDestroy(HashJoinTable hashtable);
extern void ExecHashTableInsert(HashJoinTable hashtable,
- HeapTuple tuple,
- uint32 hashvalue);
+ HeapTuple tuple,
+ uint32 hashvalue);
extern uint32 ExecHashGetHashValue(HashJoinTable hashtable,
- ExprContext *econtext,
- List *hashkeys);
+ ExprContext *econtext,
+ List *hashkeys);
extern void ExecHashGetBucketAndBatch(HashJoinTable hashtable,
- uint32 hashvalue,
- int *bucketno,
- int *batchno);
+ uint32 hashvalue,
+ int *bucketno,
+ int *batchno);
extern HeapTuple ExecScanHashBucket(HashJoinState *hjstate,
- ExprContext *econtext);
+ ExprContext *econtext);
extern void ExecHashTableReset(HashJoinTable hashtable);
extern void ExecChooseHashTableSize(double ntuples, int tupwidth,
int *numbuckets,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/nodeHashjoin.h,v 1.29 2005/03/06 22:15:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/nodeHashjoin.h,v 1.30 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt);
extern void ExecHashJoinSaveTuple(HeapTuple heapTuple, uint32 hashvalue,
- BufFile **fileptr);
+ BufFile **fileptr);
#endif /* NODEHASHJOIN_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/nodeIndexscan.h,v 1.23 2005/04/25 01:30:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/nodeIndexscan.h,v 1.24 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ExprState ***runtimeKeyInfo,
ScanKey *scanKeys, int *numScanKeys);
extern void ExecIndexEvalRuntimeKeys(ExprContext *econtext,
- ExprState **run_keys,
- ScanKey scan_keys,
- int n_keys);
+ ExprState **run_keys,
+ ScanKey scan_keys,
+ int n_keys);
#endif /* NODEINDEXSCAN_H */
*
* spi.h
*
- * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.52 2005/05/02 00:37:06 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.53 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void SPI_pop(void);
extern void SPI_restore_connection(void);
extern int SPI_execute(const char *src, bool read_only, long tcount);
-extern int SPI_execute_plan(void *plan, Datum *Values, const char *Nulls,
- bool read_only, long tcount);
+extern int SPI_execute_plan(void *plan, Datum *Values, const char *Nulls,
+ bool read_only, long tcount);
extern int SPI_exec(const char *src, long tcount);
-extern int SPI_execp(void *plan, Datum *Values, const char *Nulls,
- long tcount);
-extern int SPI_execute_snapshot(void *plan,
- Datum *Values, const char *Nulls,
- Snapshot snapshot,
- Snapshot crosscheck_snapshot,
- bool read_only, long tcount);
+extern int SPI_execp(void *plan, Datum *Values, const char *Nulls,
+ long tcount);
+extern int SPI_execute_snapshot(void *plan,
+ Datum *Values, const char *Nulls,
+ Snapshot snapshot,
+ Snapshot crosscheck_snapshot,
+ bool read_only, long tcount);
extern void *SPI_prepare(const char *src, int nargs, Oid *argtypes);
extern void *SPI_saveplan(void *plan);
extern int SPI_freeplan(void *plan);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/spi_priv.h,v 1.23 2005/10/01 18:43:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/spi_priv.h,v 1.24 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
MemoryContext procCxt; /* procedure context */
MemoryContext execCxt; /* executor context */
MemoryContext savedcxt;
- SubTransactionId connectSubid; /* ID of connecting subtransaction */
+ SubTransactionId connectSubid; /* ID of connecting subtransaction */
} _SPI_connection;
typedef struct
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/tuptable.h,v 1.28 2005/03/16 21:38:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/tuptable.h,v 1.29 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* a lower plan node's output TupleTableSlot, or to a function result
* constructed in a plan node's per-tuple econtext. It is the responsibility
* of the generating plan node to be sure these resources are not released
- * for as long as the virtual tuple needs to be valid. We only use virtual
+ * for as long as the virtual tuple needs to be valid. We only use virtual
* tuples in the result slots of plan nodes --- tuples to be copied anywhere
* else need to be "materialized" into physical tuples. Note also that a
* virtual tuple does not have any "system columns".
*
* The Datum/isnull arrays of a TupleTableSlot serve double duty. When the
- * slot contains a virtual tuple, they are the authoritative data. When the
+ * slot contains a virtual tuple, they are the authoritative data. When the
* slot contains a physical tuple, the arrays contain data extracted from
* the tuple. (In this state, any pass-by-reference Datums point into
* the physical tuple.) The extracted information is built "lazily",
- * ie, only as needed. This serves to avoid repeated extraction of data
+ * ie, only as needed. This serves to avoid repeated extraction of data
* from the physical tuple.
*
* A TupleTableSlot can also be "empty", holding no valid data. This is
* buffer page.)
*
* tts_nvalid indicates the number of valid columns in the tts_values/isnull
- * arrays. When the slot is holding a "virtual" tuple this must be equal
+ * arrays. When the slot is holding a "virtual" tuple this must be equal
* to the descriptor's natts. When the slot is holding a physical tuple
* this is equal to the number of columns we have extracted (we always
* extract columns from left to right, so there are no holes).
typedef struct TupleTableSlot
{
NodeTag type; /* vestigial ... allows IsA tests */
- bool tts_isempty; /* true = slot is empty */
- bool tts_shouldFree; /* should pfree tuple? */
+ bool tts_isempty; /* true = slot is empty */
+ bool tts_shouldFree; /* should pfree tuple? */
bool tts_shouldFreeDesc; /* should pfree descriptor? */
bool tts_slow; /* saved state for slot_deform_tuple */
HeapTuple tts_tuple; /* physical tuple, or NULL if none */
extern HeapTuple ExecFetchSlotTuple(TupleTableSlot *slot);
extern HeapTuple ExecMaterializeSlot(TupleTableSlot *slot);
extern TupleTableSlot *ExecCopySlot(TupleTableSlot *dstslot,
- TupleTableSlot *srcslot);
+ TupleTableSlot *srcslot);
+
/* in access/common/heaptuple.c */
extern Datum slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull);
extern void slot_getallattrs(TupleTableSlot *slot);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/fmgr.h,v 1.39 2005/06/09 18:44:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/fmgr.h,v 1.40 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef struct FmgrInfo
{
- PGFunction fn_addr; /* pointer to function or handler to be
- * called */
- Oid fn_oid; /* OID of function (NOT of handler, if
- * any) */
+ PGFunction fn_addr; /* pointer to function or handler to be called */
+ Oid fn_oid; /* OID of function (NOT of handler, if any) */
short fn_nargs; /* 0..FUNC_MAX_ARGS, or -1 if variable arg
* count */
- bool fn_strict; /* function is "strict" (NULL in => NULL
- * out) */
+ bool fn_strict; /* function is "strict" (NULL in => NULL out) */
bool fn_retset; /* function returns a set */
void *fn_extra; /* extra space for use by handler */
MemoryContext fn_mcxt; /* memory context to store fn_extra in */
FmgrInfo *flinfo; /* ptr to lookup info used for this call */
fmNodePtr context; /* pass info about context of call */
fmNodePtr resultinfo; /* pass or return extra info about result */
- bool isnull; /* function must set true if result is
- * NULL */
+ bool isnull; /* function must set true if result is NULL */
short nargs; /* # arguments actually passed */
Datum arg[FUNC_MAX_ARGS]; /* Arguments passed to function */
bool argnull[FUNC_MAX_ARGS]; /* T if arg[i] is actually NULL */
/*
* This macro initializes all the fields of a FunctionCallInfoData except
- * for the arg[] and argnull[] arrays. Performance testing has shown that
+ * for the arg[] and argnull[] arrays. Performance testing has shown that
* the fastest way to set up argnull[] for small numbers of arguments is to
* explicitly set each required element to false, so we don't try to zero
* out the argnull[] array in the macro.
typedef struct
{
- int api_version; /* specifies call convention version
- * number */
+ int api_version; /* specifies call convention version number */
/* More fields may be added later, for version numbers > 1. */
} Pg_finfo_record;
*
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.19 2005/10/06 19:51:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.20 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* OPTIONAL maximum number of calls
*
- * max_calls is here for convenience only and setting it is optional. If
- * not set, you must provide alternative means to know when the
- * function is done.
+ * max_calls is here for convenience only and setting it is optional. If not
+ * set, you must provide alternative means to know when the function is
+ * done.
*/
uint32 max_calls;
/*
* OPTIONAL pointer to miscellaneous user-provided context information
*
- * user_fctx is for use as a pointer to your own struct to retain
- * arbitrary context information between calls of your function.
+ * user_fctx is for use as a pointer to your own struct to retain arbitrary
+ * context information between calls of your function.
*/
void *user_fctx;
/*
* OPTIONAL pointer to struct containing attribute type input metadata
*
- * attinmeta is for use when returning tuples (i.e. composite data types)
- * and is not used when returning base data types. It is only needed
- * if you intend to use BuildTupleFromCStrings() to create the return
- * tuple.
+ * attinmeta is for use when returning tuples (i.e. composite data types) and
+ * is not used when returning base data types. It is only needed if you
+ * intend to use BuildTupleFromCStrings() to create the return tuple.
*/
AttInMetadata *attinmeta;
/*
- * memory context used for structures that must live for multiple
- * calls
+ * memory context used for structures that must live for multiple calls
*
- * multi_call_memory_ctx is set by SRF_FIRSTCALL_INIT() for you, and used
- * by SRF_RETURN_DONE() for cleanup. It is the most appropriate memory
- * context for any memory that is to be reused across multiple calls
- * of the SRF.
+ * multi_call_memory_ctx is set by SRF_FIRSTCALL_INIT() for you, and used by
+ * SRF_RETURN_DONE() for cleanup. It is the most appropriate memory
+ * context for any memory that is to be reused across multiple calls of
+ * the SRF.
*/
MemoryContext multi_call_memory_ctx;
/*
* OPTIONAL pointer to struct containing tuple description
*
- * tuple_desc is for use when returning tuples (i.e. composite data
- * types) and is only needed if you are going to build the tuples with
- * heap_formtuple() rather than with BuildTupleFromCStrings(). Note
- * that the TupleDesc pointer stored here should usually have been run
- * through BlessTupleDesc() first.
+ * tuple_desc is for use when returning tuples (i.e. composite data types)
+ * and is only needed if you are going to build the tuples with
+ * heap_formtuple() rather than with BuildTupleFromCStrings(). Note that
+ * the TupleDesc pointer stored here should usually have been run through
+ * BlessTupleDesc() first.
*/
TupleDesc tuple_desc;
*
* External declarations:
* get_call_result_type:
- * Given a function's call info record, determine the kind of datatype
- * it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
- * receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
- * receives a pointer to a TupleDesc when the result is of a composite
- * type, or NULL when it's a scalar result or the rowtype could not be
- * determined. NB: the tupledesc should be copied if it is to be
- * accessed over a long period.
+ * Given a function's call info record, determine the kind of datatype
+ * it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
+ * receives the actual datatype OID (this is mainly useful for scalar
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * receives a pointer to a TupleDesc when the result is of a composite
+ * type, or NULL when it's a scalar result or the rowtype could not be
+ * determined. NB: the tupledesc should be copied if it is to be
+ * accessed over a long period.
* get_expr_result_type:
- * Given an expression node, return the same info as for
- * get_call_result_type. Note: the cases in which rowtypes cannot be
- * determined are different from the cases for get_call_result_type.
+ * Given an expression node, return the same info as for
+ * get_call_result_type. Note: the cases in which rowtypes cannot be
+ * determined are different from the cases for get_call_result_type.
* get_func_result_type:
- * Given only a function's OID, return the same info as for
- * get_call_result_type. Note: the cases in which rowtypes cannot be
- * determined are different from the cases for get_call_result_type.
- * Do *not* use this if you can use one of the others.
+ * Given only a function's OID, return the same info as for
+ * get_call_result_type. Note: the cases in which rowtypes cannot be
+ * determined are different from the cases for get_call_result_type.
+ * Do *not* use this if you can use one of the others.
*----------
*/
} TypeFuncClass;
extern TypeFuncClass get_call_result_type(FunctionCallInfo fcinfo,
- Oid *resultTypeId,
- TupleDesc *resultTupleDesc);
+ Oid *resultTypeId,
+ TupleDesc *resultTupleDesc);
extern TypeFuncClass get_expr_result_type(Node *expr,
- Oid *resultTypeId,
- TupleDesc *resultTupleDesc);
+ Oid *resultTypeId,
+ TupleDesc *resultTupleDesc);
extern TypeFuncClass get_func_result_type(Oid functionId,
- Oid *resultTypeId,
- TupleDesc *resultTupleDesc);
+ Oid *resultTypeId,
+ TupleDesc *resultTupleDesc);
extern char *get_func_result_name(Oid functionId);
extern bool resolve_polymorphic_argtypes(int numargs, Oid *argtypes,
- char *argmodes,
- Node *call_expr);
+ char *argmodes,
+ Node *call_expr);
extern TupleDesc build_function_result_tupdesc_d(Datum proallargtypes,
- Datum proargmodes,
- Datum proargnames);
+ Datum proargmodes,
+ Datum proargnames);
extern TupleDesc build_function_result_tupdesc_t(HeapTuple procTuple);
*
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/getaddrinfo.h,v 1.16 2005/08/25 17:50:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/getaddrinfo.h,v 1.17 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define EAI_SERVICE (-8)
#define EAI_MEMORY (-10)
#define EAI_SYSTEM (-11)
-#else /* WIN32 */
+#else /* WIN32 */
#define EAI_AGAIN WSATRY_AGAIN
#define EAI_BADFLAGS WSAEINVAL
#define EAI_FAIL WSANO_RECOVERY
#define EAI_NODATA WSANO_DATA
#define EAI_NONAME WSAHOST_NOT_FOUND
#define EAI_SERVICE WSATYPE_NOT_FOUND
-#define EAI_SOCKTYPE WSAESOCKTNOSUPPORT
-#endif /* !WIN32 */
-#endif /* !EAI_FAIL */
+#define EAI_SOCKTYPE WSAESOCKTNOSUPPORT
+#endif /* !WIN32 */
+#endif /* !EAI_FAIL */
#ifndef AI_PASSIVE
#define AI_PASSIVE 0x0001
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/lib/dllist.h,v 1.24 2004/12/31 22:03:31 pgsql Exp $
+ * $PostgreSQL: pgsql/src/include/lib/dllist.h,v 1.25 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Dllist *DLNewList(void); /* allocate and initialize a list header */
extern void DLInitList(Dllist *list); /* init a header alloced by caller */
-extern void DLFreeList(Dllist *list); /* free up a list and all the
- * nodes in it */
+extern void DLFreeList(Dllist *list); /* free up a list and all the nodes in
+ * it */
extern Dlelem *DLNewElem(void *val);
extern void DLInitElem(Dlelem *e, void *val);
extern void DLFreeElem(Dlelem *e);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/auth.h,v 1.29 2005/06/27 02:04:25 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/auth.h,v 1.30 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void ClientAuthentication(Port *port);
-#define PG_KRB5_VERSION "PGVER5.1" /* at most KRB_SENDAUTH_VLEN chars */
+#define PG_KRB5_VERSION "PGVER5.1" /* at most KRB_SENDAUTH_VLEN chars */
extern char *pg_krb_server_keyfile;
extern char *pg_krb_srvnam;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/be-fsstubs.h,v 1.24 2005/06/13 02:26:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/be-fsstubs.h,v 1.25 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
extern void AtEOXact_LargeObject(bool isCommit);
extern void AtEOSubXact_LargeObject(bool isCommit, SubTransactionId mySubid,
- SubTransactionId parentSubid);
+ SubTransactionId parentSubid);
#endif /* BE_FSSTUBS_H */
* Interface to hba.c
*
*
- * $PostgreSQL: pgsql/src/include/libpq/hba.h,v 1.40 2005/08/11 21:11:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/hba.h,v 1.41 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void load_role(void);
extern int hba_getauthmethod(hbaPort *port);
extern int authident(hbaPort *port);
-extern bool read_pg_database_line(FILE *fp, char *dbname, Oid *dboid,
- Oid *dbtablespace, TransactionId *dbfrozenxid,
- TransactionId *dbvacuumxid);
+extern bool read_pg_database_line(FILE *fp, char *dbname, Oid *dboid,
+ Oid *dbtablespace, TransactionId *dbfrozenxid,
+ TransactionId *dbvacuumxid);
-#endif /* HBA_H */
+#endif /* HBA_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.51 2005/09/12 02:26:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.52 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
CAC_state canAcceptConnections; /* postmaster connection status */
/*
- * Information that needs to be saved from the startup packet and
- * passed into backend execution. "char *" fields are NULL if not
- * set. guc_options points to a List of alternating option names and
- * values.
+ * Information that needs to be saved from the startup packet and passed
+ * into backend execution. "char *" fields are NULL if not set.
+ * guc_options points to a List of alternating option names and values.
*/
char *database_name;
char *user_name;
char cryptSalt[2]; /* Password salt */
/*
- * Information that really has no business at all being in struct
- * Port, but since it gets used by elog.c in the same way as
- * database_name and other members of this struct, we may as well keep
- * it here.
+ * Information that really has no business at all being in struct Port,
+ * but since it gets used by elog.c in the same way as database_name and
+ * other members of this struct, we may as well keep it here.
*/
const char *commandTag; /* current command tag */
struct timeval session_start; /* for session duration logging */
/*
* TCP keepalive settings.
*
- * default values are 0 if AF_UNIX or not yet known;
- * current values are 0 if AF_UNIX or using the default.
- * Also, -1 in a default value means we were unable to find out the
- * default (getsockopt failed).
+ * default values are 0 if AF_UNIX or not yet known; current values are 0 if
+ * AF_UNIX or using the default. Also, -1 in a default value means we were
+ * unable to find out the default (getsockopt failed).
*/
- int default_keepalives_idle;
- int default_keepalives_interval;
- int default_keepalives_count;
- int keepalives_idle;
- int keepalives_interval;
- int keepalives_count;
+ int default_keepalives_idle;
+ int default_keepalives_interval;
+ int default_keepalives_count;
+ int keepalives_idle;
+ int keepalives_interval;
+ int keepalives_count;
/*
* SSL structures
/* TCP keepalives configuration. These are no-ops on an AF_UNIX socket. */
-extern int pq_getkeepalivesidle(Port *port);
-extern int pq_getkeepalivesinterval(Port *port);
-extern int pq_getkeepalivescount(Port *port);
+extern int pq_getkeepalivesidle(Port *port);
+extern int pq_getkeepalivesinterval(Port *port);
+extern int pq_getkeepalivescount(Port *port);
-extern int pq_setkeepalivesidle(int idle, Port *port);
-extern int pq_setkeepalivesinterval(int interval, Port *port);
-extern int pq_setkeepalivescount(int count, Port *port);
+extern int pq_setkeepalivesidle(int idle, Port *port);
+extern int pq_setkeepalivesinterval(int interval, Port *port);
+extern int pq_setkeepalivescount(int count, Port *port);
#endif /* LIBPQ_BE_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/libpq.h,v 1.64 2004/12/31 22:03:32 pgsql Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/libpq.h,v 1.65 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* prototypes for functions in pqcomm.c
*/
extern int StreamServerPort(int family, char *hostName,
- unsigned short portNumber, char *unixSocketName, int ListenSocket[],
+ unsigned short portNumber, char *unixSocketName, int ListenSocket[],
int MaxListen);
extern int StreamConnection(int server_fd, Port *port);
extern void StreamClose(int sock);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/pqcomm.h,v 1.97 2005/06/27 02:04:26 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/pqcomm.h,v 1.98 2005/10/15 02:49:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define ss_len __ss_len
#define HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN 1
#endif
-
#else /* !HAVE_STRUCT_SOCKADDR_STORAGE */
/* Define a struct sockaddr_storage if we don't have one. */
typedef struct CancelRequestPacket
{
/* Note that each field is stored in network byte order! */
- MsgType cancelRequestCode; /* code to identify a cancel
- * request */
+ MsgType cancelRequestCode; /* code to identify a cancel request */
uint32 backendPID; /* PID of client's backend */
uint32 cancelAuthCode; /* secret key to authorize cancel */
} CancelRequestPacket;
-/* $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.62 2005/09/24 17:53:27 tgl Exp $ */
+/* $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.63 2005/10/15 02:49:45 momjian Exp $ */
#ifndef PG_WCHAR_H
#define PG_WCHAR_H
#define LC_CNS11643_1 0x95 /* CNS 11643-1992 Plane 1 */
#define LC_CNS11643_2 0x96 /* CNS 11643-1992 Plane 2 */
/* #define FREE 0x97 free (unused) */
-#define LC_BIG5_1 0x98 /* Plane 1 Chinese traditional (not
- * supported) */
-#define LC_BIG5_2 0x99 /* Plane 1 Chinese traditional (not
- * supported) */
+#define LC_BIG5_1 0x98 /* Plane 1 Chinese traditional (not supported) */
+#define LC_BIG5_2 0x99 /* Plane 1 Chinese traditional (not supported) */
/*
* Private single byte encodings (0xa0-0xef)
*/
#define LC_SISHENG 0xa0 /* Chinese SiSheng characters for
* PinYin/ZhuYin (not supported) */
-#define LC_IPA 0xa1 /* IPA (International Phonetic
- * Association) (not supported) */
+#define LC_IPA 0xa1 /* IPA (International Phonetic Association)
+ * (not supported) */
#define LC_VISCII_LOWER 0xa2 /* Vietnamese VISCII1.1 lower-case (not
* supported) */
#define LC_VISCII_UPPER 0xa3 /* Vietnamese VISCII1.1 upper-case (not
* supported) */
#define LC_ARABIC_DIGIT 0xa4 /* Arabic digit (not supported) */
#define LC_ARABIC_1_COLUMN 0xa5 /* Arabic 1-column (not supported) */
-#define LC_ASCII_RIGHT_TO_LEFT 0xa6 /* ASCII (left half of ISO8859-1)
- * with right-to-left direction
- * (not supported) */
-#define LC_LAO 0xa7 /* Lao characters (ISO10646 0E80..0EDF)
- * (not supported) */
+#define LC_ASCII_RIGHT_TO_LEFT 0xa6 /* ASCII (left half of ISO8859-1) with
+ * right-to-left direction (not
+ * supported) */
+#define LC_LAO 0xa7 /* Lao characters (ISO10646 0E80..0EDF) (not
+ * supported) */
#define LC_ARABIC_2_COLUMN 0xa8 /* Arabic 1-column (not supported) */
/*
* Private multibyte encodings (0xf0-0xff)
*/
-#define LC_INDIAN_1_COLUMN 0xf0/* Indian charset for 1-column width
- * glypps (not supported) */
+#define LC_INDIAN_1_COLUMN 0xf0/* Indian charset for 1-column width glyphs
+ * (not supported) */
#define LC_TIBETAN_1_COLUMN 0xf1 /* Tibetan 1 column glyph (not supported) */
#define LC_ETHIOPIC 0xf5 /* Ethiopic characters (not supported) */
#define LC_CNS11643_3 0xf6 /* CNS 11643-1992 Plane 3 */
#define LC_CNS11643_5 0xf8 /* CNS 11643-1992 Plane 5 */
#define LC_CNS11643_6 0xf9 /* CNS 11643-1992 Plane 6 */
#define LC_CNS11643_7 0xfa /* CNS 11643-1992 Plane 7 */
-#define LC_INDIAN_2_COLUMN 0xfb/* Indian charset for 2-column width
- * glypps (not supported) */
+#define LC_INDIAN_2_COLUMN 0xfb/* Indian charset for 2-column width glyphs
+ * (not supported) */
#define LC_TIBETAN 0xfc /* Tibetan (not supported) */
/* #define FREE 0xfd free (unused) */
/* #define FREE 0xfe free (unused) */
mb2wchar_with_len_converter mb2wchar_with_len; /* convert a multibyte
* string to a wchar */
mblen_converter mblen; /* returns the length of a multibyte char */
- mbdisplaylen_converter dsplen; /* returns the lenghth of a
- * display length */
+ mbdisplaylen_converter dsplen; /* returns the length of a display
+ * length */
int maxmblen; /* max bytes for a char in this charset */
} pg_wchar_tbl;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.179 2005/08/17 22:14:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.180 2005/10/15 02:49:41 momjian Exp $
*
* NOTES
* some of the information in this file should be moved to other files.
if (InterruptPending) \
ProcessInterrupts(); \
} while(0)
-
#else /* WIN32 */
#define CHECK_FOR_INTERRUPTS() \
extern void SetDatabasePath(const char *path);
extern char *GetUserNameFromId(Oid roleid);
-extern Oid GetUserId(void);
+extern Oid GetUserId(void);
extern void SetUserId(Oid userid);
-extern Oid GetOuterUserId(void);
-extern Oid GetSessionUserId(void);
+extern Oid GetOuterUserId(void);
+extern Oid GetSessionUserId(void);
extern void InitializeSessionUserId(const char *rolename);
extern void InitializeSessionUserIdStandalone(void);
extern void AtAbort_UserId(void);
extern void SetSessionAuthorization(Oid userid, bool is_superuser);
-extern Oid GetCurrentRoleId(void);
+extern Oid GetCurrentRoleId(void);
extern void SetCurrentRoleId(Oid roleid, bool is_superuser);
extern void SetDataDir(const char *dir);
/* in utils/misc/superuser.c */
extern bool superuser(void); /* current user is superuser */
-extern bool superuser_arg(Oid roleid); /* given user is superuser */
+extern bool superuser_arg(Oid roleid); /* given user is superuser */
/*****************************************************************************
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.138 2005/09/25 19:37:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.139 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* ExecProject() evaluates the tlist, forms a tuple, and stores it
* in the given slot. Note that the result will be a "virtual" tuple
* unless ExecMaterializeSlot() is then called to force it to be
- * converted to a physical tuple. The slot must have a tupledesc
+ * converted to a physical tuple. The slot must have a tupledesc
* that matches the output of the tlist!
*
* The planner very often produces tlists that consist entirely of
/* Info about target table for insert/update/delete queries: */
ResultRelInfo *es_result_relations; /* array of ResultRelInfos */
int es_num_result_relations; /* length of array */
- ResultRelInfo *es_result_relation_info; /* currently active array
- * elt */
+ ResultRelInfo *es_result_relation_info; /* currently active array elt */
JunkFilter *es_junkFilter; /* currently active junk filter */
Relation es_into_relation_descriptor; /* for SELECT INTO */
List *es_exprcontexts; /* List of ExprContexts within EState */
/*
- * this ExprContext is for per-output-tuple operations, such as
- * constraint checks and index-value computations. It will be reset
- * for each output tuple. Note that it will be created only if
- * needed.
+ * this ExprContext is for per-output-tuple operations, such as constraint
+ * checks and index-value computations. It will be reset for each output
+ * tuple. Note that it will be created only if needed.
*/
ExprContext *es_per_tuple_exprcontext;
/* Below is to re-evaluate plan qual in READ COMMITTED mode */
Plan *es_topPlan; /* link to top of plan tree */
- struct evalPlanQual *es_evalPlanQual; /* chain of PlanQual
- * states */
+ struct evalPlanQual *es_evalPlanQual; /* chain of PlanQual states */
bool *es_evTupleNull; /* local array of EPQ status */
HeapTuple *es_evTuple; /* shared array of EPQ substitute tuples */
bool es_useEvalPlan; /* evaluating EPQ tuples? */
List *args; /* states of argument expressions */
/*
- * Function manager's lookup info for the target function. If
- * func.fn_oid is InvalidOid, we haven't initialized it yet.
+ * Function manager's lookup info for the target function. If func.fn_oid
+ * is InvalidOid, we haven't initialized it yet.
*/
FmgrInfo func;
/*
- * We also need to store argument values across calls when evaluating
- * a function-returning-set.
+ * We also need to store argument values across calls when evaluating a
+ * function-returning-set.
*
- * setArgsValid is true when we are evaluating a set-valued function and
- * we are in the middle of a call series; we want to pass the same
- * argument values to the function again (and again, until it returns
+ * setArgsValid is true when we are evaluating a set-valued function and we
+ * are in the middle of a call series; we want to pass the same argument
+ * values to the function again (and again, until it returns
* ExprEndResult).
*/
bool setArgsValid;
/*
* Flag to remember whether we found a set-valued argument to the
- * function. This causes the function result to be a set as well.
- * Valid only when setArgsValid is true.
+ * function. This causes the function result to be a set as well. Valid
+ * only when setArgsValid is true.
*/
bool setHasSetArg; /* some argument returns a set */
/*
* Flag to remember whether we have registered a shutdown callback for
* this FuncExprState. We do so only if setArgsValid has been true at
- * least once (since all the callback is for is to clear
- * setArgsValid).
+ * least once (since all the callback is for is to clear setArgsValid).
*/
bool shutdown_reg; /* a shutdown callback is registered */
/*
- * Current argument data for a set-valued function; contains valid
- * data only if setArgsValid is true.
+ * Current argument data for a set-valued function; contains valid data
+ * only if setArgsValid is true.
*/
FunctionCallInfoData setArgs;
} FuncExprState;
Plan *plan; /* associated Plan node */
- EState *state; /* at execution time, state's of
- * individual nodes point to one EState
- * for the whole top-level plan */
+ EState *state; /* at execution time, states of individual
+ * nodes point to one EState for the whole
+ * top-level plan */
struct Instrumentation *instrument; /* Optional runtime stats for this
* plan node */
/*
- * Common structural data for all Plan types. These links to
- * subsidiary state trees parallel links in the associated plan tree
- * (except for the subPlan list, which does not exist in the plan
- * tree).
+ * Common structural data for all Plan types. These links to subsidiary
+ * state trees parallel links in the associated plan tree (except for the
+ * subPlan list, which does not exist in the plan tree).
*/
List *targetlist; /* target list to be computed at this node */
List *qual; /* implicitly-ANDed qual conditions */
struct PlanState *lefttree; /* input plan tree(s) */
struct PlanState *righttree;
- List *initPlan; /* Init SubPlanState nodes (un-correlated
- * expr subselects) */
+ List *initPlan; /* Init SubPlanState nodes (un-correlated expr
+ * subselects) */
List *subPlan; /* SubPlanState nodes in my expressions */
/*
{
JoinState js; /* its first field is NodeTag */
int mj_NumClauses;
- MergeJoinClause mj_Clauses; /* array of length mj_NumClauses */
+ MergeJoinClause mj_Clauses; /* array of length mj_NumClauses */
int mj_JoinState;
bool mj_FillOuter;
bool mj_FillInner;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/makefuncs.h,v 1.52 2005/04/06 16:34:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/makefuncs.h,v 1.53 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Index varlevelsup);
extern TargetEntry *makeTargetEntry(Expr *expr,
- AttrNumber resno,
- char *resname,
- bool resjunk);
+ AttrNumber resno,
+ char *resname,
+ bool resjunk);
extern TargetEntry *flatCopyTargetEntry(TargetEntry *src_tle);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/nodes.h,v 1.175 2005/08/01 20:31:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/nodes.h,v 1.176 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* TAGS FOR EXPRESSION STATE NODES (execnodes.h)
*
- * These correspond (not always one-for-one) to primitive nodes derived
- * from Expr.
+ * These correspond (not always one-for-one) to primitive nodes derived from
+ * Expr.
*/
T_ExprState = 400,
T_GenericExprState,
*
* These are objects that aren't part of parse/plan/execute node tree
* structures, but we give them NodeTags anyway for identification
- * purposes (usually because they are involved in APIs where we want
- * to pass multiple object types through the same pointer).
+ * purposes (usually because they are involved in APIs where we want to
+ * pass multiple object types through the same pointer).
*/
T_TriggerData = 900, /* in commands/trigger.h */
T_ReturnSetInfo, /* in nodes/execnodes.h */
* These could have gone into plannodes.h or some such, but many files
* depend on them...
*/
-typedef double Selectivity; /* fraction of tuples a qualifier will
- * pass */
+typedef double Selectivity; /* fraction of tuples a qualifier will pass */
typedef double Cost; /* execution cost (in page-access units) */
CMD_UPDATE, /* update stmt (formerly replace) */
CMD_INSERT, /* insert stmt (formerly append) */
CMD_DELETE,
- CMD_UTILITY, /* cmds like create, destroy, copy,
- * vacuum, etc. */
+ CMD_UTILITY, /* cmds like create, destroy, copy, vacuum,
+ * etc. */
CMD_NOTHING /* dummy command for instead nothing rules
* with qual */
} CmdType;
*/
JOIN_INNER, /* matching tuple pairs only */
JOIN_LEFT, /* pairs + unmatched outer tuples */
- JOIN_FULL, /* pairs + unmatched outer + unmatched
- * inner */
+ JOIN_FULL, /* pairs + unmatched outer + unmatched inner */
JOIN_RIGHT, /* pairs + unmatched inner tuples */
/*
- * SQL92 considers UNION JOIN to be a kind of join, so list it here
- * for parser convenience, even though it's not implemented like a
- * join in the executor. (The planner must convert it to an Append
- * plan.)
+ * SQL92 considers UNION JOIN to be a kind of join, so list it here for
+ * parser convenience, even though it's not implemented like a join in the
+ * executor. (The planner must convert it to an Append plan.)
*/
JOIN_UNION,
/*
* These are used for queries like WHERE foo IN (SELECT bar FROM ...).
- * Only JOIN_IN is actually implemented in the executor; the others
- * are defined for internal use in the planner.
+ * Only JOIN_IN is actually implemented in the executor; the others are
+ * defined for internal use in the planner.
*/
JOIN_IN, /* at most one result per outer row */
JOIN_REVERSE_IN, /* at most one result per inner row */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.290 2005/08/23 22:40:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.291 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool hasSubLinks; /* has subquery SubLink */
List *rtable; /* list of range table entries */
- FromExpr *jointree; /* table join tree (FROM and WHERE
- * clauses) */
+ FromExpr *jointree; /* table join tree (FROM and WHERE clauses) */
List *rowMarks; /* integer list of RT indexes of relations
* that are selected FOR UPDATE/SHARE */
- bool forUpdate; /* true if rowMarks are FOR UPDATE,
- * false if they are FOR SHARE */
+ bool forUpdate; /* true if rowMarks are FOR UPDATE, false if
+ * they are FOR SHARE */
bool rowNoWait; /* FOR UPDATE/SHARE NOWAIT option */
List *targetList; /* target list (of TargetEntry) */
Node *limitOffset; /* # of result tuples to skip */
Node *limitCount; /* # of result tuples to return */
- Node *setOperations; /* set-operation tree if this is top level
- * of a UNION/INTERSECT/EXCEPT query */
+ Node *setOperations; /* set-operation tree if this is top level of
+ * a UNION/INTERSECT/EXCEPT query */
/*
* If the resultRelation turns out to be the parent of an inheritance
- * tree, the planner will add all the child tables to the rtable and
- * store a list of the rtindexes of all the result relations here.
- * This is done at plan time, not parse time, since we don't want to
- * commit to the exact set of child tables at parse time. This field
- * ought to go in some sort of TopPlan plan node, not in the Query.
+ * tree, the planner will add all the child tables to the rtable and store
+ * a list of the rtindexes of all the result relations here. This is done
+ * at plan time, not parse time, since we don't want to commit to the
+ * exact set of child tables at parse time. This field ought to go in
+ * some sort of TopPlan plan node, not in the Query.
*/
List *resultRelations; /* integer list of RT indexes, or NIL */
} Query;
NodeTag type;
char *name; /* column name or NULL */
List *indirection; /* subscripts and field names, or NIL */
- Node *val; /* the value expression to compute or
- * assign */
+ Node *val; /* the value expression to compute or assign */
} ResTarget;
/*
int inhcount; /* number of times column is inherited */
bool is_local; /* column has local (non-inherited) def'n */
bool is_not_null; /* NOT NULL constraint specified? */
- Node *raw_default; /* default value (untransformed parse
- * tree) */
+ Node *raw_default; /* default value (untransformed parse tree) */
char *cooked_default; /* nodeToString representation */
List *constraints; /* other constraints on column */
RangeVar *support; /* supporting relation, if any */
/*
* LockingClause - raw representation of FOR UPDATE/SHARE options
*
- * Note: lockedRels == NIL means "all relations in query". Otherwise it
+ * Note: lockedRels == NIL means "all relations in query". Otherwise it
* is a list of String nodes giving relation eref names.
*/
typedef struct LockingClause
* a stored rule might contain entries for columns dropped since the rule
* was created. (This is only possible for columns not actually referenced
* in the rule.) When loading a stored rule, we replace the joinaliasvars
- * items for any such columns with NULL Consts. (We can't simply delete
+ * items for any such columns with NULL Consts. (We can't simply delete
* them from the joinaliasvars list, because that would affect the attnums
* of Vars referencing the rest of the list.)
*
RTEKind rtekind; /* see above */
/*
- * XXX the fields applicable to only some rte kinds should be merged
- * into a union. I didn't do this yet because the diffs would impact
- * a lot of code that is being actively worked on. FIXME later.
+ * XXX the fields applicable to only some rte kinds should be merged into
+ * a union. I didn't do this yet because the diffs would impact a lot of
+ * code that is being actively worked on. FIXME later.
*/
/*
/*
* Fields valid for a join RTE (else NULL/zero):
*
- * joinaliasvars is a list of Vars or COALESCE expressions corresponding
- * to the columns of the join result. An alias Var referencing column
- * K of the join result can be replaced by the K'th element of
- * joinaliasvars --- but to simplify the task of reverse-listing
- * aliases correctly, we do not do that until planning time. In a Query
- * loaded from a stored rule, it is also possible for joinaliasvars
- * items to be NULL Consts, denoting columns dropped since the rule was
- * made.
+ * joinaliasvars is a list of Vars or COALESCE expressions corresponding to
+ * the columns of the join result. An alias Var referencing column K of
+ * the join result can be replaced by the K'th element of joinaliasvars
+ * --- but to simplify the task of reverse-listing aliases correctly, we
+ * do not do that until planning time. In a Query loaded from a stored
+ * rule, it is also possible for joinaliasvars items to be NULL Consts,
+ * denoting columns dropped since the rule was made.
*/
JoinType jointype; /* type of join */
List *joinaliasvars; /* list of alias-var expansions */
/*
* An INSERT statement has *either* VALUES or SELECT, never both. If
- * VALUES, a targetList is supplied (empty for DEFAULT VALUES). If
- * SELECT, a complete SelectStmt (or set-operation tree) is supplied.
+ * VALUES, a targetList is supplied (empty for DEFAULT VALUES). If SELECT,
+ * a complete SelectStmt (or set-operation tree) is supplied.
*/
List *targetList; /* the target list (of ResTarget) */
Node *selectStmt; /* the source SELECT */
{
MUST_HAVE_OIDS, /* WITH OIDS explicitely specified */
MUST_NOT_HAVE_OIDS, /* WITHOUT OIDS explicitely specified */
- DEFAULT_OIDS /* neither specified; use the default,
- * which is the value of the
- * default_with_oids GUC var */
+ DEFAULT_OIDS /* neither specified; use the default, which
+ * is the value of the default_with_oids GUC
+ * var */
} ContainsOids;
typedef struct SelectStmt
* else...
*/
List *distinctClause; /* NULL, list of DISTINCT ON exprs, or
- * lcons(NIL,NIL) for all (SELECT
- * DISTINCT) */
+ * lcons(NIL,NIL) for all (SELECT DISTINCT) */
RangeVar *into; /* target table (for select into table) */
List *intoColNames; /* column names for into table */
ContainsOids intoHasOids; /* should target table have OIDs? */
List *sortClause; /* sort clause (a list of SortBy's) */
Node *limitOffset; /* # of result tuples to skip */
Node *limitCount; /* # of result tuples to return */
- LockingClause *lockingClause; /* FOR UPDATE/FOR SHARE */
+ LockingClause *lockingClause; /* FOR UPDATE/FOR SHARE */
/*
* These fields are used only in upper-level SelectStmts.
AT_ProcessedConstraint, /* pre-processed add constraint (local in
* parser/analyze.c) */
AT_DropConstraint, /* drop constraint */
- AT_DropConstraintQuietly, /* drop constraint, no error/warning
- * (local in commands/tablecmds.c) */
+ AT_DropConstraintQuietly, /* drop constraint, no error/warning (local in
+ * commands/tablecmds.c) */
AT_AlterColumnType, /* alter column type */
AT_ToastTable, /* create toast table */
AT_ChangeOwner, /* change owner */
NodeTag type;
bool is_grant; /* true = GRANT, false = REVOKE */
GrantObjectType objtype; /* kind of object being operated on */
- List *objects; /* list of RangeVar nodes, FuncWithArgs
- * nodes, or plain names (as Value
- * strings) */
+ List *objects; /* list of RangeVar nodes, FuncWithArgs nodes,
+ * or plain names (as Value strings) */
List *privileges; /* list of privilege names (as Strings) */
/* privileges == NIL denotes "all privileges" */
List *grantees; /* list of PrivGrantee nodes */
{
NodeTag type;
RangeVar *relation; /* the relation to copy */
- List *attlist; /* List of column names (as Strings), or
- * NIL for all columns */
+ List *attlist; /* List of column names (as Strings), or NIL
+ * for all columns */
bool is_from; /* TO or FROM */
char *filename; /* if NULL, use stdin/stdout */
List *options; /* List of DefElem nodes */
typedef enum ConstrType /* types of constraints */
{
- CONSTR_NULL, /* not SQL92, but a lot of people expect
- * it */
+ CONSTR_NULL, /* not SQL92, but a lot of people expect it */
CONSTR_NOTNULL,
CONSTR_DEFAULT,
CONSTR_CHECK,
char *name; /* name, or NULL if unnamed */
Node *raw_expr; /* expr, as untransformed parse tree */
char *cooked_expr; /* expr, as nodeToString representation */
- List *keys; /* String nodes naming referenced
- * column(s) */
+ List *keys; /* String nodes naming referenced column(s) */
char *indexspace; /* index tablespace for PKEY/UNIQUE
* constraints; NULL for default */
} Constraint;
NodeTag type;
char *plname; /* PL name */
List *plhandler; /* PL call handler function (qual. name) */
- List *plvalidator; /* optional validator function (qual.
- * name) */
+ List *plvalidator; /* optional validator function (qual. name) */
bool pltrusted; /* PL is trusted */
} CreatePLangStmt;
char *tableSpace; /* tablespace, or NULL to use parent's */
List *indexParams; /* a list of IndexElem */
Node *whereClause; /* qualification (partial-index predicate) */
- List *rangetable; /* range table for qual and/or
- * expressions, filled in by
- * transformStmt() */
+ List *rangetable; /* range table for qual and/or expressions,
+ * filled in by transformStmt() */
bool unique; /* is index unique? */
bool primary; /* is index on primary key? */
bool isconstraint; /* is it from a CONSTRAINT clause? */
NodeTag type;
char *name; /* parameter name, or NULL if not given */
TypeName *argType; /* TypeName for parameter type */
- FunctionParameterMode mode; /* IN/OUT/INOUT */
+ FunctionParameterMode mode; /* IN/OUT/INOUT */
} FunctionParameter;
typedef struct AlterFunctionStmt
*/
typedef struct AlterObjectSchemaStmt
{
- NodeTag type;
- ObjectType objectType; /* OBJECT_TABLE, OBJECT_TYPE, etc */
+ NodeTag type;
+ ObjectType objectType; /* OBJECT_TABLE, OBJECT_TYPE, etc */
RangeVar *relation; /* in case it's a table */
List *object; /* in case it's some other object */
List *objarg; /* argument types, if applicable */
char *addname; /* additional name if needed */
- char *newschema; /* the new schema */
+ char *newschema; /* the new schema */
} AlterObjectSchemaStmt;
/* ----------------------
typedef struct AlterOwnerStmt
{
NodeTag type;
- ObjectType objectType; /* OBJECT_TABLE, OBJECT_TYPE, etc */
+ ObjectType objectType; /* OBJECT_TABLE, OBJECT_TYPE, etc */
RangeVar *relation; /* in case it's a table */
List *object; /* in case it's some other object */
List *objarg; /* argument types, if applicable */
NodeTag type;
TransactionStmtKind kind; /* see above */
List *options; /* for BEGIN/START and savepoint commands */
- char *gid; /* for two-phase-commit related commands */
+ char *gid; /* for two-phase-commit related commands */
} TransactionStmt;
/* ----------------------
typedef struct ReindexStmt
{
NodeTag type;
- ObjectType kind; /* OBJECT_INDEX, OBJECT_TABLE,
- * OBJECT_DATABASE */
+ ObjectType kind; /* OBJECT_INDEX, OBJECT_TABLE, OBJECT_DATABASE */
RangeVar *relation; /* Table or index to reindex */
const char *name; /* name of database to reindex */
bool do_system; /* include system tables in database case */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/pg_list.h,v 1.52 2005/07/28 20:26:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/pg_list.h,v 1.53 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
return l ? l->length : 0;
}
-
#else
extern ListCell *list_head(List *l);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.79 2005/04/25 01:30:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.80 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* estimated execution costs for plan (see costsize.c for more info)
*/
- Cost startup_cost; /* cost expended before fetching any
- * tuples */
- Cost total_cost; /* total cost (assuming all tuples
- * fetched) */
+ Cost startup_cost; /* cost expended before fetching any tuples */
+ Cost total_cost; /* total cost (assuming all tuples fetched) */
/*
* planner's estimate of result size of this plan step
/*
* Information for management of parameter-change-driven rescanning
*
- * extParam includes the paramIDs of all external PARAM_EXEC params
- * affecting this plan node or its children. setParam params from the
- * node's initPlans are not included, but their extParams are.
+ * extParam includes the paramIDs of all external PARAM_EXEC params affecting
+ * this plan node or its children. setParam params from the node's
+ * initPlans are not included, but their extParams are.
*
- * allParam includes all the extParam paramIDs, plus the IDs of local
- * params that affect the node (i.e., the setParams of its initplans).
- * These are _all_ the PARAM_EXEC params that affect this node.
+ * allParam includes all the extParam paramIDs, plus the IDs of local params
+ * that affect the node (i.e., the setParams of its initplans). These are
+ * _all_ the PARAM_EXEC params that affect this node.
*/
Bitmapset *extParam;
Bitmapset *allParam;
* resultRelation from Query there and get rid of Query itself from
* Executor. Some other stuff like below could be put there, too.
*/
- int nParamExec; /* Number of them in entire query. This is
- * to get Executor know about how many
- * PARAM_EXEC there are in query plan. */
+ int nParamExec; /* Number of them in entire query. This is to
+ * get Executor know about how many PARAM_EXEC
+ * there are in query plan. */
} Plan;
/* ----------------
* BitmapAnd node -
* Generate the intersection of the results of sub-plans.
*
- * The subplans must be of types that yield tuple bitmaps. The targetlist
+ * The subplans must be of types that yield tuple bitmaps. The targetlist
* and qual fields of the plan are unused and are always NIL.
* ----------------
*/
* BitmapOr node -
* Generate the union of the results of sub-plans.
*
- * The subplans must be of types that yield tuple bitmaps. The targetlist
+ * The subplans must be of types that yield tuple bitmaps. The targetlist
* and qual fields of the plan are unused and are always NIL.
* ----------------
*/
* in the same form it appeared in the query WHERE condition. Each should
* be of the form (indexkey OP comparisonval) or (comparisonval OP indexkey).
* The indexkey is a Var or expression referencing column(s) of the index's
- * base table. The comparisonval might be any expression, but it won't use
+ * base table. The comparisonval might be any expression, but it won't use
* any columns of the base table.
*
* indexqual has the same form, but the expressions have been commuted if
* necessary to put the indexkeys on the left, and the indexkeys are replaced
* by Var nodes identifying the index columns (varattno is the index column
* position, not the base table's column, even though varno is for the base
- * table). This is a bit hokey ... would be cleaner to use a special-purpose
- * node type that could not be mistaken for a regular Var. But it will do
+ * table). This is a bit hokey ... would be cleaner to use a special-purpose
+ * node type that could not be mistaken for a regular Var. But it will do
* for now.
*
* indexstrategy and indexsubtype are lists corresponding one-to-one with
typedef struct IndexScan
{
Scan scan;
- Oid indexid; /* OID of index to scan */
- List *indexqual; /* list of index quals (OpExprs) */
- List *indexqualorig; /* the same in original form */
- List *indexstrategy; /* integer list of strategy numbers */
- List *indexsubtype; /* OID list of strategy subtypes */
+ Oid indexid; /* OID of index to scan */
+ List *indexqual; /* list of index quals (OpExprs) */
+ List *indexqualorig; /* the same in original form */
+ List *indexstrategy; /* integer list of strategy numbers */
+ List *indexsubtype; /* OID list of strategy subtypes */
ScanDirection indexorderdir; /* forward or backward or don't care */
} IndexScan;
* bitmap index scan node
*
* BitmapIndexScan delivers a bitmap of potential tuple locations;
- * it does not access the heap itself. The bitmap is used by an
+ * it does not access the heap itself. The bitmap is used by an
* ancestor BitmapHeapScan node, possibly after passing through
* intermediate BitmapAnd and/or BitmapOr nodes to combine it with
* the results of other BitmapIndexScans.
typedef struct BitmapIndexScan
{
Scan scan;
- Oid indexid; /* OID of index to scan */
- List *indexqual; /* list of index quals (OpExprs) */
- List *indexqualorig; /* the same in original form */
- List *indexstrategy; /* integer list of strategy numbers */
- List *indexsubtype; /* OID list of strategy subtypes */
+ Oid indexid; /* OID of index to scan */
+ List *indexqual; /* list of index quals (OpExprs) */
+ List *indexqualorig; /* the same in original form */
+ List *indexstrategy; /* integer list of strategy numbers */
+ List *indexsubtype; /* OID list of strategy subtypes */
} BitmapIndexScan;
/* ----------------
typedef struct BitmapHeapScan
{
Scan scan;
- List *bitmapqualorig; /* index quals, in standard expr form */
+ List *bitmapqualorig; /* index quals, in standard expr form */
} BitmapHeapScan;
/* ----------------
typedef struct Unique
{
Plan plan;
- int numCols; /* number of columns to check for
- * uniqueness */
+ int numCols; /* number of columns to check for uniqueness */
AttrNumber *uniqColIdx; /* indexes into the target list */
} Unique;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.108 2005/06/26 22:05:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.109 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *catalogname; /* the catalog (database) name, or NULL */
char *schemaname; /* the schema name, or NULL */
char *relname; /* the relation/sequence name */
- InhOption inhOpt; /* expand rel by inheritance? recursively
- * act on children? */
+ InhOption inhOpt; /* expand rel by inheritance? recursively act
+ * on children? */
bool istemp; /* is this a temp relation/sequence? */
Alias *alias; /* table alias & optional column aliases */
} RangeVar;
typedef struct Var
{
Expr xpr;
- Index varno; /* index of this var's relation in the
- * range table (could also be INNER or
- * OUTER) */
- AttrNumber varattno; /* attribute number of this var, or zero
- * for all */
- Oid vartype; /* pg_type tuple OID for the type of this
- * var */
+ Index varno; /* index of this var's relation in the range
+ * table (could also be INNER or OUTER) */
+ AttrNumber varattno; /* attribute number of this var, or zero for
+ * all */
+ Oid vartype; /* pg_type tuple OID for the type of this var */
int32 vartypmod; /* pg_attribute typmod value */
Index varlevelsup;
/*
- * for subquery variables referencing outer relations; 0 in a normal
- * var, >0 means N levels up
+ * for subquery variables referencing outer relations; 0 in a normal var,
+ * >0 means N levels up
*/
Index varnoold; /* original value of varno, for debugging */
AttrNumber varoattno; /* original value of varattno */
Datum constvalue; /* the constant's value */
bool constisnull; /* whether the constant is null (if true,
* constvalue is undefined) */
- bool constbyval; /* whether this datatype is passed by
- * value. If true, then all the
- * information is stored in the Datum. If
- * false, then the Datum contains a
- * pointer to the information. */
+ bool constbyval; /* whether this datatype is passed by value.
+ * If true, then all the information is stored
+ * in the Datum. If false, then the Datum
+ * contains a pointer to the information. */
} Const;
/* ----------------
* operation */
Oid refarraytype; /* type of the array proper */
Oid refelemtype; /* type of the array elements */
- List *refupperindexpr;/* expressions that evaluate to upper
- * array indexes */
- List *reflowerindexpr;/* expressions that evaluate to lower
- * array indexes */
- Expr *refexpr; /* the expression that evaluates to an
- * array value */
- Expr *refassgnexpr; /* expression for the source value, or
- * NULL if fetch */
+ List *refupperindexpr;/* expressions that evaluate to upper array
+ * indexes */
+ List *reflowerindexpr;/* expressions that evaluate to lower array
+ * indexes */
+ Expr *refexpr; /* the expression that evaluates to an array
+ * value */
+ Expr *refassgnexpr; /* expression for the source value, or NULL if
+ * fetch */
} ArrayRef;
/*
{
Expr xpr;
SubLinkType subLinkType; /* EXISTS, ALL, ANY, MULTIEXPR, EXPR */
- bool useOr; /* TRUE to combine column results with
- * "OR" not "AND" */
- List *lefthand; /* list of outer-query expressions on the
- * left */
+ bool useOr; /* TRUE to combine column results with "OR"
+ * not "AND" */
+ List *lefthand; /* list of outer-query expressions on the left */
List *operName; /* originally specified operator name */
List *operOids; /* OIDs of actual combining operators */
Node *subselect; /* subselect as Query* or parsetree */
Expr xpr;
/* Fields copied from original SubLink: */
SubLinkType subLinkType; /* EXISTS, ALL, ANY, MULTIEXPR, EXPR */
- bool useOr; /* TRUE to combine column results with
- * "OR" not "AND" */
+ bool useOr; /* TRUE to combine column results with "OR"
+ * not "AND" */
/* The combining operators, transformed to executable expressions: */
List *exprs; /* list of OpExpr expression trees */
List *paramIds; /* IDs of Params embedded in the above */
/* The subselect, transformed to a Plan: */
struct Plan *plan; /* subselect plan itself */
int plan_id; /* dummy thing because of we haven't equal
- * funcs for plan nodes... actually, we
- * could put *plan itself somewhere else
- * (TopPlan node ?)... */
+ * funcs for plan nodes... actually, we could
+ * put *plan itself somewhere else (TopPlan
+ * node ?)... */
List *rtable; /* range table for subselect */
/* Information about execution strategy: */
- bool useHashTable; /* TRUE to store subselect output in a
- * hash table (implies we are doing "IN") */
- bool unknownEqFalse; /* TRUE if it's okay to return FALSE when
- * the spec result is UNKNOWN; this allows
- * much simpler handling of null values */
+ bool useHashTable; /* TRUE to store subselect output in a hash
+ * table (implies we are doing "IN") */
+ bool unknownEqFalse; /* TRUE if it's okay to return FALSE when the
+ * spec result is UNKNOWN; this allows much
+ * simpler handling of null values */
/* Information for passing params into and out of the subselect: */
/* setParam and parParam are lists of integers (param IDs) */
List *setParam; /* initplan subqueries have to set these
* Params for parent plan */
- List *parParam; /* indices of input Params from parent
- * plan */
+ List *parParam; /* indices of input Params from parent plan */
List *args; /* exprs to pass as parParam values */
} SubPlan;
Oid row_typeid; /* RECORDOID or a composite type's ID */
/*
- * Note: we deliberately do NOT store a typmod. Although a typmod
- * will be associated with specific RECORD types at runtime, it will
- * differ for different backends, and so cannot safely be stored in
- * stored parsetrees. We must assume typmod -1 for a RowExpr node.
+ * Note: we deliberately do NOT store a typmod. Although a typmod will be
+ * associated with specific RECORD types at runtime, it will differ for
+ * different backends, and so cannot safely be stored in stored
+ * parsetrees. We must assume typmod -1 for a RowExpr node.
*/
CoercionForm row_format; /* how to display this node */
} RowExpr;
* clause */
Oid resorigtbl; /* OID of column's source table */
AttrNumber resorigcol; /* column's number in source table */
- bool resjunk; /* set to true to eliminate the attribute
- * from final target list */
+ bool resjunk; /* set to true to eliminate the attribute from
+ * final target list */
} TargetEntry;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.118 2005/08/27 22:13:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.119 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* This struct is conventionally called "root" in all the planner routines.
* It holds links to all of the planner's working state, in addition to the
- * original Query. Note that at present the planner extensively manipulates
+ * original Query. Note that at present the planner extensively manipulates
* the passed-in Query data structure; someday that should stop.
*----------
*/
/*
* base_rel_array holds pointers to "base rels" and "other rels" (see
- * comments for RelOptInfo for more info). It is indexed by rangetable
- * index (so entry 0 is always wasted). Entries can be NULL when
- * an RTE does not correspond to a base relation. Note that the array
- * may be enlarged on-the-fly.
+ * comments for RelOptInfo for more info). It is indexed by rangetable
+ * index (so entry 0 is always wasted). Entries can be NULL when an RTE
+ * does not correspond to a base relation. Note that the array may be
+ * enlarged on-the-fly.
*/
- struct RelOptInfo **base_rel_array; /* All one-relation RelOptInfos */
+ struct RelOptInfo **base_rel_array; /* All one-relation RelOptInfos */
int base_rel_array_size; /* current allocated array len */
/*
* join_rel_list is a list of all join-relation RelOptInfos we have
- * considered in this planning run. For small problems we just scan
- * the list to do lookups, but when there are many join relations we
- * build a hash table for faster lookups. The hash table is present
- * and valid when join_rel_hash is not NULL. Note that we still maintain
- * the list even when using the hash table for lookups; this simplifies
- * life for GEQO.
+ * considered in this planning run. For small problems we just scan the
+ * list to do lookups, but when there are many join relations we build a
+ * hash table for faster lookups. The hash table is present and valid
+ * when join_rel_hash is not NULL. Note that we still maintain the list
+ * even when using the hash table for lookups; this simplifies life for
+ * GEQO.
*/
List *join_rel_list; /* list of join-relation RelOptInfos */
- struct HTAB *join_rel_hash; /* optional hashtable for join relations */
+ struct HTAB *join_rel_hash; /* optional hashtable for join relations */
- List *equi_key_list; /* list of lists of equijoined
- * PathKeyItems */
+ List *equi_key_list; /* list of lists of equijoined PathKeyItems */
- List *left_join_clauses; /* list of RestrictInfos for outer join
- * clauses w/nonnullable var on left */
+ List *left_join_clauses; /* list of RestrictInfos for outer
+ * join clauses w/nonnullable var on
+ * left */
- List *right_join_clauses; /* list of RestrictInfos for outer join
- * clauses w/nonnullable var on right */
+ List *right_join_clauses; /* list of RestrictInfos for outer
+ * join clauses w/nonnullable var on
+ * right */
- List *full_join_clauses; /* list of RestrictInfos for full outer
- * join clauses */
+ List *full_join_clauses; /* list of RestrictInfos for full
+ * outer join clauses */
List *in_info_list; /* list of InClauseInfos */
- List *query_pathkeys; /* desired pathkeys for query_planner(),
- * and actual pathkeys afterwards */
+ List *query_pathkeys; /* desired pathkeys for query_planner(), and
+ * actual pathkeys afterwards */
List *group_pathkeys; /* groupClause pathkeys, if any */
List *sort_pathkeys; /* sortClause pathkeys, if any */
- double tuple_fraction; /* tuple_fraction passed to query_planner */
+ double tuple_fraction; /* tuple_fraction passed to query_planner */
bool hasJoinRTEs; /* true if any RTEs are RTE_JOIN kind */
bool hasOuterJoins; /* true if any RTEs are outer joins */
Relids *attr_needed; /* array indexed [min_attr .. max_attr] */
int32 *attr_widths; /* array indexed [min_attr .. max_attr] */
List *indexlist;
- BlockNumber pages;
+ BlockNumber pages;
double tuples;
struct Plan *subplan; /* if subquery */
/* used by various scans and joins: */
- List *baserestrictinfo; /* RestrictInfo structures (if
- * base rel) */
+ List *baserestrictinfo; /* RestrictInfo structures (if base
+ * rel) */
QualCost baserestrictcost; /* cost of evaluating the above */
Relids outerjoinset; /* set of base relids */
List *joininfo; /* RestrictInfo structures for join clauses
/*
* Inner indexscans are not in the main pathlist because they are not
- * usable except in specific join contexts. We use the
- * index_inner_paths list just to avoid recomputing the best inner
- * indexscan repeatedly for similar outer relations. See comments for
- * InnerIndexscanInfo.
+ * usable except in specific join contexts. We use the index_inner_paths
+ * list just to avoid recomputing the best inner indexscan repeatedly for
+ * similar outer relations. See comments for InnerIndexscanInfo.
*/
} RelOptInfo;
RelOptInfo *rel; /* back-link to index's table */
/* statistics from pg_class */
- BlockNumber pages; /* number of disk pages in index */
+ BlockNumber pages; /* number of disk pages in index */
double tuples; /* number of index tuples in index */
/* index descriptor information */
RegProcedure amcostestimate; /* OID of the access method's cost fcn */
- List *indexprs; /* expressions for non-simple index
- * columns */
+ List *indexprs; /* expressions for non-simple index columns */
List *indpred; /* predicate if a partial index, else NIL */
bool predOK; /* true if predicate matches query */
Oid sortop; /* the ordering operator ('<' op) */
/*
- * key typically points to a Var node, ie a relation attribute, but it
- * can also point to an arbitrary expression representing the value
- * indexed by an index expression.
+ * key typically points to a Var node, ie a relation attribute, but it can
+ * also point to an arbitrary expression representing the value indexed by
+ * an index expression.
*/
} PathKeyItem;
RelOptInfo *parent; /* the relation this path can build */
/* estimated execution costs for path (see costsize.c for more info) */
- Cost startup_cost; /* cost expended before fetching any
- * tuples */
- Cost total_cost; /* total cost (assuming all tuples
- * fetched) */
+ Cost startup_cost; /* cost expended before fetching any tuples */
+ Cost total_cost; /* total cost (assuming all tuples fetched) */
List *pathkeys; /* sort ordering of path's output */
/* pathkeys is a List of Lists of PathKeyItem nodes; see above */
*
* The individual indexscans are represented by IndexPath nodes, and any
* logic on top of them is represented by a tree of BitmapAndPath and
- * BitmapOrPath nodes. Notice that we can use the same IndexPath node both
+ * BitmapOrPath nodes. Notice that we can use the same IndexPath node both
* to represent a regular IndexScan plan, and as the child of a BitmapHeapPath
* that represents scanning the same index using a BitmapIndexScan. The
* startup_cost and total_cost figures of an IndexPath always represent the
- * costs to use it as a regular IndexScan. The costs of a BitmapIndexScan
+ * costs to use it as a regular IndexScan. The costs of a BitmapIndexScan
* can be computed using the IndexPath's indextotalcost and indexselectivity.
*
* BitmapHeapPaths can be nestloop inner indexscans. The isjoininner and
typedef struct BitmapAndPath
{
Path path;
- List *bitmapquals; /* IndexPaths and BitmapOrPaths */
+ List *bitmapquals; /* IndexPaths and BitmapOrPaths */
Selectivity bitmapselectivity;
} BitmapAndPath;
typedef struct BitmapOrPath
{
Path path;
- List *bitmapquals; /* IndexPaths and BitmapAndPaths */
+ List *bitmapquals; /* IndexPaths and BitmapAndPaths */
Selectivity bitmapselectivity;
} BitmapOrPath;
typedef struct MergePath
{
JoinPath jpath;
- List *path_mergeclauses; /* join clauses to be used for
- * merge */
+ List *path_mergeclauses; /* join clauses to be used for merge */
List *outersortkeys; /* keys for explicit sort, if any */
List *innersortkeys; /* keys for explicit sort, if any */
} MergePath;
* that appeared higher in the tree and were pushed down to the join rel
* because they used no other rels. That's what the is_pushed_down flag is
* for; it tells us that a qual came from a point above the join of the
- * set of base rels listed in required_relids. A clause that originally came
+ * set of base rels listed in required_relids. A clause that originally came
* from WHERE will *always* have its is_pushed_down flag set; a clause that
* came from an INNER JOIN condition, but doesn't use all the rels being
* joined, will also have is_pushed_down set because it will get attached to
bool is_pushed_down; /* TRUE if clause was pushed down in level */
/*
- * This flag is set true if the clause looks potentially useful as a
- * merge or hash join clause, that is if it is a binary opclause with
- * nonoverlapping sets of relids referenced in the left and right
- * sides. (Whether the operator is actually merge or hash joinable
- * isn't checked, however.)
+ * This flag is set true if the clause looks potentially useful as a merge
+ * or hash join clause, that is if it is a binary opclause with
+ * nonoverlapping sets of relids referenced in the left and right sides.
+ * (Whether the operator is actually merge or hash joinable isn't checked,
+ * however.)
*/
bool can_join;
List *sub_targetlist; /* targetlist of original RHS subquery */
/*
- * Note: sub_targetlist is just a list of Vars or expressions; it does
- * not contain TargetEntry nodes.
+ * Note: sub_targetlist is just a list of Vars or expressions; it does not
+ * contain TargetEntry nodes.
*/
} InClauseInfo;
*
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/nodes/tidbitmap.h,v 1.2 2005/08/28 22:47:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/tidbitmap.h,v 1.3 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
- * Actual bitmap representation is private to tidbitmap.c. Callers can
+ * Actual bitmap representation is private to tidbitmap.c. Callers can
* do IsA(x, TIDBitmap) on it, but nothing else.
*/
typedef struct TIDBitmap TIDBitmap;
/* Result structure for tbm_iterate */
typedef struct
{
- BlockNumber blockno; /* page number containing tuples */
+ BlockNumber blockno; /* page number containing tuples */
int ntuples; /* -1 indicates lossy result */
OffsetNumber offsets[1]; /* VARIABLE LENGTH ARRAY */
} TBMIterateResult; /* VARIABLE LENGTH STRUCT */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/clauses.h,v 1.79 2005/05/22 22:30:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/clauses.h,v 1.80 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct
{
- int numAggs; /* total number of aggregate calls */
+ int numAggs; /* total number of aggregate calls */
int numDistinctAggs; /* number that use DISTINCT */
Size transitionSpace; /* for pass-by-ref transition data */
} AggClauseCounts;
#define QTW_DONT_COPY_QUERY 0x04 /* do not copy top Query */
extern bool query_tree_walker(Query *query, bool (*walker) (),
- void *context, int flags);
+ void *context, int flags);
extern Query *query_tree_mutator(Query *query, Node *(*mutator) (),
- void *context, int flags);
+ void *context, int flags);
extern bool range_table_walker(List *rtable, bool (*walker) (),
- void *context, int flags);
+ void *context, int flags);
extern List *range_table_mutator(List *rtable, Node *(*mutator) (),
- void *context, int flags);
+ void *context, int flags);
extern bool query_or_expression_tree_walker(Node *node, bool (*walker) (),
- void *context, int flags);
+ void *context, int flags);
extern Node *query_or_expression_tree_mutator(Node *node, Node *(*mutator) (),
- void *context, int flags);
+ void *context, int flags);
#endif /* CLAUSES_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/cost.h,v 1.70 2005/08/22 17:35:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/cost.h,v 1.71 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void cost_index(IndexPath *path, PlannerInfo *root, IndexOptInfo *index,
List *indexQuals, bool is_injoin);
extern void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
- Path *bitmapqual, bool is_injoin);
+ Path *bitmapqual, bool is_injoin);
extern void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root);
extern void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root);
extern void cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/geqo.h,v 1.39 2005/06/05 22:32:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/geqo.h,v 1.40 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* If you change these, update backend/utils/misc/postgresql.sample.conf
*/
-extern int Geqo_effort; /* 1 .. 10, knob for adjustment of
- * defaults */
+extern int Geqo_effort; /* 1 .. 10, knob for adjustment of defaults */
#define DEFAULT_GEQO_EFFORT 5
#define MIN_GEQO_EFFORT 1
/* routines in geqo_main.c */
extern RelOptInfo *geqo(PlannerInfo *root,
- int number_of_rels, List *initial_rels);
+ int number_of_rels, List *initial_rels);
/* routines in geqo_eval.c */
extern Cost geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/pathnode.h,v 1.61 2005/06/05 22:32:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/pathnode.h,v 1.62 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ScanDirection indexscandir,
bool isjoininner);
extern BitmapHeapPath *create_bitmap_heap_path(PlannerInfo *root,
- RelOptInfo *rel,
- Path *bitmapqual,
- bool isjoininner);
+ RelOptInfo *rel,
+ Path *bitmapqual,
+ bool isjoininner);
extern BitmapAndPath *create_bitmap_and_path(PlannerInfo *root,
- RelOptInfo *rel,
- List *bitmapquals);
+ RelOptInfo *rel,
+ List *bitmapquals);
extern BitmapOrPath *create_bitmap_or_path(PlannerInfo *root,
- RelOptInfo *rel,
- List *bitmapquals);
+ RelOptInfo *rel,
+ List *bitmapquals);
extern TidPath *create_tidscan_path(PlannerInfo *root, RelOptInfo *rel,
List *tideval);
extern AppendPath *create_append_path(RelOptInfo *rel, List *subpaths);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/paths.h,v 1.87 2005/08/27 22:13:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/paths.h,v 1.88 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Path *best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
Relids outer_relids, JoinType jointype);
extern List *group_clauses_by_indexkey(IndexOptInfo *index,
- List *clauses, List *outer_clauses,
- Relids outer_relids,
- bool *found_clause);
+ List *clauses, List *outer_clauses,
+ Relids outer_relids,
+ bool *found_clause);
extern bool match_index_to_operand(Node *operand, int indexcol,
IndexOptInfo *index);
extern List *expand_indexqual_conditions(IndexOptInfo *index,
- List *clausegroups);
+ List *clausegroups);
extern void check_partial_indexes(PlannerInfo *root, RelOptInfo *rel);
extern List *flatten_clausegroups_list(List *clausegroups);
extern List *build_index_pathkeys(PlannerInfo *root, IndexOptInfo *index,
ScanDirection scandir);
extern List *convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
- List *subquery_pathkeys);
+ List *subquery_pathkeys);
extern List *build_join_pathkeys(PlannerInfo *root,
RelOptInfo *joinrel,
JoinType jointype,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/planmain.h,v 1.89 2005/09/28 21:17:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/planmain.h,v 1.90 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* prototypes for plan/planmain.c
*/
extern void query_planner(PlannerInfo *root, List *tlist,
- double tuple_fraction,
- Path **cheapest_path, Path **sorted_path,
- double *num_groups);
+ double tuple_fraction,
+ Path **cheapest_path, Path **sorted_path,
+ double *num_groups);
/*
* prototypes for plan/planagg.c
*/
extern Plan *optimize_minmax_aggregates(PlannerInfo *root, List *tlist,
- Path *best_path);
+ Path *best_path);
/*
* prototypes for plan/createplan.c
extern Plan *materialize_finished_plan(Plan *subplan);
extern Unique *make_unique(Plan *lefttree, List *distinctList);
extern Limit *make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount,
- int offset_est, int count_est);
+ int offset_est, int count_est);
extern SetOp *make_setop(SetOpCmd cmd, Plan *lefttree,
List *distinctList, AttrNumber flagColIdx);
extern Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan);
extern void add_base_rels_to_query(PlannerInfo *root, Node *jtnode);
extern void build_base_rel_tlists(PlannerInfo *root, List *final_tlist);
extern Relids distribute_quals_to_rels(PlannerInfo *root, Node *jtnode,
- bool below_outer_join);
+ bool below_outer_join);
extern void process_implied_equality(PlannerInfo *root,
Node *item1, Node *item2,
Oid sortop1, Oid sortop2,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/planner.h,v 1.33 2005/06/05 22:32:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/planner.h,v 1.34 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Plan *planner(Query *parse, bool isCursor, int cursorOptions,
ParamListInfo boundParams);
extern Plan *subquery_planner(Query *parse, double tuple_fraction,
- List **subquery_pathkeys);
+ List **subquery_pathkeys);
#endif /* PLANNER_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/predtest.h,v 1.2 2005/07/23 21:05:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/predtest.h,v 1.3 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern bool predicate_implied_by(List *predicate_list,
- List *restrictinfo_list);
+ List *restrictinfo_list);
extern bool predicate_refuted_by(List *predicate_list,
- List *restrictinfo_list);
+ List *restrictinfo_list);
#endif /* PREDTEST_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/prep.h,v 1.51 2005/06/10 02:21:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/prep.h,v 1.52 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* prototypes for prepunion.c
*/
extern Plan *plan_set_operations(PlannerInfo *root, double tuple_fraction,
- List **sortClauses);
+ List **sortClauses);
extern List *find_all_inheritors(Oid parentrel);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/restrictinfo.h,v 1.33 2005/07/28 20:26:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/restrictinfo.h,v 1.34 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern RestrictInfo *make_restrictinfo(Expr *clause,
- bool is_pushed_down,
- Relids required_relids);
+ bool is_pushed_down,
+ Relids required_relids);
extern List *make_restrictinfo_from_bitmapqual(Path *bitmapqual,
- bool is_pushed_down,
- bool include_predicates);
+ bool is_pushed_down,
+ bool include_predicates);
extern bool restriction_is_or_clause(RestrictInfo *restrictinfo);
extern List *get_actual_clauses(List *restrictinfo_list);
extern void get_actual_join_clauses(List *restrictinfo_list,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/subselect.h,v 1.25 2005/06/05 22:32:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/subselect.h,v 1.26 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Node *SS_process_sublinks(Node *expr, bool isQual);
extern void SS_finalize_plan(Plan *plan, List *rtable);
extern Param *SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan,
- Oid resulttype, int32 resulttypmod);
+ Oid resulttype, int32 resulttypmod);
#endif /* SUBSELECT_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_node.h,v 1.45 2005/08/01 20:31:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_node.h,v 1.46 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *p_rtable; /* range table so far */
List *p_joinlist; /* join items so far (will become FromExpr
* node's fromlist) */
- List *p_relnamespace; /* current namespace for relations */
- List *p_varnamespace; /* current namespace for columns */
+ List *p_relnamespace; /* current namespace for relations */
+ List *p_varnamespace; /* current namespace for columns */
Oid *p_paramtypes; /* OIDs of types for $n parameter symbols */
int p_numparams; /* allocated size of p_paramtypes[] */
int p_next_resno; /* next targetlist resno to assign */
LockingClause *p_locking_clause; /* FOR UPDATE/FOR SHARE info */
- Node *p_value_substitute; /* what to replace VALUE with,
- * if any */
+ Node *p_value_substitute; /* what to replace VALUE with, if any */
bool p_variableparams;
bool p_hasAggs;
bool p_hasSubLinks;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_target.h,v 1.36 2005/05/31 01:03:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_target.h,v 1.37 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern List *checkInsertTargets(ParseState *pstate, List *cols,
List **attrnos);
extern TupleDesc expandRecordVariable(ParseState *pstate, Var *var,
- int levelsup);
+ int levelsup);
extern char *FigureColname(Node *node);
#endif /* PARSE_TARGET_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parsetree.h,v 1.30 2005/06/03 23:05:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parsetree.h,v 1.31 2005/10/15 02:49:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* get_rte_attribute_type will fail on such an attr)
*/
extern bool get_rte_attribute_is_dropped(RangeTblEntry *rte,
- AttrNumber attnum);
+ AttrNumber attnum);
/* ----------------
*
* Copyright (c) 2001-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/pgstat.h,v 1.37 2005/10/06 02:29:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/pgstat.h,v 1.38 2005/10/15 02:49:41 momjian Exp $
* ----------
*/
#ifndef PGSTAT_H
*/
typedef struct PgStat_MsgHdr
{
- StatMsgType m_type;
+ StatMsgType m_type;
int m_size;
int m_backendid;
int m_procpid;
*/
typedef struct PgStat_MsgBestart
{
- PgStat_MsgHdr m_hdr;
- Oid m_databaseid;
- Oid m_userid;
- SockAddr m_clientaddr;
+ PgStat_MsgHdr m_hdr;
+ Oid m_databaseid;
+ Oid m_userid;
+ SockAddr m_clientaddr;
} PgStat_MsgBestart;
/* ----------
/* ----------
* PgStat_MsgAutovacStart Sent by the autovacuum daemon to signal
- * that a database is going to be processed
+ * that a database is going to be processed
* ----------
*/
typedef struct PgStat_MsgAutovacStart
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
- TimestampTz m_start_time;
+ TimestampTz m_start_time;
} PgStat_MsgAutovacStart;
/* ----------
* PgStat_MsgVacuum Sent by the backend or autovacuum daemon
- * after VACUUM or VACUUM ANALYZE
+ * after VACUUM or VACUUM ANALYZE
* ----------
*/
typedef struct PgStat_MsgVacuum
/* ----------
* PgStat_MsgAnalyze Sent by the backend or autovacuum daemon
- * after ANALYZE
+ * after ANALYZE
* ----------
*/
typedef struct PgStat_MsgAnalyze
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
Oid m_tableoid;
- PgStat_Counter m_live_tuples;
- PgStat_Counter m_dead_tuples;
+ PgStat_Counter m_live_tuples;
+ PgStat_Counter m_dead_tuples;
} PgStat_MsgAnalyze;
PgStat_Counter n_blocks_fetched;
PgStat_Counter n_blocks_hit;
int destroy;
- TimestampTz last_autovac_time;
+ TimestampTz last_autovac_time;
} PgStat_StatDBEntry;
{
/* An entry is non-empty iff procpid > 0 */
int procpid;
- TimestampTz start_timestamp;
- TimestampTz activity_start_timestamp;
+ TimestampTz start_timestamp;
+ TimestampTz activity_start_timestamp;
char activity[PGSTAT_ACTIVITY_SIZE];
/*
- * The following fields are initialized by the BESTART message. If
- * we have received messages from a backend before we have
- * received its BESTART, these fields will be uninitialized:
- * userid and databaseid will be InvalidOid, and clientaddr will
- * be undefined.
+ * The following fields are initialized by the BESTART message. If we have
+ * received messages from a backend before we have received its BESTART,
+ * these fields will be uninitialized: userid and databaseid will be
+ * InvalidOid, and clientaddr will be undefined.
*/
Oid userid;
Oid databaseid;
- SockAddr clientaddr;
+ SockAddr clientaddr;
} PgStat_StatBeEntry;
extern void pgstat_report_tabstat(void);
extern void pgstat_report_autovac(Oid dboid);
extern void pgstat_report_vacuum(Oid tableoid, bool shared,
- bool analyze, PgStat_Counter tuples);
+ bool analyze, PgStat_Counter tuples);
extern void pgstat_report_analyze(Oid tableoid, bool shared,
- PgStat_Counter livetuples,
- PgStat_Counter deadtuples);
+ PgStat_Counter livetuples,
+ PgStat_Counter deadtuples);
extern int pgstat_vacuum_tabstat(void);
extern void pgstat_reset_counters(void);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/include/pgtime.h,v 1.10 2005/09/09 02:31:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/pgtime.h,v 1.11 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern struct pg_tm *pg_localtime(const pg_time_t *timep, const pg_tz *tz);
extern struct pg_tm *pg_gmtime(const pg_time_t *timep);
-extern int pg_next_dst_boundary(const pg_time_t *timep,
- long int *before_gmtoff,
- int *before_isdst,
- pg_time_t *boundary,
- long int *after_gmtoff,
- int *after_isdst,
- const pg_tz *tz);
+extern int pg_next_dst_boundary(const pg_time_t *timep,
+ long int *before_gmtoff,
+ int *before_isdst,
+ pg_time_t *boundary,
+ long int *after_gmtoff,
+ int *after_isdst,
+ const pg_tz *tz);
extern size_t pg_strftime(char *s, size_t max, const char *format,
- const struct pg_tm *tm);
+ const struct pg_tm * tm);
extern void pg_timezone_initialize(void);
extern pg_tz *pg_tzset(const char *tzname);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/port.h,v 1.83 2005/09/27 17:39:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/port.h,v 1.84 2005/10/15 02:49:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern char *last_dir_separator(const char *filename);
extern char *first_path_separator(const char *pathlist);
extern void join_path_components(char *ret_path,
- const char *head, const char *tail);
+ const char *head, const char *tail);
extern void canonicalize_path(char *path);
extern void make_native_path(char *path);
extern bool path_contains_parent_reference(const char *path);
extern unsigned char pg_tolower(unsigned char ch);
#ifdef USE_SNPRINTF
-extern int pg_vsnprintf(char *str, size_t count, const char *fmt, va_list args);
-extern int pg_snprintf(char *str, size_t count, const char *fmt,...)
+extern int pg_vsnprintf(char *str, size_t count, const char *fmt, va_list args);
+extern int
+pg_snprintf(char *str, size_t count, const char *fmt,...)
/* This extension allows gcc to check the format string */
__attribute__((format(printf, 3, 4)));
-extern int pg_sprintf(char *str, const char *fmt,...)
+extern int
+pg_sprintf(char *str, const char *fmt,...)
/* This extension allows gcc to check the format string */
__attribute__((format(printf, 2, 3)));
-extern int pg_fprintf(FILE *stream, const char *fmt,...)
+extern int
+pg_fprintf(FILE *stream, const char *fmt,...)
/* This extension allows gcc to check the format string */
__attribute__((format(printf, 2, 3)));
-extern int pg_printf(const char *fmt,...)
+extern int
+pg_printf(const char *fmt,...)
/* This extension allows gcc to check the format string */
__attribute__((format(printf, 1, 2)));
* know anything about pg_printf.
*/
#ifdef __GNUC__
-#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__)
+#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__)
#define snprintf(...) pg_snprintf(__VA_ARGS__)
#define sprintf(...) pg_sprintf(__VA_ARGS__)
#define fprintf(...) pg_fprintf(__VA_ARGS__)
*/
extern int pgrename(const char *from, const char *to);
extern int pgunlink(const char *path);
+
/* Include this first so later includes don't see these defines */
#ifdef WIN32_CLIENT_ONLY
#include <windows.h>
* Cygwin has its own symlinks which work on Win95/98/ME where
* junction points don't, so use it instead. We have no way of
* knowing what type of system Cygwin binaries will be run on.
- * Note: Some CYGWIN includes might #define WIN32.
+ * Note: Some CYGWIN includes might #define WIN32.
*/
#if defined(WIN32) && !defined(__CYGWIN__)
extern int pgsymlink(const char *oldpath, const char *newpath);
+
#define symlink(oldpath, newpath) pgsymlink(oldpath, newpath)
#endif
-
-#endif /* defined(WIN32) || defined(__CYGWIN__) */
+#endif /* defined(WIN32) || defined(__CYGWIN__) */
extern void copydir(char *fromdir, char *todir, bool recurse);
/* Last parameter not used */
extern int gettimeofday(struct timeval * tp, struct timezone * tzp);
-
-#else /* !WIN32 */
+#else /* !WIN32 */
/*
* Win32 requires a special close for sockets and pipes, while on Unix
* close() does them all.
*/
#define closesocket close
-#endif /* WIN32 */
+#endif /* WIN32 */
/*
* Default "extern" declarations or macro substitutes for library routines.
#define __darwin__ 1
#define HAVE_FSYNC_WRITETHROUGH
-
#ifndef BYTE_ORDER
#define BYTE_ORDER BIG_ENDIAN
#endif
-
#elif defined(__ia64)
/* HPUX runs IA64 in big-endian mode */
#ifndef BYTE_ORDER
#define BYTE_ORDER BIG_ENDIAN
#endif
-
#else
#error unrecognized CPU type for HP-UX
* warning-free compilation.
*/
-#include <sys/types.h>	/* Declare various types, e.g. size_t,
- * fd_set */
+#include <sys/types.h>	/* Declare various types, e.g. size_t, fd_set */
extern int fp_class_d(double);
extern long random(void);
-/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.46 2005/06/16 17:53:54 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.47 2005/10/15 02:49:45 momjian Exp $ */
/* undefine and redefine after #include */
#undef mkdir
#else /* not BUILDING_DLL */
#define DLLIMPORT __declspec (dllimport)
#endif
-
#elif defined(WIN32_CLIENT_ONLY)
#if defined(_DLL)
#else /* not _DLL */
#define DLLIMPORT __declspec (dllimport)
#endif
-
#else /* not CYGWIN, not MSVC, not MingW */
#define DLLIMPORT
#define SIGHUP 1
#define SIGQUIT 3
#define SIGTRAP 5
-#define SIGABRT 22 /* Set to match W32 value -- not UNIX
- * value */
+#define SIGABRT 22 /* Set to match W32 value -- not UNIX value */
#define SIGKILL 9
#define SIGPIPE 13
#define SIGALRM 14
/*
* Supplement to .
*/
-#define lstat(path, sb) stat((path), (sb))
+#define lstat(path, sb) stat((path), (sb))
/*
* Supplement to .
int pgwin32_send(SOCKET s, char *buf, int len, int flags);
const char *pgwin32_socket_strerror(int err);
-int pgwin32_waitforsinglesocket(SOCKET s, int what);
+int pgwin32_waitforsinglesocket(SOCKET s, int what);
/* in backend/port/win32/security.c */
extern int pgwin32_is_admin(void);
#ifdef PGERROR
#define ERROR PGERROR
-/*
+/*
* we can't use the windows gai_strerror{AW} functions because
* they are defined inline in the MS header files. So we'll use our
* own
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/postmaster/autovacuum.h,v 1.2 2005/08/11 21:11:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/postmaster/autovacuum.h,v 1.3 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define AUTOVACUUM_H
/* GUC variables */
-extern bool autovacuum_start_daemon;
-extern int autovacuum_naptime;
-extern int autovacuum_vac_thresh;
-extern double autovacuum_vac_scale;
-extern int autovacuum_anl_thresh;
-extern double autovacuum_anl_scale;
-extern int autovacuum_vac_cost_delay;
-extern int autovacuum_vac_cost_limit;
+extern bool autovacuum_start_daemon;
+extern int autovacuum_naptime;
+extern int autovacuum_vac_thresh;
+extern double autovacuum_vac_scale;
+extern int autovacuum_anl_thresh;
+extern double autovacuum_anl_scale;
+extern int autovacuum_vac_cost_delay;
+extern int autovacuum_vac_cost_limit;
/* Status inquiry functions */
extern bool AutoVacuumingActive(void);
/* Functions to start autovacuum process, called from postmaster */
extern void autovac_init(void);
-extern int autovac_start(void);
+extern int autovac_start(void);
extern void autovac_stopped(void);
#ifdef EXEC_BACKEND
extern void AutoVacMain(int argc, char *argv[]);
#endif
-#endif /* AUTOVACUUM_H */
+#endif /* AUTOVACUUM_H */
*
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/postmaster/fork_process.h,v 1.2 2005/03/13 23:32:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/postmaster/fork_process.h,v 1.3 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern pid_t fork_process(void);
-#endif /* FORK_PROCESS_H */
+#endif /* FORK_PROCESS_H */
*
* Copyright (c) 2004-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/postmaster/syslogger.h,v 1.4 2005/01/01 20:44:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/postmaster/syslogger.h,v 1.5 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifndef WIN32
extern int syslogPipe[2];
-
#else
extern HANDLE syslogPipe[2];
#endif
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/include/regex/regcustom.h,v 1.4 2004/05/07 00:24:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/regex/regcustom.h,v 1.5 2005/10/15 02:49:46 momjian Exp $
*/
/* headers if any */
/* internal character type and related */
typedef pg_wchar chr; /* the type itself */
typedef unsigned uchr; /* unsigned type that will hold a chr */
-typedef int celt; /* type to hold chr, MCCE number, or
- * NOCELT */
+typedef int celt; /* type to hold chr, MCCE number, or NOCELT */
-#define NOCELT (-1) /* celt value which is not valid chr or
- * MCCE */
-#define CHR(c) ((unsigned char) (c)) /* turn char literal into chr
- * literal */
+#define NOCELT (-1) /* celt value which is not valid chr or MCCE */
+#define CHR(c) ((unsigned char) (c)) /* turn char literal into chr literal */
#define DIGITVAL(c) ((c)-'0') /* turn chr digit into its value */
#define CHRBITS 32 /* bits in a chr; must not use sizeof */
#define CHR_MIN 0x00000000 /* smallest and largest chr; the value */
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/include/regex/regex.h,v 1.27 2005/07/10 04:54:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/regex/regex.h,v 1.28 2005/10/15 02:49:46 momjian Exp $
*/
/*
#define REG_NLANCH 000200 /* ^ matches after \n, $ before */
#define REG_NEWLINE 000300 /* newlines are line terminators */
#define REG_PEND 000400 /* ugh -- backward-compatibility hack */
-#define REG_EXPECT 001000 /* report details on partial/limited
- * matches */
+#define REG_EXPECT 001000 /* report details on partial/limited matches */
#define REG_BOSONLY 002000 /* temporary kludge for BOS-only matches */
#define REG_DUMP 004000 /* none of your business :-) */
#define REG_FAKE 010000 /* none of your business :-) */
#define REG_BADRPT 13 /* quantifier operand invalid */
#define REG_ASSERT 15 /* "can't happen" -- you found a bug */
#define REG_INVARG 16 /* invalid argument to regex function */
-#define REG_MIXED 17 /* character widths of regex and string
- * differ */
+#define REG_MIXED 17 /* character widths of regex and string differ */
#define REG_BADOPT 18 /* invalid embedded option */
/* two specials for debugging and testing */
#define REG_ATOI 101 /* convert error-code name to number */
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/include/regex/regguts.h,v 1.4 2005/05/25 21:40:42 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/regex/regguts.h,v 1.5 2005/10/15 02:49:46 momjian Exp $
*/
#ifndef BYTBITS
#define BYTBITS 8 /* bits in a byt */
#endif
-#define BYTTAB (1<<BYTBITS)	/* size of lookup table by byt
- * value */
+#define BYTTAB (1<<BYTBITS)	/* size of lookup table by byt value */
#define BYTMASK (BYTTAB-1) /* bit mask for byt */
#define NBYTS ((CHRBITS+BYTBITS-1)/BYTBITS)
/* the definition of GETCOLOR(), below, assumes NBYTS <= 4 */
struct state *tmp; /* temporary for traversal algorithms */
struct state *next; /* chain for traversing all */
struct state *prev; /* back chain */
- struct arcbatch oas; /* first arcbatch, avoid malloc in easy
- * case */
+ struct arcbatch oas; /* first arcbatch, avoid malloc in easy case */
int noas; /* number of arcs used in first arcbatch */
};
*/
struct subre
{
- char op; /* '|', '.' (concat), 'b' (backref), '(',
- * '=' */
+ char op; /* '|', '.' (concat), 'b' (backref), '(', '=' */
char flags;
#define LONGER 01 /* prefers longer match */
#define SHORTER 02 /* prefers shorter match */
int subno; /* subexpression number (for 'b' and '(') */
short min; /* min repetitions, for backref only */
short max; /* max repetitions, for backref only */
- struct subre *left; /* left child, if any (also freelist
- * chain) */
+ struct subre *left; /* left child, if any (also freelist chain) */
struct subre *right; /* right child, if any */
struct state *begin; /* outarcs from here... */
struct state *end; /* ...ending in inarcs here */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/backendid.h,v 1.17 2004/12/31 22:03:42 pgsql Exp $
+ * $PostgreSQL: pgsql/src/include/storage/backendid.h,v 1.18 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* -cim 8/17/90
* ----------------
*/
-typedef int BackendId; /* unique currently active backend
- * identifier */
+typedef int BackendId; /* unique currently active backend identifier */
#define InvalidBackendId (-1)
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.80 2005/10/12 16:45:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.81 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define BM_DIRTY (1 << 0) /* data needs writing */
#define BM_VALID (1 << 1) /* data is valid */
#define BM_TAG_VALID (1 << 2) /* tag is assigned */
-#define BM_IO_IN_PROGRESS (1 << 3) /* read or write in
- * progress */
+#define BM_IO_IN_PROGRESS (1 << 3) /* read or write in progress */
#define BM_IO_ERROR (1 << 4) /* previous I/O failed */
-#define BM_JUST_DIRTIED (1 << 5) /* dirtied since write
- * started */
-#define BM_PIN_COUNT_WAITER (1 << 6) /* have waiter for sole
- * pin */
+#define BM_JUST_DIRTIED (1 << 5) /* dirtied since write started */
+#define BM_PIN_COUNT_WAITER (1 << 6) /* have waiter for sole pin */
typedef bits16 BufFlags;
*
* Note: buf_hdr_lock must be held to examine or change the tag, flags,
* usage_count, refcount, or wait_backend_pid fields. buf_id field never
- * changes after initialization, so does not need locking. freeNext is
+ * changes after initialization, so does not need locking. freeNext is
* protected by the BufFreelistLock not buf_hdr_lock. The LWLocks can take
- * care of themselves. The buf_hdr_lock is *not* used to control access to
+ * care of themselves. The buf_hdr_lock is *not* used to control access to
* the data in the buffer!
*
* An exception is that if we have the buffer pinned, its tag can't change
*
* We can't physically remove items from a disk page if another backend has
* the buffer pinned. Hence, a backend may need to wait for all other pins
- * to go away. This is signaled by storing its own PID into
+ * to go away. This is signaled by storing its own PID into
* wait_backend_pid and setting flag bit BM_PIN_COUNT_WAITER. At present,
* there can be only one such waiter per buffer.
*
BufFlags flags; /* see bit definitions above */
uint16 usage_count; /* usage counter for clock sweep code */
unsigned refcount; /* # of backends holding pins on buffer */
- int wait_backend_pid; /* backend PID of pin-count waiter */
+ int wait_backend_pid; /* backend PID of pin-count waiter */
slock_t buf_hdr_lock; /* protects the above fields */
* ensure that the compiler doesn't rearrange accesses to the header to
* occur before or after the spinlock is acquired/released.
*/
-#define LockBufHdr(bufHdr) \
+#define LockBufHdr(bufHdr) \
SpinLockAcquire(&(bufHdr)->buf_hdr_lock)
#define UnlockBufHdr(bufHdr) \
SpinLockRelease(&(bufHdr)->buf_hdr_lock)
#define LockBufHdr_NoHoldoff(bufHdr) \
SpinLockAcquire_NoHoldoff(&(bufHdr)->buf_hdr_lock)
-#define UnlockBufHdr_NoHoldoff(bufHdr) \
+#define UnlockBufHdr_NoHoldoff(bufHdr) \
SpinLockRelease_NoHoldoff(&(bufHdr)->buf_hdr_lock)
extern void StrategyInitialize(bool init);
/* buf_table.c */
-extern Size BufTableShmemSize(int size);
+extern Size BufTableShmemSize(int size);
extern void InitBufTable(int size);
extern int BufTableLookup(BufferTag *tagPtr);
extern int BufTableInsert(BufferTag *tagPtr, int buf_id);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/bufmgr.h,v 1.96 2005/08/20 23:26:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/bufmgr.h,v 1.97 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void AbortBufferIO(void);
extern void BufmgrCommit(void);
-extern void BufferSync(void);
+extern void BufferSync(void);
extern void BgBufferSync(void);
extern void AtProcExit_LocalBuffers(void);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/fd.h,v 1.53 2005/08/08 03:12:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/fd.h,v 1.54 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void closeAllVfds(void);
extern void AtEOXact_Files(void);
extern void AtEOSubXact_Files(bool isCommit, SubTransactionId mySubid,
- SubTransactionId parentSubid);
+ SubTransactionId parentSubid);
extern void RemovePgTempFiles(void);
extern int pg_fsync(int fd);
extern int pg_fsync_no_writethrough(int fd);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/itemptr.h,v 1.26 2004/12/31 22:03:42 pgsql Exp $
+ * $PostgreSQL: pgsql/src/include/storage/itemptr.h,v 1.27 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
#ifdef __arm__
-__attribute__((packed)) /* Appropriate whack upside the head for
- * ARM */
+__attribute__((packed)) /* Appropriate whack upside the head for ARM */
#endif
ItemPointerData;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/lmgr.h,v 1.51 2005/08/01 20:31:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/lmgr.h,v 1.52 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define RowExclusiveLock 3 /* INSERT, UPDATE, DELETE */
#define ShareUpdateExclusiveLock 4 /* VACUUM (non-FULL) */
#define ShareLock 5 /* CREATE INDEX */
-#define ShareRowExclusiveLock 6 /* like EXCLUSIVE MODE, but allows
- * ROW SHARE */
+#define ShareRowExclusiveLock 6 /* like EXCLUSIVE MODE, but allows ROW
+ * SHARE */
#define ExclusiveLock 7 /* blocks ROW SHARE/SELECT...FOR
* UPDATE */
#define AccessExclusiveLock 8 /* ALTER TABLE, DROP TABLE, VACUUM
- * FULL, and unqualified LOCK
- * TABLE */
+ * FULL, and unqualified LOCK TABLE */
/*
* Note: all lock mode numbers must be less than lock.h's MAX_LOCKMODES,
extern void UnlockRelation(Relation relation, LOCKMODE lockmode);
extern void LockRelationForSession(LockRelId *relid, bool istemprel,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
extern void UnlockRelationForSession(LockRelId *relid, LOCKMODE lockmode);
/* Lock a relation for extension */
/* Lock a tuple (see heap_lock_tuple before assuming you understand this) */
extern void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode);
extern bool ConditionalLockTuple(Relation relation, ItemPointer tid,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
extern void UnlockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode);
/* Lock an XID (used to wait for a transaction to finish) */
/* Lock a general object (other than a relation) of the current database */
extern void LockDatabaseObject(Oid classid, Oid objid, uint16 objsubid,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
extern void UnlockDatabaseObject(Oid classid, Oid objid, uint16 objsubid,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
/* Lock a shared-across-databases object (other than a relation) */
extern void LockSharedObject(Oid classid, Oid objid, uint16 objsubid,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
extern void UnlockSharedObject(Oid classid, Oid objid, uint16 objsubid,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
#endif /* LMGR_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.90 2005/08/20 23:26:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.91 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* ID info for a transaction is its TransactionId */
LOCKTAG_OBJECT, /* non-relation database object */
/* ID info for an object is DB OID + CLASS OID + OBJECT OID + SUBID */
+
/*
* Note: object ID has same representation as in pg_depend and
* pg_description, but notice that we are constraining SUBID to 16 bits.
* to widen Oid, BlockNumber, or TransactionId to more than 32 bits.
*
* We include lockmethodid in the locktag so that a single hash table in
- * shared memory can store locks of different lockmethods. For largely
+ * shared memory can store locks of different lockmethods. For largely
* historical reasons, it's passed to the lock.c routines as a separate
* argument and then stored into the locktag.
*/
typedef struct LOCKTAG
{
- uint32 locktag_field1; /* a 32-bit ID field */
- uint32 locktag_field2; /* a 32-bit ID field */
- uint32 locktag_field3; /* a 32-bit ID field */
- uint16 locktag_field4; /* a 16-bit ID field */
- uint8 locktag_type; /* see enum LockTagType */
+ uint32 locktag_field1; /* a 32-bit ID field */
+ uint32 locktag_field2; /* a 32-bit ID field */
+ uint32 locktag_field3; /* a 32-bit ID field */
+ uint16 locktag_field4; /* a 16-bit ID field */
+ uint8 locktag_type; /* see enum LockTagType */
uint8 locktag_lockmethodid; /* lockmethod indicator */
} LOCKTAG;
/*
* These macros define how we map logical IDs of lockable objects into
- * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
+ * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
* rather than accessing the fields directly. Note multiple eval of target!
*/
#define SET_LOCKTAG_RELATION(locktag,dboid,reloid) \
/* data */
LOCKMASK grantMask; /* bitmask for lock types already granted */
LOCKMASK waitMask; /* bitmask for lock types awaited */
- SHM_QUEUE procLocks; /* list of PROCLOCK objects assoc. with
- * lock */
+ SHM_QUEUE procLocks; /* list of PROCLOCK objects assoc. with lock */
PROC_QUEUE waitProcs; /* list of PGPROC objects waiting on lock */
- int requested[MAX_LOCKMODES]; /* counts of requested
- * locks */
+ int requested[MAX_LOCKMODES]; /* counts of requested locks */
int nRequested; /* total of requested[] array */
int granted[MAX_LOCKMODES]; /* counts of granted locks */
int nGranted; /* total of granted[] array */
*
* Internally to a backend, it is possible for the same lock to be held
* for different purposes: the backend tracks transaction locks separately
- * from session locks. However, this is not reflected in the shared-memory
+ * from session locks. However, this is not reflected in the shared-memory
* state: we only track which backend(s) hold the lock. This is OK since a
* backend can never block itself.
*
* as soon as convenient.
*
* releaseMask is workspace for LockReleaseAll(): it shows the locks due
- * to be released during the current call. This must only be examined or
+ * to be released during the current call. This must only be examined or
* set by the backend owning the PROCLOCK.
*
* Each PROCLOCK object is linked into lists for both the associated LOCK
int numModes);
extern LOCKMETHODID LockMethodTableRename(LOCKMETHODID lockmethodid);
extern LockAcquireResult LockAcquire(LOCKMETHODID lockmethodid,
- LOCKTAG *locktag,
- bool isTempObject,
- LOCKMODE lockmode,
- bool sessionLock,
- bool dontWait);
+ LOCKTAG *locktag,
+ bool isTempObject,
+ LOCKMODE lockmode,
+ bool sessionLock,
+ bool dontWait);
extern bool LockRelease(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
- LOCKMODE lockmode, bool sessionLock);
+ LOCKMODE lockmode, bool sessionLock);
extern void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks);
extern void LockReleaseCurrentOwner(void);
extern void LockReassignCurrentOwner(void);
extern const char *GetLockmodeName(LOCKMODE mode);
extern void lock_twophase_recover(TransactionId xid, uint16 info,
- void *recdata, uint32 len);
+ void *recdata, uint32 len);
extern void lock_twophase_postcommit(TransactionId xid, uint16 info,
- void *recdata, uint32 len);
+ void *recdata, uint32 len);
extern void lock_twophase_postabort(TransactionId xid, uint16 info,
- void *recdata, uint32 len);
+ void *recdata, uint32 len);
#ifdef LOCK_DEBUG
extern void DumpLocks(PGPROC *proc);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/lwlock.h,v 1.22 2005/08/20 23:26:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/lwlock.h,v 1.23 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
BgWriterCommLock,
TwoPhaseStateLock,
- NumFixedLWLocks, /* must be last except for
- * MaxDynamicLWLock */
+ NumFixedLWLocks, /* must be last except for MaxDynamicLWLock */
MaxDynamicLWLock = 1000000000
} LWLockId;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/pg_shmem.h,v 1.15 2005/08/20 23:26:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/pg_shmem.h,v 1.16 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#endif
extern PGShmemHeader *PGSharedMemoryCreate(Size size, bool makePrivate,
- int port);
+ int port);
extern bool PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2);
extern void PGSharedMemoryDetach(void);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.83 2005/10/11 20:41:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.84 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
TransactionId xid; /* transaction currently being executed by
* this proc */
- TransactionId xmin; /* minimal running XID as it was when we
- * were starting our xact: vacuum must not
- * remove tuples deleted by xid >= xmin ! */
+ TransactionId xmin; /* minimal running XID as it was when we were
+ * starting our xact: vacuum must not remove
+ * tuples deleted by xid >= xmin ! */
int pid; /* This backend's process id, or 0 */
Oid databaseId; /* OID of database this backend is using */
LOCK *waitLock; /* Lock object we're sleeping on ... */
PROCLOCK *waitProcLock; /* Per-holder info for awaited lock */
LOCKMODE waitLockMode; /* type of lock we're waiting for */
- LOCKMASK heldLocks; /* bitmask for lock types already held on
- * this lock object by this backend */
+ LOCKMASK heldLocks; /* bitmask for lock types already held on this
+ * lock object by this backend */
- SHM_QUEUE procLocks; /* list of PROCLOCK objects for locks held
- * or awaited by this backend */
+ SHM_QUEUE procLocks; /* list of PROCLOCK objects for locks held or
+ * awaited by this backend */
struct XidCache subxids; /* cache for subtransaction XIDs */
};
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/procarray.h,v 1.5 2005/08/20 23:26:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/procarray.h,v 1.6 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern TransactionId GetOldestXmin(bool allDbs);
extern PGPROC *BackendPidGetProc(int pid);
-extern int BackendXidGetPid(TransactionId xid);
+extern int BackendXidGetPid(TransactionId xid);
extern bool IsBackendPid(int pid);
extern bool DatabaseHasActiveBackends(Oid databaseId, bool ignoreMyself);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/sinval.h,v 1.42 2005/08/20 23:26:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/sinval.h,v 1.43 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* invalidates an entry in a catcache, one that invalidates a relcache entry,
* and one that invalidates an smgr cache entry. More types could be added
* if needed. The message type is identified by the first "int16" field of
- * the message struct. Zero or positive means a catcache inval message (and
+ * the message struct. Zero or positive means a catcache inval message (and
* also serves as the catcache ID field). -1 means a relcache inval message.
- * -2 means an smgr inval message. Other negative values are available to
+ * -2 means an smgr inval message. Other negative values are available to
* identify other inval message types.
*
* Catcache inval events are initially driven by detecting tuple inserts,
extern void SendSharedInvalidMessage(SharedInvalidationMessage *msg);
extern void ReceiveSharedInvalidMessages(
- void (*invalFunction) (SharedInvalidationMessage *msg),
+ void (*invalFunction) (SharedInvalidationMessage *msg),
void (*resetFunction) (void));
/* signal handler for catchup events (SIGUSR1) */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/sinvaladt.h,v 1.39 2005/08/20 23:26:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/sinvaladt.h,v 1.40 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
int minMsgNum; /* oldest message still needed */
int maxMsgNum; /* next message number to be assigned */
- int lastBackend; /* index of last active procState entry,
- * +1 */
+ int lastBackend; /* index of last active procState entry, +1 */
int maxBackends; /* size of procState array */
int freeBackends; /* number of empty procState slots */
/*
* Per-backend state info.
*
- * We declare procState as 1 entry because C wants a fixed-size array,
- * but actually it is maxBackends entries long.
+ * We declare procState as 1 entry because C wants a fixed-size array, but
+ * actually it is maxBackends entries long.
*/
ProcState procState[1]; /* reflects the invalidation state */
} SISeg;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/smgr.h,v 1.52 2005/06/17 22:32:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/smgr.h,v 1.53 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* An SMgrRelation may have an "owner", which is just a pointer to it from
* somewhere else; smgr.c will clear this pointer if the SMgrRelation is
- * closed. We use this to avoid dangling pointers from relcache to smgr
+ * closed. We use this to avoid dangling pointers from relcache to smgr
* without having to make the smgr explicitly aware of relcache. There
* can't be more than one "owner" pointer per SMgrRelation, but that's
* all we need.
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tcop/dest.h,v 1.46 2005/03/16 21:38:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/dest.h,v 1.47 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/* Called for each tuple to be output: */
void (*receiveSlot) (TupleTableSlot *slot,
- DestReceiver *self);
+ DestReceiver *self);
/* Per-executor-run initialization and shutdown: */
void (*rStartup) (DestReceiver *self,
int operation,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tcop/pquery.h,v 1.35 2005/06/22 17:45:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/pquery.h,v 1.36 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern List *FetchPortalTargetList(Portal portal);
extern void PortalStart(Portal portal, ParamListInfo params,
- Snapshot snapshot);
+ Snapshot snapshot);
extern void PortalSetResultFormat(Portal portal, int nFormats,
int16 *formats);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.77 2005/08/11 21:11:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.78 2005/10/15 02:49:46 momjian Exp $
*
* OLD COMMENTS
* This file was created so that other c files could get the two
extern void ResetUsage(void);
extern void ShowUsage(const char *title);
extern void set_debug_options(int debug_flag,
- GucContext context, GucSource source);
+ GucContext context, GucSource source);
#endif /* TCOPPROT_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/acl.h,v 1.84 2005/10/10 18:49:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/acl.h,v 1.85 2005/10/15 02:49:46 momjian Exp $
*
* NOTES
* An ACL array is simply an array of AclItems, representing the union
/*
* Definitions for convenient access to Acl (array of AclItem) and IdList
- * (array of Oid). These are standard PostgreSQL arrays, but are restricted
+ * (array of Oid). These are standard PostgreSQL arrays, but are restricted
* to have one dimension. We also ignore the lower bound when reading,
* and set it to one when writing.
*
extern AclMode aclmask(const Acl *acl, Oid roleid, Oid ownerId,
AclMode mask, AclMaskHow how);
-extern int aclmembers(const Acl *acl, Oid **roleids);
+extern int aclmembers(const Acl *acl, Oid **roleids);
extern bool has_privs_of_role(Oid member, Oid role);
extern bool is_member_of_role(Oid member, Oid role);
extern void check_is_member_of_role(Oid member, Oid role);
extern void select_best_grantor(Oid roleId, AclMode privileges,
- const Acl *acl, Oid ownerId,
- Oid *grantorId, AclMode *grantOptions);
+ const Acl *acl, Oid ownerId,
+ Oid *grantorId, AclMode *grantOptions);
extern void initialize_acl(void);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/array.h,v 1.54 2005/03/29 00:17:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/array.h,v 1.55 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool *isNull);
extern Datum array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType,
- ArrayMapState *amstate);
+ ArrayMapState *amstate);
extern ArrayType *construct_array(Datum *elems, int nelems,
Oid elmtype,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.265 2005/10/02 23:50:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.266 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Datum numeric_out(PG_FUNCTION_ARGS);
extern Datum numeric_recv(PG_FUNCTION_ARGS);
extern Datum numeric_send(PG_FUNCTION_ARGS);
-extern Datum numeric (PG_FUNCTION_ARGS);
+extern Datum numeric(PG_FUNCTION_ARGS);
extern Datum numeric_abs(PG_FUNCTION_ARGS);
extern Datum numeric_uminus(PG_FUNCTION_ARGS);
extern Datum numeric_uplus(PG_FUNCTION_ARGS);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.55 2005/08/13 22:18:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.56 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
long cc_newloads; /* # of successful loads of new entry */
/*
- * cc_searches - (cc_hits + cc_neg_hits + cc_newloads) is number of
- * failed searches, each of which will result in loading a negative
- * entry
+ * cc_searches - (cc_hits + cc_neg_hits + cc_newloads) is number of failed
+ * searches, each of which will result in loading a negative entry
*/
long cc_invals; /* # of entries invalidated from cache */
long cc_discards; /* # of entries discarded due to overflow */
/*
* Each tuple in a cache is a member of two Dllists: one lists all the
- * elements in all the caches in LRU order, and the other lists just
- * the elements in one hashbucket of one cache, also in LRU order.
+ * elements in all the caches in LRU order, and the other lists just the
+ * elements in one hashbucket of one cache, also in LRU order.
*/
Dlelem lrulist_elem; /* list member of global LRU list */
Dlelem cache_elem; /* list member of per-bucket list */
/*
* The tuple may also be a member of at most one CatCList. (If a single
- * catcache is list-searched with varying numbers of keys, we may have
- * to make multiple entries for the same tuple because of this
- * restriction. Currently, that's not expected to be common, so we
- * accept the potential inefficiency.)
+ * catcache is list-searched with varying numbers of keys, we may have to
+ * make multiple entries for the same tuple because of this restriction.
+ * Currently, that's not expected to be common, so we accept the potential
+ * inefficiency.)
*/
struct catclist *c_list; /* containing CatCList, or NULL if none */
* A tuple marked "dead" must not be returned by subsequent searches.
* However, it won't be physically deleted from the cache until its
* refcount goes to zero. (If it's a member of a CatCList, the list's
- * refcount must go to zero, too; also, remember to mark the list dead
- * at the same time the tuple is marked.)
+ * refcount must go to zero, too; also, remember to mark the list dead at
+ * the same time the tuple is marked.)
*
- * A negative cache entry is an assertion that there is no tuple matching
- * a particular key. This is just as useful as a normal entry so far
- * as avoiding catalog searches is concerned. Management of positive
- * and negative entries is identical.
+ * A negative cache entry is an assertion that there is no tuple matching a
+ * particular key. This is just as useful as a normal entry so far as
+ * avoiding catalog searches is concerned. Management of positive and
+ * negative entries is identical.
*/
int refcount; /* number of active references */
bool dead; /* dead but not yet removed? */
CatCache *my_cache; /* link to owning catcache */
/*
- * A CatCList describes the result of a partial search, ie, a search
- * using only the first K key columns of an N-key cache. We form the
- * keys used into a tuple (with other attributes NULL) to represent
- * the stored key set. The CatCList object contains links to cache
- * entries for all the table rows satisfying the partial key. (Note:
- * none of these will be negative cache entries.)
+ * A CatCList describes the result of a partial search, ie, a search using
+ * only the first K key columns of an N-key cache. We form the keys used
+ * into a tuple (with other attributes NULL) to represent the stored key
+ * set. The CatCList object contains links to cache entries for all the
+ * table rows satisfying the partial key. (Note: none of these will be
+ * negative cache entries.)
*
- * A CatCList is only a member of a per-cache list; we do not do separate
- * LRU management for CatCLists. See CatalogCacheCleanup() for the
- * details of the management algorithm.
+ * A CatCList is only a member of a per-cache list; we do not do separate LRU
+ * management for CatCLists. See CatalogCacheCleanup() for the details of
+ * the management algorithm.
*
- * A list marked "dead" must not be returned by subsequent searches.
- * However, it won't be physically deleted from the cache until its
- * refcount goes to zero. (A list should be marked dead if any of its
- * member entries are dead.)
+ * A list marked "dead" must not be returned by subsequent searches. However,
+ * it won't be physically deleted from the cache until its refcount goes
+ * to zero. (A list should be marked dead if any of its member entries
+ * are dead.)
*
* If "ordered" is true then the member tuples appear in the order of the
- * cache's underlying index. This will be true in normal operation,
- * but might not be true during bootstrap or recovery operations.
- * (namespace.c is able to save some cycles when it is true.)
+ * cache's underlying index. This will be true in normal operation, but
+ * might not be true during bootstrap or recovery operations. (namespace.c
+ * is able to save some cycles when it is true.)
*/
Dlelem cache_elem; /* list member of per-catcache list */
int refcount; /* number of active references */
ItemPointer pointer);
extern void PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
- void (*function) (int, uint32, ItemPointer, Oid));
+ void (*function) (int, uint32, ItemPointer, Oid));
extern void PrintCatCacheLeakWarning(HeapTuple tuple);
extern void PrintCatCacheListLeakWarning(CatCList *list);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/date.h,v 1.31 2005/10/09 17:21:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/date.h,v 1.32 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifdef HAVE_INT64_TIMESTAMP
typedef int64 TimeADT;
-
#else
typedef float8 TimeADT;
#endif
typedef struct
{
#ifdef HAVE_INT64_TIMESTAMP
- int64 time; /* all time units other than months and
- * years */
+ int64 time; /* all time units other than months and years */
#else
- double time; /* all time units other than months and
- * years */
+ double time; /* all time units other than months and years */
#endif
int32 zone; /* numeric time zone, in seconds */
} TimeTzADT;
#define DateADTGetDatum(X) Int32GetDatum(X)
#define TimeADTGetDatum(X) Int64GetDatum(X)
#define TimeTzADTPGetDatum(X) PointerGetDatum(X)
-
#else
#define MAX_TIME_PRECISION 10
#define DateADTGetDatum(X) Int32GetDatum(X)
#define TimeADTGetDatum(X) Float8GetDatum(X)
#define TimeTzADTPGetDatum(X) PointerGetDatum(X)
-
-#endif /* HAVE_INT64_TIMESTAMP */
+#endif /* HAVE_INT64_TIMESTAMP */
#define PG_GETARG_DATEADT(n) DatumGetDateADT(PG_GETARG_DATUM(n))
#define PG_GETARG_TIMEADT(n) DatumGetTimeADT(PG_GETARG_DATUM(n))
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/datetime.h,v 1.56 2005/07/23 14:25:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/datetime.h,v 1.57 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define DTK_DATE_M (DTK_M(YEAR) | DTK_M(MONTH) | DTK_M(DAY))
#define DTK_TIME_M (DTK_M(HOUR) | DTK_M(MINUTE) | DTK_M(SECOND))
-#define MAXDATELEN 51 /* maximum possible length of an input
- * date string (not counting tr. null) */
-#define MAXDATEFIELDS 25 /* maximum possible number of fields in a
- * date string */
+#define MAXDATELEN 51 /* maximum possible length of an input date
+ * string (not counting tr. null) */
+#define MAXDATEFIELDS 25 /* maximum possible number of fields in a date
+ * string */
#define TOKMAXLEN 10 /* only this many chars are stored in
* datetktbl */
#define DTERR_TZDISP_OVERFLOW (-5)
-extern void GetCurrentDateTime(struct pg_tm *tm);
-extern void GetCurrentTimeUsec(struct pg_tm *tm, fsec_t *fsec, int *tzp);
+extern void GetCurrentDateTime(struct pg_tm * tm);
+extern void GetCurrentTimeUsec(struct pg_tm * tm, fsec_t *fsec, int *tzp);
extern void j2date(int jd, int *year, int *month, int *day);
extern int date2j(int year, int month, int day);
int maxfields, int *numfields);
extern int DecodeDateTime(char **field, int *ftype,
int nf, int *dtype,
- struct pg_tm *tm, fsec_t *fsec, int *tzp);
+ struct pg_tm * tm, fsec_t *fsec, int *tzp);
extern int DecodeTimeOnly(char **field, int *ftype,
int nf, int *dtype,
- struct pg_tm *tm, fsec_t *fsec, int *tzp);
+ struct pg_tm * tm, fsec_t *fsec, int *tzp);
extern int DecodeInterval(char **field, int *ftype,
int nf, int *dtype,
- struct pg_tm *tm, fsec_t *fsec);
+ struct pg_tm * tm, fsec_t *fsec);
extern void DateTimeParseError(int dterr, const char *str,
const char *datatype);
-extern int DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp);
+extern int DetermineTimeZoneOffset(struct pg_tm * tm, pg_tz *tzp);
-extern int EncodeDateOnly(struct pg_tm *tm, int style, char *str);
-extern int EncodeTimeOnly(struct pg_tm *tm, fsec_t fsec, int *tzp, int style, char *str);
-extern int EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str);
-extern int EncodeInterval(struct pg_tm *tm, fsec_t fsec, int style, char *str);
+extern int EncodeDateOnly(struct pg_tm * tm, int style, char *str);
+extern int EncodeTimeOnly(struct pg_tm * tm, fsec_t fsec, int *tzp, int style, char *str);
+extern int EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str);
+extern int EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str);
extern int DecodeSpecial(int field, char *lowtoken, int *val);
extern int DecodeUnits(int field, char *lowtoken, int *val);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/elog.h,v 1.80 2005/10/14 20:53:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/elog.h,v 1.81 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define DEBUG3 12
#define DEBUG2 13
#define DEBUG1 14 /* used by GUC debug_* variables */
-#define LOG 15 /* Server operational messages; sent only
- * to server log by default. */
-#define COMMERROR 16 /* Client communication problems; same as
- * LOG for server reporting, but never
- * sent to client. */
-#define INFO 17 /* Informative messages that are always
- * sent to client; is not affected by
+#define LOG 15 /* Server operational messages; sent only to
+ * server log by default. */
+#define COMMERROR 16 /* Client communication problems; same as LOG
+ * for server reporting, but never sent to
+ * client. */
+#define INFO 17 /* Informative messages that are always sent
+ * to client; is not affected by
* client_min_messages */
#define NOTICE 18 /* Helpful messages to users about query
- * operation; sent to client and server
- * log by default. */
-#define WARNING 19 /* Warnings. NOTICE is for expected
- * messages like implicit sequence
- * creation by SERIAL. WARNING is for
- * unexpected messages. */
-#define ERROR 20 /* user error - abort transaction; return
- * to known state */
+ * operation; sent to client and server log
+ * by default. */
+#define WARNING 19 /* Warnings. NOTICE is for expected messages
+ * like implicit sequence creation by SERIAL.
+ * WARNING is for unexpected messages. */
+#define ERROR 20 /* user error - abort transaction; return to
+ * known state */
/* Save ERROR value in PGERROR so it can be restored when Win32 includes
* modify it. We have to use a constant rather than ERROR because macros
* are expanded only when referenced outside macros.
/* Other exported functions */
extern void DebugFileOpen(void);
extern char *unpack_sql_state(int sql_state);
+
#ifdef HAVE_SYSLOG
extern void set_syslog_parameters(const char *ident, int facility);
#endif
* Routines for maintaining "flat file" images of the shared catalogs.
*
*
- * $PostgreSQL: pgsql/src/include/utils/flatfiles.h,v 1.5 2005/06/28 05:09:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/flatfiles.h,v 1.6 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void AtPrepare_UpdateFlatFiles(void);
extern void AtEOXact_UpdateFlatFiles(bool isCommit);
extern void AtEOSubXact_UpdateFlatFiles(bool isCommit,
- SubTransactionId mySubid,
- SubTransactionId parentSubid);
+ SubTransactionId mySubid,
+ SubTransactionId parentSubid);
extern Datum flatfile_update_trigger(PG_FUNCTION_ARGS);
extern void flatfile_twophase_postcommit(TransactionId xid, uint16 info,
- void *recdata, uint32 len);
+ void *recdata, uint32 len);
#endif /* FLATFILES_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/fmgrtab.h,v 1.24 2004/12/31 22:03:46 pgsql Exp $
+ * $PostgreSQL: pgsql/src/include/utils/fmgrtab.h,v 1.25 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
Oid foid; /* OID of the function */
const char *funcName; /* C name of the function */
- short nargs; /* 0..FUNC_MAX_ARGS, or -1 if variable
- * count */
+ short nargs; /* 0..FUNC_MAX_ARGS, or -1 if variable count */
bool strict; /* T if function is "strict" */
bool retset; /* T if function returns a set */
PGFunction func; /* pointer to compiled function */
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
* Written by Peter Eisentraut .
*
- * $PostgreSQL: pgsql/src/include/utils/guc.h,v 1.62 2005/07/30 15:17:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/guc.h,v 1.63 2005/10/15 02:49:46 momjian Exp $
*--------------------------------------------------------------------
*/
#ifndef GUC_H
extern char *IdentFileName;
extern char *external_pid_file;
-extern int tcp_keepalives_idle;
-extern int tcp_keepalives_interval;
-extern int tcp_keepalives_count;
+extern int tcp_keepalives_idle;
+extern int tcp_keepalives_interval;
+extern int tcp_keepalives_count;
extern void SetConfigOption(const char *name, const char *value,
GucContext context, GucSource source);
/* in commands/tablespace.c */
extern const char *assign_default_tablespace(const char *newval,
- bool doit, GucSource source);
+ bool doit, GucSource source);
/* in utils/adt/regexp.c */
extern const char *assign_regex_flavor(const char *value,
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/hsearch.h,v 1.40 2005/08/20 23:26:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/hsearch.h,v 1.41 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* as key comparison functions.)
*/
typedef int (*HashCompareFunc) (const void *key1, const void *key2,
- Size keysize);
+ Size keysize);
/*
- * Key copying functions must have this signature. The return value is not
+ * Key copying functions must have this signature. The return value is not
* used. (The definition is set up to allow memcpy() and strncpy() to be
* used directly.)
*/
long nsegs; /* Number of allocated segments */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
- long max_dsize; /* 'dsize' limit if directory is fixed
- * size */
+ long max_dsize; /* 'dsize' limit if directory is fixed size */
int nelem_alloc; /* number of entries to allocate at once */
HASHELEMENT *freeList; /* linked list of free elements */
#ifdef HASH_STATISTICS
HashCompareFunc match; /* key comparison function */
HashCopyFunc keycopy; /* key copying function */
HashAllocFunc alloc; /* memory allocator */
- MemoryContext hcxt; /* memory context if default allocator
- * used */
+ MemoryContext hcxt; /* memory context if default allocator used */
char *tabname; /* table name (for error messages) */
bool isshared; /* true if table is in shared memory */
} HTAB;
{
long ssize; /* Segment Size */
long dsize; /* (initial) Directory Size */
- long max_dsize; /* limit to dsize if directory size is
- * limited */
+ long max_dsize; /* limit to dsize if directory size is limited */
long ffactor; /* Fill factor */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/inval.h,v 1.36 2005/06/17 22:32:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/inval.h,v 1.37 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Datum arg);
extern void inval_twophase_postcommit(TransactionId xid, uint16 info,
- void *recdata, uint32 len);
+ void *recdata, uint32 len);
#endif /* INVAL_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/lsyscache.h,v 1.100 2005/06/28 05:09:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/lsyscache.h,v 1.101 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Datum *values, int nvalues,
float4 *numbers, int nnumbers);
extern char *get_namespace_name(Oid nspid);
-extern Oid get_roleid(const char *rolname);
-extern Oid get_roleid_checked(const char *rolname);
+extern Oid get_roleid(const char *rolname);
+extern Oid get_roleid_checked(const char *rolname);
#define is_array_type(typid) (get_element_type(typid) != InvalidOid)
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/nabstime.h,v 1.47 2005/07/22 03:46:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/nabstime.h,v 1.48 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* non-fmgr-callable support routines */
extern AbsoluteTime GetCurrentAbsoluteTime(void);
-extern void abstime2tm(AbsoluteTime time, int *tzp, struct pg_tm *tm, char **tzn);
+extern void abstime2tm(AbsoluteTime time, int *tzp, struct pg_tm * tm, char **tzn);
#endif /* NABSTIME_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/palloc.h,v 1.33 2005/02/18 21:52:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/palloc.h,v 1.34 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
MemoryContextSwitchTo(MemoryContext context)
{
MemoryContext old = CurrentMemoryContext;
+
CurrentMemoryContext = context;
return old;
}
-
#else
extern MemoryContext MemoryContextSwitchTo(MemoryContext context);
-
#endif /* __GNUC__ */
/*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/pg_crc.h,v 1.13 2005/06/02 05:55:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/pg_crc.h,v 1.14 2005/10/15 02:49:46 momjian Exp $
*/
#ifndef PG_CRC_H
#define PG_CRC_H
{
uint32 crc0;
uint32 crc1;
-} pg_crc64;
+} pg_crc64;
/* Initialize a CRC accumulator */
#define INIT_CRC64(crc) ((crc).crc0 = 0xffffffff, (crc).crc1 = 0xffffffff)
/* Constant table for CRC calculation */
extern const uint32 pg_crc64_table0[];
extern const uint32 pg_crc64_table1[];
-
#else /* int64 works */
typedef struct pg_crc64
{
uint64 crc0;
-} pg_crc64;
+} pg_crc64;
/* Initialize a CRC accumulator */
#define INIT_CRC64(crc) ((crc).crc0 = UINT64CONST(0xffffffffffffffff))
/* Constant table for CRC calculation */
extern const uint64 pg_crc64_table[];
#endif /* INT64_IS_BUSTED */
-
-#endif /* PROVIDE_64BIT_CRC */
+#endif /* PROVIDE_64BIT_CRC */
#endif /* PG_CRC_H */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/portal.h,v 1.56 2005/06/17 22:32:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/portal.h,v 1.57 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
MemoryContext heap; /* subsidiary memory for portal */
ResourceOwner resowner; /* resources owned by portal */
void (*cleanup) (Portal portal); /* cleanup hook */
- SubTransactionId createSubid; /* the ID of the creating subxact */
+ SubTransactionId createSubid; /* the ID of the creating subxact */
+
/*
- * if createSubid is InvalidSubTransactionId, the portal is held over
- * from a previous transaction
+ * if createSubid is InvalidSubTransactionId, the portal is held over from
+ * a previous transaction
*/
/* The query or queries the portal will execute */
MemoryContext queryContext; /* where the above trees live */
/*
- * Note: queryContext effectively identifies which prepared statement
- * the portal depends on, if any. The queryContext is *not* owned by
- * the portal and is not to be deleted by portal destruction. (But
- * for a cursor it is the same as "heap", and that context is deleted
- * by portal destruction.)
+ * Note: queryContext effectively identifies which prepared statement the
+ * portal depends on, if any. The queryContext is *not* owned by the
+ * portal and is not to be deleted by portal destruction. (But for a
+ * cursor it is the same as "heap", and that context is deleted by portal
+ * destruction.)
*/
ParamListInfo portalParams; /* params to pass to query */
int16 *formats; /* a format code for each column */
/*
- * Where we store tuples for a held cursor or a PORTAL_UTIL_SELECT
- * query. (A cursor held past the end of its transaction no longer has
- * any active executor state.)
+ * Where we store tuples for a held cursor or a PORTAL_UTIL_SELECT query.
+ * (A cursor held past the end of its transaction no longer has any active
+ * executor state.)
*/
Tuplestorestate *holdStore; /* store for holdable cursors */
MemoryContext holdContext; /* memory containing holdStore */
/*
* atStart, atEnd and portalPos indicate the current cursor position.
- * portalPos is zero before the first row, N after fetching N'th row
- * of query. After we run off the end, portalPos = # of rows in
- * query, and atEnd is true. If portalPos overflows, set posOverflow
- * (this causes us to stop relying on its value for navigation). Note
- * that atStart implies portalPos == 0, but not the reverse (portalPos
- * could have overflowed).
+ * portalPos is zero before the first row, N after fetching N'th row of
+ * query. After we run off the end, portalPos = # of rows in query, and
+ * atEnd is true. If portalPos overflows, set posOverflow (this causes us
+ * to stop relying on its value for navigation). Note that atStart
+ * implies portalPos == 0, but not the reverse (portalPos could have
+ * overflowed).
*/
bool atStart;
bool atEnd;
extern void AtAbort_Portals(void);
extern void AtCleanup_Portals(void);
extern void AtSubCommit_Portals(SubTransactionId mySubid,
- SubTransactionId parentSubid,
- ResourceOwner parentXactOwner);
+ SubTransactionId parentSubid,
+ ResourceOwner parentXactOwner);
extern void AtSubAbort_Portals(SubTransactionId mySubid,
- SubTransactionId parentSubid,
- ResourceOwner parentXactOwner);
+ SubTransactionId parentSubid,
+ ResourceOwner parentXactOwner);
extern void AtSubCleanup_Portals(SubTransactionId mySubid);
extern Portal CreatePortal(const char *name, bool allowDup, bool dupSilent);
extern Portal CreateNewPortal(void);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.86 2005/10/06 02:29:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.87 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct TriggerDesc
{
/*
- * Index data to identify which triggers are which. Since each
- * trigger can appear in more than one class, for each class we
- * provide a list of integer indexes into the triggers array.
+ * Index data to identify which triggers are which. Since each trigger
+ * can appear in more than one class, for each class we provide a list of
+ * integer indexes into the triggers array.
*/
#define TRIGGER_NUM_EVENT_CLASSES 3
bool rd_istemp; /* rel uses the local buffer mgr */
bool rd_isnailed; /* rel is nailed in cache */
bool rd_isvalid; /* relcache entry is valid */
- char rd_indexvalid; /* state of rd_indexlist: 0 = not valid,
- * 1 = valid, 2 = temporarily forced */
+ char rd_indexvalid; /* state of rd_indexlist: 0 = not valid, 1 =
+ * valid, 2 = temporarily forced */
SubTransactionId rd_createSubid; /* rel was created in current xact */
/*
* rd_createSubid is the ID of the highest subtransaction the rel has
- * survived into; or zero if the rel was not created in the current
- * top transaction. This should be relied on only for optimization
- * purposes; it is possible for new-ness to be "forgotten" (eg, after
- * CLUSTER).
+ * survived into; or zero if the rel was not created in the current top
+ * transaction. This should be relied on only for optimization purposes;
+ * it is possible for new-ness to be "forgotten" (eg, after CLUSTER).
*/
Form_pg_class rd_rel; /* RELATION tuple */
TupleDesc rd_att; /* tuple descriptor */
*
* Note: only default operators and support procs for each opclass are
* cached, namely those with subtype zero. The arrays are indexed by
- * strategy or support number, which is a sufficient identifier given
- * that restriction.
+ * strategy or support number, which is a sufficient identifier given that
+ * restriction.
*/
MemoryContext rd_indexcxt; /* private memory cxt for this stuff */
RelationAmInfo *rd_aminfo; /* lookup info for funcs found in pg_am */
Oid *rd_operator; /* OIDs of index operators */
RegProcedure *rd_support; /* OIDs of support procedures */
- FmgrInfo *rd_supportinfo; /* lookup info for support procedures */
+ FmgrInfo *rd_supportinfo; /* lookup info for support procedures */
List *rd_indexprs; /* index expression trees, if any */
List *rd_indpred; /* index predicate tree, if any */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/relcache.h,v 1.51 2005/08/12 01:36:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/relcache.h,v 1.52 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern List *RelationGetIndexPredicate(Relation relation);
extern void RelationSetIndexList(Relation relation,
- List *indexIds, Oid oidIndex);
+ List *indexIds, Oid oidIndex);
extern void RelationInitIndexAccessInfo(Relation relation);
extern void AtEOXact_RelationCache(bool isCommit);
extern void AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
- SubTransactionId parentSubid);
+ SubTransactionId parentSubid);
/*
* Routines to help manage rebuilding of relcache init file
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/selfuncs.h,v 1.23 2005/06/05 22:32:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/selfuncs.h,v 1.24 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define DEFAULT_INEQ_SEL 0.3333333333333333
/* default selectivity estimate for range inequalities "A > b AND A < c" */
-#define DEFAULT_RANGE_INEQ_SEL 0.005
+#define DEFAULT_RANGE_INEQ_SEL 0.005
/* default selectivity estimate for pattern-match operators such as LIKE */
#define DEFAULT_MATCH_SEL 0.005
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/syscache.h,v 1.60 2005/06/28 05:09:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/syscache.h,v 1.61 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define NAMESPACEOID 23
#define OPERNAMENSP 24
#define OPEROID 25
-#define PROCNAMEARGSNSP 26
+#define PROCNAMEARGSNSP 26
#define PROCOID 27
#define RELNAMENSP 28
#define RELOID 29
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/timestamp.h,v 1.56 2005/10/09 17:21:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/timestamp.h,v 1.57 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifdef HAVE_INT64_TIMESTAMP
typedef int64 Timestamp;
typedef int64 TimestampTz;
-
#else
typedef double Timestamp;
typedef double TimestampTz;
typedef struct
{
#ifdef HAVE_INT64_TIMESTAMP
- int64 time; /* all time units other than days,
- * months and years */
+ int64 time; /* all time units other than days, months and
+ * years */
#else
- double time; /* all time units other than days,
- * months and years */
+ double time; /* all time units other than days, months and
+ * years */
#endif
- int32 day; /* days, after time for alignment */
- int32 month; /* months and years, after time for
- * alignment */
+ int32 day; /* days, after time for alignment */
+ int32 month; /* months and years, after time for alignment */
} Interval;
/* in both timestamp.h and ecpg/dt.h */
#define DAYS_PER_YEAR 365.25 /* assumes leap year every four years */
-#define MONTHS_PER_YEAR 12
+#define MONTHS_PER_YEAR 12
/*
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
- * also return a 'time' value to be used as well. ISO 8601 suggests
+ * also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
*/
#define SECS_PER_YEAR (36525 * 864) /* avoid floating-point computation */
#define SECS_PER_DAY 86400
-#define SECS_PER_HOUR 3600
+#define SECS_PER_HOUR 3600
#define SECS_PER_MINUTE 60
#define MINS_PER_HOUR 60
#define DT_NOBEGIN (-INT64CONST(0x7fffffffffffffff) - 1)
#define DT_NOEND (INT64CONST(0x7fffffffffffffff))
-
#else
#define DatumGetTimestamp(X) ((Timestamp) DatumGetFloat8(X))
#ifdef HAVE_INT64_TIMESTAMP
typedef int32 fsec_t;
-
#else
typedef double fsec_t;
/* note: this is also used for rounding off intervals */
#define TS_PREC_INV 1000000.0
#define TSROUND(j) (rint(((double) (j)) * TS_PREC_INV) / TS_PREC_INV)
-
#endif
#define TIMESTAMP_MASK(b) (1 << (b))
extern TimestampTz time_t_to_timestamptz(time_t tm);
-extern int tm2timestamp(struct pg_tm *tm, fsec_t fsec, int *tzp, Timestamp *dt);
-extern int timestamp2tm(Timestamp dt, int *tzp, struct pg_tm *tm,
+extern int tm2timestamp(struct pg_tm * tm, fsec_t fsec, int *tzp, Timestamp *dt);
+extern int timestamp2tm(Timestamp dt, int *tzp, struct pg_tm * tm,
fsec_t *fsec, char **tzn, pg_tz *attimezone);
extern void dt2time(Timestamp dt, int *hour, int *min, int *sec, fsec_t *fsec);
-extern int interval2tm(Interval span, struct pg_tm *tm, fsec_t *fsec);
-extern int tm2interval(struct pg_tm *tm, fsec_t fsec, Interval *span);
+extern int interval2tm(Interval span, struct pg_tm * tm, fsec_t *fsec);
+extern int tm2interval(struct pg_tm * tm, fsec_t fsec, Interval *span);
extern Timestamp SetEpochTimestamp(void);
-extern void GetEpochTime(struct pg_tm *tm);
+extern void GetEpochTime(struct pg_tm * tm);
extern int timestamp_cmp_internal(Timestamp dt1, Timestamp dt2);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/tqual.h,v 1.58 2005/08/20 00:40:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/tqual.h,v 1.59 2005/10/15 02:49:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef SnapshotData *Snapshot;
/* Special snapshot values: */
-#define InvalidSnapshot ((Snapshot) 0x0) /* same as NULL */
+#define InvalidSnapshot ((Snapshot) 0x0) /* same as NULL */
#define SnapshotNow ((Snapshot) 0x1)
#define SnapshotSelf ((Snapshot) 0x2)
#define SnapshotAny ((Snapshot) 0x3)
HEAPTUPLE_DEAD, /* tuple is dead and deletable */
HEAPTUPLE_LIVE, /* tuple is live (committed, no deleter) */
HEAPTUPLE_RECENTLY_DEAD, /* tuple is dead, but not deletable yet */
- HEAPTUPLE_INSERT_IN_PROGRESS, /* inserting xact is still in progress */
+ HEAPTUPLE_INSERT_IN_PROGRESS, /* inserting xact is still in progress */
HEAPTUPLE_DELETE_IN_PROGRESS /* deleting xact is still in progress */
} HTSV_Result;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.7 2004/12/31 22:03:46 pgsql Exp $
+ * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.8 2005/10/15 02:49:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Information obtained from opclass entries
*
- * These will be InvalidOid if no match could be found, or if the
- * information hasn't yet been requested.
+ * These will be InvalidOid if no match could be found, or if the information
+ * hasn't yet been requested.
*/
Oid btree_opc; /* OID of the default btree opclass */
Oid hash_opc; /* OID of the default hash opclass */
/*
* Pre-set-up fmgr call info for the equality operator and the btree
* comparison function. These are kept in the type cache to avoid
- * problems with memory leaks in repeated calls to array_eq and
- * array_cmp. There is not currently a need to maintain call info for
- * the lt_opr or gt_opr.
+ * problems with memory leaks in repeated calls to array_eq and array_cmp.
+ * There is not currently a need to maintain call info for the lt_opr or
+ * gt_opr.
*/
FmgrInfo eq_opr_finfo;
FmgrInfo cmp_proc_finfo;
char *ECPGalloc(long, int);
static int
-deccall2(decimal *arg1, decimal *arg2, int (*ptr) (numeric *, numeric *))
+deccall2(decimal * arg1, decimal * arg2, int (*ptr) (numeric *, numeric *))
{
numeric *a1,
*a2;
}
static int
-deccall3(decimal *arg1, decimal *arg2, decimal *result, int (*ptr) (numeric *, numeric *, numeric *))
+deccall3(decimal * arg1, decimal * arg2, decimal * result, int (*ptr) (numeric *, numeric *, numeric *))
{
numeric *a1,
*a2,
/* we start with the numeric functions */
int
-decadd(decimal *arg1, decimal *arg2, decimal *sum)
+decadd(decimal * arg1, decimal * arg2, decimal * sum)
{
deccall3(arg1, arg2, sum, PGTYPESnumeric_add);
}
int
-deccmp(decimal *arg1, decimal *arg2)
+deccmp(decimal * arg1, decimal * arg2)
{
return (deccall2(arg1, arg2, PGTYPESnumeric_cmp));
}
void
-deccopy(decimal *src, decimal *target)
+deccopy(decimal * src, decimal * target)
{
memcpy(target, src, sizeof(decimal));
}
}
int
-deccvasc(char *cp, int len, decimal *np)
+deccvasc(char *cp, int len, decimal * np)
{
- char *str = ecpg_strndup(cp, len); /* decimal_in always
- * converts the complete
- * string */
+ char *str = ecpg_strndup(cp, len); /* decimal_in always converts
+ * the complete string */
int ret = 0;
numeric *result;
}
int
-deccvdbl(double dbl, decimal *np)
+deccvdbl(double dbl, decimal * np)
{
numeric *nres = PGTYPESnumeric_new();
int result = 1;
}
int
-deccvint(int in, decimal *np)
+deccvint(int in, decimal * np)
{
numeric *nres = PGTYPESnumeric_new();
int result = 1;
}
int
-deccvlong(long lng, decimal *np)
+deccvlong(long lng, decimal * np)
{
numeric *nres = PGTYPESnumeric_new();
int result = 1;
}
int
-decdiv(decimal *n1, decimal *n2, decimal *result)
+decdiv(decimal * n1, decimal * n2, decimal * result)
{
int i;
}
int
-decmul(decimal *n1, decimal *n2, decimal *result)
+decmul(decimal * n1, decimal * n2, decimal * result)
{
int i;
}
int
-decsub(decimal *n1, decimal *n2, decimal *result)
+decsub(decimal * n1, decimal * n2, decimal * result)
{
int i;
}
int
-dectoasc(decimal *np, char *cp, int len, int right)
+dectoasc(decimal * np, char *cp, int len, int right)
{
char *str;
numeric *nres = PGTYPESnumeric_new();
}
int
-dectodbl(decimal *np, double *dblp)
+dectodbl(decimal * np, double *dblp)
{
numeric *nres = PGTYPESnumeric_new();
int i;
}
int
-dectoint(decimal *np, int *ip)
+dectoint(decimal * np, int *ip)
{
int ret;
numeric *nres = PGTYPESnumeric_new();
}
int
-dectolong(decimal *np, long *lngp)
+dectolong(decimal * np, long *lngp)
{
int ret;
numeric *nres = PGTYPESnumeric_new();;
*
*/
int
-rstrdate(char *str, date *d)
+rstrdate(char *str, date * d)
{
date dat;
char strbuf[10];
}
void
-rtoday(date *d)
+rtoday(date * d)
{
PGTYPESdate_today(d);
return;
}
int
-rdefmtdate(date *d, char *fmt, char *str)
+rdefmtdate(date * d, char *fmt, char *str)
{
/* TODO: take care of DBCENTURY environment variable */
/* PGSQL functions allow all centuries */
}
int
-rmdyjul(short mdy[3], date *d)
+rmdyjul(short mdy[3], date * d)
{
int mdy_int[3];
/* And the datetime stuff */
void
-dtcurrent(timestamp *ts)
+dtcurrent(timestamp * ts)
{
PGTYPEStimestamp_current(ts);
}
int
-dtcvasc(char *str, timestamp *ts)
+dtcvasc(char *str, timestamp * ts)
{
timestamp ts_tmp;
int i;
}
int
-dtsub(timestamp *ts1, timestamp *ts2, interval *iv)
+dtsub(timestamp * ts1, timestamp * ts2, interval * iv)
{
return PGTYPEStimestamp_sub(ts1, ts2, iv);
}
int
-dttoasc(timestamp *ts, char *output)
+dttoasc(timestamp * ts, char *output)
{
char *asctime = PGTYPEStimestamp_to_asc(*ts);
}
int
-dttofmtasc(timestamp *ts, char *output, int str_len, char *fmtstr)
+dttofmtasc(timestamp * ts, char *output, int str_len, char *fmtstr)
{
return PGTYPEStimestamp_fmt_asc(ts, output, str_len, fmtstr);
}
int
-intoasc(interval *i, char *str)
+intoasc(interval * i, char *str)
{
str = PGTYPESinterval_to_asc(i);
}
int
-dtcvfmtasc(char *inbuf, char *fmtstr, timestamp *dtvalue)
+dtcvfmtasc(char *inbuf, char *fmtstr, timestamp * dtvalue)
{
return PGTYPEStimestamp_defmt_asc(inbuf, fmtstr, dtvalue);
}
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.25 2005/04/14 10:08:57 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.26 2005/10/15 02:49:47 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
static pthread_mutex_t connections_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_key_t actual_connection_key;
static pthread_once_t actual_connection_key_once = PTHREAD_ONCE_INIT;
-
#endif
static struct connection *actual_connection = NULL;
static struct connection *all_connections = NULL;
{
#ifdef ENABLE_THREAD_SAFETY
ret = pthread_getspecific(actual_connection_key);
- /* if no connection in TSD for this thread, get the global default connection
- * and hope the user knows what they're doing (i.e. using their own mutex to
- * protect that connection from concurrent accesses */
- if(NULL == ret)
+
+ /*
+ * if no connection in TSD for this thread, get the global default
+ * connection and hope the user knows what they're doing (i.e. using
+ * their own mutex to protect that connection from concurrent accesses
+ */
+ if (NULL == ret)
{
ECPGlog("no TSD connection, going for global\n");
ret = actual_connection;
{
#ifdef ENABLE_THREAD_SAFETY
ret = pthread_getspecific(actual_connection_key);
- /* if no connection in TSD for this thread, get the global default connection
- * and hope the user knows what they're doing (i.e. using their own mutex to
- * protect that connection from concurrent accesses */
- if(NULL == ret)
+
+ /*
+ * if no connection in TSD for this thread, get the global default
+ * connection and hope the user knows what they're doing (i.e. using
+ * their own mutex to protect that connection from concurrent accesses
+ */
+ if (NULL == ret)
{
ECPGlog("no TSD connection here either, using global\n");
- ret = actual_connection;
+ ret = actual_connection;
}
else
ECPGlog("got TSD connection\n");
/*
* Informix uses an environment variable DBPATH that overrides the
- * connection parameters given here. We do the same with PG_DBPATH
- * as the syntax is different.
+ * connection parameters given here. We do the same with PG_DBPATH as
+ * the syntax is different.
*/
envname = getenv("PG_DBPATH");
if (envname)
connection_name = "DEFAULT";
if (dbname != NULL)
- {
+ {
/* get the detail information out of dbname */
if (strchr(dbname, '@') != NULL)
{
/* old style: dbname[@server][:port] */
tmp = strrchr(dbname, ':');
- if (tmp != NULL) /* port number given */
+ if (tmp != NULL) /* port number given */
{
port = strdup(tmp + 1);
*tmp = '\0';
}
tmp = strrchr(dbname, '@');
- if (tmp != NULL) /* host name given */
+ if (tmp != NULL) /* host name given */
{
host = strdup(tmp + 1);
*tmp = '\0';
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.28 2005/08/24 10:34:19 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.29 2005/10/15 02:49:47 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
/* We will have to decode the value */
/*
- * check for null value and set indicator accordingly, i.e. -1 if NULL
- * and 0 if not
+ * check for null value and set indicator accordingly, i.e. -1 if NULL and
+ * 0 if not
*/
if (PQgetisnull(results, act_tuple, act_field))
value_for_indicator = -1;
if (force_indicator == false)
{
/*
- * Informix has an additional way to specify NULLs
- * note that this uses special values to denote NULL
+ * Informix has an additional way to specify NULLs note
+ * that this uses special values to denote NULL
*/
ECPGset_noind_null(type, var + offset * act_tuple);
}
if (INFORMIX_MODE(compat))
{
/*
- * Informix wants its own NULL value here
- * instead of an error
+ * Informix wants its own NULL value here instead
+ * of an error
*/
ECPGset_noind_null(ECPGt_numeric, nres);
}
if (INFORMIX_MODE(compat))
{
/*
- * Informix wants its own NULL value here
- * instead of an error
+ * Informix wants its own NULL value here instead
+ * of an error
*/
ECPGset_noind_null(ECPGt_interval, ires);
}
if (INFORMIX_MODE(compat))
{
/*
- * Informix wants its own NULL value here
- * instead of an error
+ * Informix wants its own NULL value here instead
+ * of an error
*/
ECPGset_noind_null(ECPGt_date, &ddres);
}
if (INFORMIX_MODE(compat))
{
/*
- * Informix wants its own NULL value here
- * instead of an error
+ * Informix wants its own NULL value here instead
+ * of an error
*/
ECPGset_noind_null(ECPGt_timestamp, &tres);
}
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/error.c,v 1.10 2003/11/29 19:52:08 pgsql Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/error.c,v 1.11 2005/10/15 02:49:47 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
case ECPG_INT_FORMAT:
snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
- "Not correctly formatted int type: %s line %d.", str, line);
+ "Not correctly formatted int type: %s line %d.", str, line);
break;
case ECPG_UINT_FORMAT:
case ECPG_CONVERT_BOOL:
snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
- "Unable to convert %s to bool on line %d.", str, line);
+ "Unable to convert %s to bool on line %d.", str, line);
break;
case ECPG_EMPTY:
case ECPG_DATA_NOT_ARRAY:
snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
- "Data read from backend is not an array in line %d.", line);
+ "Data read from backend is not an array in line %d.", line);
break;
case ECPG_ARRAY_INSERT:
snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
- "Trying to insert an array of variables in line %d.", line);
+ "Trying to insert an array of variables in line %d.", line);
break;
case ECPG_NO_CONN:
case ECPG_VAR_NOT_CHAR:
snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
- "Variable is not a character type in line %d.", line);
+ "Variable is not a character type in line %d.", line);
break;
case ECPG_TRANS:
case ECPG_CONNECT:
snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
- "Could not connect to database %s in line %d.", str, line);
+ "Could not connect to database %s in line %d.", str, line);
break;
default:
sqlca->sqlcode = ECPG_PGSQL;
ECPGlog("raising sqlstate %.*s in line %d, '%s'.\n",
- sizeof(sqlca->sqlstate), sqlca->sqlstate, line, sqlca->sqlerrm.sqlerrmc);
+ sizeof(sqlca->sqlstate), sqlca->sqlstate, line, sqlca->sqlerrm.sqlerrmc);
/* free all memory we have allocated for the user */
ECPGfree_auto_mem();
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.42 2005/07/04 19:05:45 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.43 2005/10/15 02:49:47 momjian Exp $ */
/*
* The aim is to get a simpler inteface to the database routines.
var->ind_value = var->ind_pointer;
/*
- * negative values are used to indicate an array without given
- * bounds
+ * negative values are used to indicate an array without given bounds
*/
/* reset to zero for us */
if (var->ind_arrsize < 0)
var->ind_varcharsize = 0;
}
}
+
#undef APREF
/*
if ((stmt->connection->cache_head) == NULL)
{
/*
- * Text like types are not an array for ecpg, but postgres counts
- * them as an array. This define reminds you to not 'correct'
- * these values.
+ * Text like types are not an array for ecpg, but postgres counts them
+ * as an array. This define reminds you to not 'correct' these values.
*/
#define not_an_array_in_ecpg ECPG_ARRAY_NONE
int len = strlen(PQgetvalue(results, act_tuple, act_field)) + 1;
if (!ECPGget_data(results, act_tuple, act_field, stmt->lineno,
- var->type, var->ind_type, current_data_location,
+ var->type, var->ind_type, current_data_location,
var->ind_value, len, 0, var->ind_offset, isarray, stmt->compat, stmt->force_indicator))
status = false;
else
char *newcopy = NULL;
/*
- * arrays are not possible unless the attribute is an array too FIXME:
- * we do not know if the attribute is an array here
+ * arrays are not possible unless the attribute is an array too FIXME: we
+ * do not know if the attribute is an array here
*/
#if 0
if (var->arrsize > 1 &&...)
sprintf(mallocedval + strlen(mallocedval), "%c,", (((char *) var->value)[element]) ? 't' : 'f');
/*
- * this is necessary since sizeof(C++'s
- * bool)==sizeof(int)
+ * this is necessary since sizeof(C++'s bool)==sizeof(int)
*/
else if (var->offset == sizeof(int))
for (element = 0; element < var->arrsize; element++)
copiedquery = ECPGstrdup(stmt->command, stmt->lineno);
/*
- * Now, if the type is one of the fill in types then we take the
- * argument and enter that in the string at the first %s position.
- * Then if there are any more fill in types we fill in at the next and
- * so on.
+ * Now, if the type is one of the fill in types then we take the argument
+ * and enter that in the string at the first %s position. Then if there
+ * are any more fill in types we fill in at the next and so on.
*/
var = stmt->inlist;
tobeinserted = NULL;
/*
- * A descriptor is a special case since it contains many variables
- * but is listed only once.
+ * A descriptor is a special case since it contains many variables but
+ * is listed only once.
*/
if (var->type == ECPGt_descriptor)
{
/*
- * We create an additional variable list here, so the same
- * logic applies.
+ * We create an additional variable list here, so the same logic
+ * applies.
*/
struct variable desc_inlist;
struct descriptor *desc;
if (tobeinserted)
{
/*
- * Now tobeinserted points to an area that is to be inserted
- * at the first %s
+ * Now tobeinserted points to an area that is to be inserted at
+ * the first %s
*/
if (!(newcopy = (char *) ECPGalloc(strlen(copiedquery) + strlen(tobeinserted) + 1, stmt->lineno)))
return false;
if ((p = next_insert(newcopy + hostvarl)) == NULL)
{
/*
- * We have an argument but we dont have the matched up
- * string in the string
+ * We have an argument but we dont have the matched up string
+ * in the string
*/
ECPGraise(stmt->lineno, ECPG_TOO_MANY_ARGUMENTS, ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS, NULL);
return false;
hostvarl = strlen(newcopy);
/*
- * The strange thing in the second argument is the rest of
- * the string from the old string
+ * The strange thing in the second argument is the rest of the
+ * string from the old string
*/
strcat(newcopy,
copiedquery
}
/*
- * Now everything is safely copied to the newcopy. Lets free
- * the oldcopy and let the copiedquery get the var->value from
- * the newcopy.
+ * Now everything is safely copied to the newcopy. Lets free the
+ * oldcopy and let the copiedquery get the var->value from the
+ * newcopy.
*/
if (malloced)
{
bool ECPGstore_result(const PGresult *results, int act_field,
const struct statement * stmt, struct variable * var);
bool ECPGstore_input(const int, const bool, const struct variable *, const char **, bool *);
+
#if defined(__GNUC__) && (defined (__powerpc__) || defined(__amd64__) || defined(__x86_64__))
- // work around a gcc/ABI bug with va_lists on ppc+amd64
+ /* work around a gcc/ABI bug with va_lists on ppc+amd64 */
void ECPGget_variable(va_list, enum ECPGttype, struct variable *, bool);
#else
void ECPGget_variable(va_list *, enum ECPGttype, struct variable *, bool);
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/memory.c,v 1.6 2004/12/30 09:36:37 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/memory.c,v 1.7 2005/10/15 02:49:47 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
if (string == NULL)
return NULL;
-
+
new = strdup(string);
if (!new)
{
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.25 2005/09/12 11:57:53 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.26 2005/10/15 02:49:47 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
#ifdef ENABLE_THREAD_SAFETY
static pthread_key_t sqlca_key;
static pthread_once_t sqlca_key_once = PTHREAD_ONCE_INIT;
-
#else
static struct sqlca_t sqlca =
{
ecpg_sqlca_key_destructor(void *arg)
{
if (arg != NULL)
- free(arg); /* sqlca structure allocated in
- * ECPGget_sqlca */
+ free(arg); /* sqlca structure allocated in ECPGget_sqlca */
}
static void
/* if we have no connection we just simulate the command */
if (con && con->connection)
{
- /* If we got a transaction command but have no open transaction,
- * we have to start one, unless we are in autocommit, where the
- * developers have to take care themselves.
- * However, if the command is a begin statement, we just execute it once.
+ /*
+ * If we got a transaction command but have no open transaction, we
+ * have to start one, unless we are in autocommit, where the
+ * developers have to take care themselves. However, if the command is
+ * a begin statement, we just execute it once.
*/
if (con->committed && !con->autocommit && strncmp(transaction, "begin", 5) != 0 && strncmp(transaction, "start", 5) != 0)
{
}
PQclear(res);
}
-
+
res = PQexec(con->connection, transaction);
if (res == NULL || PQresultStatus(res) != PGRES_COMMAND_OK)
{
return;
}
- sprintf(f, "[%d]: %s", (int)getpid(), format);
+ sprintf(f, "[%d]: %s", (int) getpid(), format);
va_start(ap, format);
vfprintf(debugstream, f, ap);
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/prepare.c,v 1.13 2004/10/05 10:48:37 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/prepare.c,v 1.14 2005/10/15 02:49:47 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
if (!string && *ptr == ':')
{
- if (ptr[1]==':')
- ptr+=2; /* skip '::' */
- else
+ if (ptr[1] == ':')
+ ptr += 2; /* skip '::' */
+ else
{
*ptr = '?';
for (++ptr; *ptr && isvarchar(*ptr); ptr++)
if (INFORMIX_MODE(compat))
{
/*
- * Just ignore all errors since we do not know the list of cursors
- * we are allowed to free. We have to trust the software.
+ * Just ignore all errors since we do not know the list of cursors we
+ * are allowed to free. We have to trust the software.
*/
return true;
}
ECPGt_bool,
ECPGt_float, ECPGt_double,
ECPGt_varchar, ECPGt_varchar2,
- ECPGt_numeric, /* this is a decimal that stores its
- * digits in a malloced array */
- ECPGt_decimal, /* this is a decimal that stores its
- * digits in a fixed array */
+ ECPGt_numeric, /* this is a decimal that stores its digits in
+ * a malloced array */
+ ECPGt_decimal, /* this is a decimal that stores its digits in
+ * a fixed array */
ECPGt_date,
ECPGt_timestamp,
ECPGt_interval,
typedef struct
{
#ifdef HAVE_INT64_TIMESTAMP
- int64 time; /* all time units other than months and
- * years */
+ int64 time; /* all time units other than months and years */
#else
- double time; /* all time units other than months and
- * years */
+ double time; /* all time units other than months and years */
#endif
- long month; /* months and years, after time for
- * alignment */
-} interval;
+ long month; /* months and years, after time for alignment */
+} interval;
#ifdef __cplusplus
extern "C"
typedef unsigned char NumericDigit;
typedef struct
{
- int ndigits; /* number of digits in digits[] - can be
- * 0! */
+ int ndigits; /* number of digits in digits[] - can be 0! */
int weight; /* weight of first digit */
int rscale; /* result scale */
int dscale; /* display scale */
- int sign; /* NUMERIC_POS, NUMERIC_NEG, or
- * NUMERIC_NAN */
+ int sign; /* NUMERIC_POS, NUMERIC_NEG, or NUMERIC_NAN */
NumericDigit *buf; /* start of alloc'd space for digits[] */
NumericDigit *digits; /* decimal digits */
-} numeric;
+} numeric;
typedef struct
{
- int ndigits; /* number of digits in digits[] - can be
- * 0! */
+ int ndigits; /* number of digits in digits[] - can be 0! */
int weight; /* weight of first digit */
int rscale; /* result scale */
int dscale; /* display scale */
- int sign; /* NUMERIC_POS, NUMERIC_NEG, or
- * NUMERIC_NAN */
+ int sign; /* NUMERIC_POS, NUMERIC_NEG, or NUMERIC_NAN */
NumericDigit digits[DECSIZE]; /* decimal digits */
-} decimal;
+} decimal;
#ifdef __cplusplus
extern "C"
{
#endif
-numeric *PGTYPESnumeric_new(void);
+ numeric * PGTYPESnumeric_new(void);
void PGTYPESnumeric_free(numeric *);
numeric *PGTYPESnumeric_from_asc(char *, char **);
char *PGTYPESnumeric_to_asc(numeric *, int);
#ifdef HAVE_INT64_TIMESTAMP
typedef int64 timestamp;
typedef int64 TimestampTz;
-
#else
typedef double timestamp;
typedef double TimestampTz;
extern int PGTYPEStimestamp_fmt_asc(timestamp *, char *, int, char *);
extern void PGTYPEStimestamp_current(timestamp *);
extern int PGTYPEStimestamp_defmt_asc(char *, char *, timestamp *);
-extern int PGTYPEStimestamp_add_interval(timestamp *tin, interval *span, timestamp *tout);
-extern int PGTYPEStimestamp_sub_interval(timestamp *tin, interval *span, timestamp *tout);
+extern int PGTYPEStimestamp_add_interval(timestamp * tin, interval * span, timestamp * tout);
+extern int PGTYPEStimestamp_sub_interval(timestamp * tin, interval * span, timestamp * tout);
#ifdef __cplusplus
}
if (i + 1 <= *pstr_len)
{
/*
- * copy over i + 1 bytes, that includes the tailing
- * terminator
+ * copy over i + 1 bytes, that includes the tailing terminator
*/
strncpy(*output, replace_val.str_val, i + 1);
*pstr_len -= i;
dDate = (dt / USECS_PER_DAY);
#else
/* Seconds to days */
- dDate = (dt / (double)SECS_PER_DAY);
+ dDate = (dt / (double) SECS_PER_DAY);
#endif
return dDate;
}
if (ParseDateTime(str, lowstr, field, ftype, MAXDATEFIELDS, &nf, ptr) != 0 ||
- DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, &tzp, EuroDates) != 0)
+ DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, &tzp, EuroDates) != 0)
{
errno = PGTYPES_DATE_BAD_DATE;
return INT_MIN;
}
void
-PGTYPESdate_mdyjul(int *mdy, date *jdate)
+PGTYPESdate_mdyjul(int *mdy, date * jdate)
{
/* month is mdy[0] */
/* day is mdy[1] */
}
void
-PGTYPESdate_today(date *d)
+PGTYPESdate_today(date * d)
{
struct tm ts;
#define PGTYPES_DATE_NUM_MAX_DIGITS 20 /* should suffice for most
* years... */
-#define PGTYPES_FMTDATE_DAY_DIGITS_LZ 1 /* LZ means "leading
- * zeroes" */
+#define PGTYPES_FMTDATE_DAY_DIGITS_LZ 1 /* LZ means "leading zeroes" */
#define PGTYPES_FMTDATE_DOW_LITERAL_SHORT 2
#define PGTYPES_FMTDATE_MONTH_DIGITS_LZ 3
#define PGTYPES_FMTDATE_MONTH_LITERAL_SHORT 4
} mapping[] =
{
/*
- * format items have to be sorted according to their length, since
- * the first pattern that matches gets replaced by its value
+ * format items have to be sorted according to their length, since the
+ * first pattern that matches gets replaced by its value
*/
{
"ddd", PGTYPES_FMTDATE_DOW_LITERAL_SHORT
/*
* doesn't happen (we set replace_type to
- * PGTYPES_TYPE_STRING_CONSTANT in case of an error
- * above)
+ * PGTYPES_TYPE_STRING_CONSTANT in case of an error above)
*/
break;
}
#define PGTYPES_DATE_MONTH_MAXLENGTH 20 /* probably even less :-) */
int
-PGTYPESdate_defmt_asc(date *d, char *fmt, char *str)
+PGTYPESdate_defmt_asc(date * d, char *fmt, char *str)
{
/*
- * token[2] = { 4,6 } means that token 2 starts at position 4 and ends
- * at (including) position 6
+ * token[2] = { 4,6 } means that token 2 starts at position 4 and ends at
+ * (including) position 6
*/
int token[3][2];
int token_values[3] = {-1, -1, -1};
char *str_copy;
struct tm tm;
- tm.tm_year = tm.tm_mon = tm.tm_mday = 0; /* keep compiler quiet */
+ tm.tm_year = tm.tm_mon = tm.tm_mday = 0; /* keep compiler quiet */
if (!d || !str || !fmt)
{
/* okay, this really is the special case */
/*
- * as long as the string, one additional byte for the terminator
- * and 2 for the delimiters between the 3 fiedls
+ * as long as the string, one additional byte for the terminator and 2
+ * for the delimiters between the 3 fiedls
*/
str_copy = pgtypes_alloc(strlen(str) + 1 + 2);
if (!str_copy)
target_pos = 0;
/*
- * XXX: Here we could calculate the positions of the tokens and
- * save the for loop down there where we again check with
- * isdigit() for digits.
+ * XXX: Here we could calculate the positions of the tokens and save
+ * the for loop down there where we again check with isdigit() for
+ * digits.
*/
for (i = 0; i < 3; i++)
{
}
/*
- * we're at the end of the input string, but maybe we are still
- * reading a number...
+ * we're at the end of the input string, but maybe we are still reading a
+ * number...
*/
if (reading_digit)
{
if (token_count < 2)
{
/*
- * not all tokens found, no way to find 2 missing tokens with
- * string matches
+ * not all tokens found, no way to find 2 missing tokens with string
+ * matches
*/
free(str_copy);
errno = PGTYPES_DATE_ERR_ENOTDMY;
{
/*
* not all tokens found but we may find another one with string
- * matches by testing for the months names and months
- * abbreviations
+ * matches by testing for the months names and months abbreviations
*/
char *month_lower_tmp = pgtypes_alloc(PGTYPES_DATE_MONTH_MAXLENGTH);
char *start_pos;
offset = start_pos - str_copy;
/*
- * sort the new token into the numeric tokens, shift them
- * if necessary
+ * sort the new token into the numeric tokens, shift them if
+ * necessary
*/
if (offset < token[0][0])
{
token[token_count][1] = offset + strlen(month_lower_tmp) - 1;
/*
- * the value is the index of the month in the array of
- * months + 1 (January is month 0)
+ * the value is the index of the month in the array of months
+ * + 1 (January is month 0)
*/
token_values[token_count] = i + 1;
found = 1;
}
/*
- * evil[tm] hack: if we read the pgtypes_date_months and
- * haven't found a match, reset list to point to
- * pgtypes_date_months_short and reset the counter variable i
+ * evil[tm] hack: if we read the pgtypes_date_months and haven't
+ * found a match, reset list to point to pgtypes_date_months_short
+ * and reset the counter variable i
*/
if (list == pgtypes_date_months)
{
* here we found a month. token[token_count] and
* token_values[token_count] reflect the month's details.
*
- * only the month can be specified with a literal. Here we can do a
- * quick check if the month is at the right position according to
- * the format string because we can check if the token that we
- * expect to be the month is at the position of the only token
- * that already has a value. If we wouldn't check here we could
- * say "December 4 1990" with a fmt string of "dd mm yy" for 12
- * April 1990.
+ * only the month can be specified with a literal. Here we can do a quick
+ * check if the month is at the right position according to the format
+ * string because we can check if the token that we expect to be the
+ * month is at the position of the only token that already has a
+ * value. If we wouldn't check here we could say "December 4 1990"
+ * with a fmt string of "dd mm yy" for 12 April 1990.
*/
if (fmt_token_order[token_count] != 'm')
{
#ifdef HAVE_INT64_TIMESTAMP
typedef int32 fsec_t;
-
#else
typedef double fsec_t;
/* note: this is also used for rounding off intervals */
#define TS_PREC_INV 1000000.0
#define TSROUND(j) (rint(((double) (j)) * TS_PREC_INV) / TS_PREC_INV)
-
#endif
#define USE_POSTGRES_DATES 0
#define DTK_DATE_M (DTK_M(YEAR) | DTK_M(MONTH) | DTK_M(DAY))
#define DTK_TIME_M (DTK_M(HOUR) | DTK_M(MINUTE) | DTK_M(SECOND))
-#define MAXDATELEN 51 /* maximum possible length of an input
- * date string (not counting tr. null) */
-#define MAXDATEFIELDS 25 /* maximum possible number of fields in a
- * date string */
+#define MAXDATELEN 51 /* maximum possible length of an input date
+ * string (not counting tr. null) */
+#define MAXDATEFIELDS 25 /* maximum possible number of fields in a date
+ * string */
#define TOKMAXLEN 10 /* only this many chars are stored in
* datetktbl */
/* in both timestamp.h and ecpg/dt.h */
#define DAYS_PER_YEAR 365.25 /* assumes leap year every four years */
-#define MONTHS_PER_YEAR 12
+#define MONTHS_PER_YEAR 12
/*
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
- * also return a 'time' value to be used as well. ISO 8601 suggests
+ * also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
*/
#define SECS_PER_YEAR (36525 * 864) /* avoid floating-point computation */
#define SECS_PER_DAY 86400
-#define SECS_PER_HOUR 3600
+#define SECS_PER_HOUR 3600
#define SECS_PER_MINUTE 60
#define MINS_PER_HOUR 60
#define DT_NOBEGIN (-INT64CONST(0x7fffffffffffffff) - 1)
#define DT_NOEND (INT64CONST(0x7fffffffffffffff))
-
#else
#ifdef HUGE_VAL
int DecodeTimeOnly(char **field, int *ftype,
int nf, int *dtype,
- struct tm *tm, fsec_t *fsec, int *tzp);
+ struct tm * tm, fsec_t *fsec, int *tzp);
int DecodeInterval(char **field, int *ftype,
int nf, int *dtype,
- struct tm *tm, fsec_t *fsec);
+ struct tm * tm, fsec_t *fsec);
-int EncodeTimeOnly(struct tm *tm, fsec_t fsec, int *tzp, int style, char *str);
-int EncodeDateTime(struct tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str, bool);
-int EncodeInterval(struct tm *tm, fsec_t fsec, int style, char *str);
+int EncodeTimeOnly(struct tm * tm, fsec_t fsec, int *tzp, int style, char *str);
+int EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str, bool);
+int EncodeInterval(struct tm * tm, fsec_t fsec, int style, char *str);
int tm2timestamp(struct tm *, fsec_t, int *, timestamp *);
extern char *pgtypes_date_months[];
extern char *months[];
extern char *days[];
-extern int day_tab[2][13];
+extern int day_tab[2][13];
#endif /* DT_H */
#include "dt.h"
#include "pgtypes_timestamp.h"
-int day_tab[2][13] = {
+int day_tab[2][13] = {
{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 0},
{31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 0}};
{"lhdt", DTZ, POS(44)}, /* Lord Howe Daylight Time, Australia */
{"lhst", TZ, POS(42)}, /* Lord Howe Standard Time, Australia */
{"ligt", TZ, POS(40)}, /* From Melbourne, Australia */
- {"lint", TZ, POS(56)}, /* Line Islands Time (Kiribati; +14
- * hours!) */
+ {"lint", TZ, POS(56)}, /* Line Islands Time (Kiribati; +14 hours!) */
{"lkt", TZ, POS(24)}, /* Lanka Time */
{"m", UNITS, DTK_MONTH}, /* "month" for ISO input */
{"magst", DTZ, POS(48)}, /* Magadan Summer Time */
* Encode date as local time.
*/
int
-EncodeDateOnly(struct tm *tm, int style, char *str, bool EuroDates)
+EncodeDateOnly(struct tm * tm, int style, char *str, bool EuroDates)
{
if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR)
return -1;
tm->tm_year, tm->tm_mon, tm->tm_mday);
else
sprintf(str, "%04d-%02d-%02d %s",
- -(tm->tm_year - 1), tm->tm_mon, tm->tm_mday, "BC");
+ -(tm->tm_year - 1), tm->tm_mon, tm->tm_mday, "BC");
break;
case USE_SQL_DATES:
* European - dd/mm/yyyy
*/
int
-EncodeDateTime(struct tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str, bool EuroDates)
+EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str, bool EuroDates)
{
int day,
hour,
/* Compatible with ISO-8601 date formats */
sprintf(str, "%04d-%02d-%02d %02d:%02d",
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
sprintf(str + strlen(str), " BC");
/*
- * tzp == NULL indicates that we don't want *any* time zone
- * info in the output string. *tzn != NULL indicates that we
- * have alpha time zone info available. tm_isdst != -1
- * indicates that we have a valid time zone translation.
+ * tzp == NULL indicates that we don't want *any* time zone info
+ * in the output string. *tzn != NULL indicates that we have alpha
+ * time zone info available. tm_isdst != -1 indicates that we have
+ * a valid time zone translation.
*/
if (tzp != NULL && tm->tm_isdst >= 0)
{
sprintf(str, "%02d/%02d", tm->tm_mon, tm->tm_mday);
sprintf(str + 5, "/%04d %02d:%02d",
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
sprintf(str, "%02d.%02d", tm->tm_mday, tm->tm_mon);
sprintf(str + 5, ".%04d %02d:%02d",
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
sprintf(str + 10, " %02d:%02d", tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
sprintf(str + strlen(str), ":%02d", tm->tm_sec);
sprintf(str + strlen(str), " %04d",
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1));
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1));
if (tm->tm_year <= 0)
sprintf(str + strlen(str), " BC");
{
/*
* We have a time zone, but no string version. Use the
- * numeric form, but be sure to include a leading
- * space to avoid formatting something which would be
- * rejected by the date/time parser later. - thomas
- * 2001-10-19
+ * numeric form, but be sure to include a leading space to
+ * avoid formatting something which would be rejected by
+ * the date/time parser later. - thomas 2001-10-19
*/
hour = -(*tzp / SECS_PER_HOUR);
min = (abs(*tzp) / MINS_PER_HOUR) % MINS_PER_HOUR;
} /* EncodeDateTime() */
void
-GetEpochTime(struct tm *tm)
+GetEpochTime(struct tm * tm)
{
struct tm *t0;
time_t epoch = 0;
} /* GetEpochTime() */
static void
-abstime2tm(AbsoluteTime _time, int *tzp, struct tm *tm, char **tzn)
+abstime2tm(AbsoluteTime _time, int *tzp, struct tm * tm, char **tzn)
{
time_t time = (time_t) _time;
struct tm *tx;
*tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */
/*
- * XXX FreeBSD man pages indicate that this should work - tgl
- * 97/04/23
+ * XXX FreeBSD man pages indicate that this should work - tgl 97/04/23
*/
if (tzn != NULL)
{
/*
- * Copy no more than MAXTZLEN bytes of timezone to tzn, in
- * case it contains an error message, which doesn't fit in the
- * buffer
+ * Copy no more than MAXTZLEN bytes of timezone to tzn, in case it
+ * contains an error message, which doesn't fit in the buffer
*/
StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
if (strlen(tm->tm_zone) > MAXTZLEN)
if (tzn != NULL)
{
/*
- * Copy no more than MAXTZLEN bytes of timezone to tzn, in
- * case it contains an error message, which doesn't fit in the
- * buffer
+ * Copy no more than MAXTZLEN bytes of timezone to tzn, in case it
+ * contains an error message, which doesn't fit in the buffer
*/
StrNCpy(*tzn, TZNAME_GLOBAL[tm->tm_isdst], MAXTZLEN + 1);
if (strlen(TZNAME_GLOBAL[tm->tm_isdst]) > MAXTZLEN)
}
void
-GetCurrentDateTime(struct tm *tm)
+GetCurrentDateTime(struct tm * tm)
{
int tz;
* the *only* call of mktime() in the backend.
*/
static int
-DetermineLocalTimeZone(struct tm *tm)
+DetermineLocalTimeZone(struct tm * tm)
{
int tz;
#if defined(HAVE_TM_ZONE) || defined(HAVE_INT_TIMEZONE)
/*
- * Some buggy mktime() implementations may change the
- * year/month/day when given a time right at a DST boundary. To
- * prevent corruption of the caller's data, give mktime() a
- * copy...
+ * Some buggy mktime() implementations may change the year/month/day
+ * when given a time right at a DST boundary. To prevent corruption
+ * of the caller's data, give mktime() a copy...
*/
struct tm tt,
*tmp = &tt;
/* indicate timezone unknown */
tmp->tm_isdst = -1;
- if (mktime(tmp) != (time_t)-1 && tmp->tm_isdst >= 0)
+ if (mktime(tmp) != (time_t) -1 && tmp->tm_isdst >= 0)
{
/* mktime() succeeded, trust its result */
tm->tm_isdst = tmp->tm_isdst;
mytime = (time_t) mysec;
/*
- * Use localtime to convert that time_t to broken-down time,
- * and reassemble to get a representation of local time.
+ * Use localtime to convert that time_t to broken-down time, and
+ * reassemble to get a representation of local time.
*/
tmp = localtime(&mytime);
day = (date2j(tmp->tm_year + 1900, tmp->tm_mon + 1, tmp->tm_mday) -
delta1 = mysec - locsec;
/*
- * However, if that GMT time and the local time we are
- * actually interested in are on opposite sides of a
- * daylight-savings-time transition, then this is not the time
- * offset we want. So, adjust the time_t to be what we think
- * the GMT time corresponding to our target local time is, and
- * repeat the localtime() call and delta calculation. We may
- * have to do it twice before we have a trustworthy delta.
+ * However, if that GMT time and the local time we are actually
+ * interested in are on opposite sides of a daylight-savings-time
+ * transition, then this is not the time offset we want. So,
+ * adjust the time_t to be what we think the GMT time
+ * corresponding to our target local time is, and repeat the
+ * localtime() call and delta calculation. We may have to do it
+ * twice before we have a trustworthy delta.
*
- * Note: think not to put a loop here, since if we've been given
- * an "impossible" local time (in the gap during a
- * spring-forward transition) we'd never get out of the loop.
- * Twice is enough to give the behavior we want, which is that
- * "impossible" times are taken as standard time, while at a
- * fall-back boundary ambiguous times are also taken as
- * standard.
+ * Note: think not to put a loop here, since if we've been given an
+ * "impossible" local time (in the gap during a spring-forward
+ * transition) we'd never get out of the loop. Twice is enough to
+ * give the behavior we want, which is that "impossible" times are
+ * taken as standard time, while at a fall-back boundary ambiguous
+ * times are also taken as standard.
*/
mysec += delta1;
mytime = (time_t) mysec;
*/
static int
DecodeNumberField(int len, char *str, int fmask,
-int *tmask, struct tm *tm, fsec_t *fsec, int *is2digits, bool EuroDates)
+ int *tmask, struct tm * tm, fsec_t *fsec, int *is2digits, bool EuroDates)
{
char *cp;
/*
- * Have a decimal point? Then this is a date or something with a
- * seconds field...
+ * Have a decimal point? Then this is a date or something with a seconds
+ * field...
*/
if ((cp = strchr(str, '.')) != NULL)
{
*/
static int
DecodeNumber(int flen, char *str, int fmask,
-int *tmask, struct tm *tm, fsec_t *fsec, int *is2digits, bool EuroDates)
+ int *tmask, struct tm * tm, fsec_t *fsec, int *is2digits, bool EuroDates)
{
int val;
char *cp;
if (*cp == '.')
{
/*
- * More than two digits? Then could be a date or a run-together
- * time: 2001.360 20011225 040506.789
+ * More than two digits? Then could be a date or a run-together time:
+ * 2001.360 20011225 040506.789
*/
if (cp - str > 2)
return DecodeNumberField(flen, str, (fmask | DTK_DATE_M),
- tmask, tm, fsec, is2digits, EuroDates);
+ tmask, tm, fsec, is2digits, EuroDates);
*fsec = strtod(cp, &cp);
if (*cp != '\0')
}
/* no year and EuroDates enabled? then could be day */
else if ((EuroDates || (fmask & DTK_M(MONTH))) &&
- !(fmask & DTK_M(YEAR)) && !(fmask & DTK_M(DAY)) &&
- val >= 1 && val <= 31)
+ !(fmask & DTK_M(YEAR)) && !(fmask & DTK_M(DAY)) &&
+ val >= 1 && val <= 31)
{
*tmask = DTK_M(DAY);
tm->tm_mday = val;
}
/*
- * Check for 2 or 4 or more digits, but currently we reach here only
- * if two digits. - thomas 2000-03-28
+ * Check for 2 or 4 or more digits, but currently we reach here only if
+ * two digits. - thomas 2000-03-28
*/
else if (!(fmask & DTK_M(YEAR)) && (flen >= 4 || flen == 2))
{
* Insist on a complete set of fields.
*/
static int
-DecodeDate(char *str, int fmask, int *tmask, struct tm *tm, bool EuroDates)
+DecodeDate(char *str, int fmask, int *tmask, struct tm * tm, bool EuroDates)
{
fsec_t fsec;
* can be used to represent time spans.
*/
static int
-DecodeTime(char *str, int fmask, int *tmask, struct tm *tm, fsec_t *fsec)
+DecodeTime(char *str, int fmask, int *tmask, struct tm * tm, fsec_t *fsec)
{
char *cp;
char fstr[MAXDATELEN + 1];
/*
- * OK, we have at most six digits to work with. Let's
- * construct a string and then do the conversion to an
- * integer.
+ * OK, we have at most six digits to work with. Let's construct a
+ * string and then do the conversion to an integer.
*/
strncpy(fstr, (cp + 1), 7);
strcpy(fstr + strlen(fstr), "000000");
*/
int
ParseDateTime(char *timestr, char *lowstr,
- char **field, int *ftype, int maxfields, int *numfields, char **endstr)
+ char **field, int *ftype, int maxfields, int *numfields, char **endstr)
{
int nf = 0;
char *lp = lowstr;
*lp++ = *(*endstr)++;
/*
- * insist that the delimiters match to get a
- * three-field date.
+ * insist that the delimiters match to get a three-field
+ * date.
*/
if (*(*endstr) == *dp)
{
}
/*
- * otherwise, number only and will determine year, month, day,
- * or concatenated fields later...
+ * otherwise, number only and will determine year, month, day, or
+ * concatenated fields later...
*/
else
ftype[nf] = DTK_NUMBER;
}
/*
- * text? then date string, month, day of week, special, or
- * timezone
+ * text? then date string, month, day of week, special, or timezone
*/
else if (isalpha((unsigned char) *(*endstr)))
{
*lp++ = pg_tolower((unsigned char) *(*endstr)++);
/*
- * Full date string with leading text month? Could also be a
- * POSIX time zone...
+ * Full date string with leading text month? Could also be a POSIX
+ * time zone...
*/
if (*(*endstr) == '-' || *(*endstr) == '/' || *(*endstr) == '.')
{
*/
int
DecodeDateTime(char **field, int *ftype, int nf,
- int *dtype, struct tm *tm, fsec_t *fsec, int *tzp, bool EuroDates)
+ int *dtype, struct tm * tm, fsec_t *fsec, int *tzp, bool EuroDates)
{
int fmask = 0,
tmask,
type;
- int ptype = 0; /* "prefix type" for ISO y2001m02d04
- * format */
+ int ptype = 0; /* "prefix type" for ISO y2001m02d04 format */
int i;
int val;
int mer = HR24;
/*
* Starts with a digit but we already have a time
- * field? Then we are in trouble with a date and
- * time already...
+ * field? Then we are in trouble with a date and time
+ * already...
*/
if ((fmask & DTK_TIME_M) == DTK_TIME_M)
return -1;
*cp = '\0';
/*
- * Then read the rest of the field as a
- * concatenated time
+ * Then read the rest of the field as a concatenated
+ * time
*/
if ((ftype[i] = DecodeNumberField(strlen(field[i]), field[i], fmask,
- &tmask, tm, fsec, &is2digits, EuroDates)) < 0)
+ &tmask, tm, fsec, &is2digits, EuroDates)) < 0)
return -1;
/*
* DecodeTime()
*/
/* test for > 24:00:00 */
- if (tm->tm_hour > 24 ||
- (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)))
+ if (tm->tm_hour > 24 ||
+ (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)))
return -1;
break;
return -1;
/*
- * Already have a time zone? Then maybe this is the
- * second field of a POSIX time: EST+3 (equivalent to
- * PST)
+ * Already have a time zone? Then maybe this is the second
+ * field of a POSIX time: EST+3 (equivalent to PST)
*/
if (i > 0 && (fmask & DTK_M(TZ)) != 0 &&
ftype[i - 1] == DTK_TZ &&
case DTK_TIME:
/* previous field was "t" for ISO time */
if ((ftype[i] = DecodeNumberField(strlen(field[i]), field[i], (fmask | DTK_DATE_M),
- &tmask, tm, fsec, &is2digits, EuroDates)) < 0)
+ &tmask, tm, fsec, &is2digits, EuroDates)) < 0)
return -1;
if (tmask != DTK_TIME_M)
else if (cp != NULL && flen - strlen(cp) > 2)
{
/*
- * Interpret as a concatenated date or time Set
- * the type field to allow decoding other fields
- * later. Example: 20011223 or 040506
+ * Interpret as a concatenated date or time Set the
+ * type field to allow decoding other fields later.
+ * Example: 20011223 or 040506
*/
if ((ftype[i] = DecodeNumberField(flen, field[i], fmask,
- &tmask, tm, fsec, &is2digits, EuroDates)) < 0)
+ &tmask, tm, fsec, &is2digits, EuroDates)) < 0)
return -1;
}
else if (flen > 4)
{
if ((ftype[i] = DecodeNumberField(flen, field[i], fmask,
- &tmask, tm, fsec, &is2digits, EuroDates)) < 0)
+ &tmask, tm, fsec, &is2digits, EuroDates)) < 0)
return -1;
}
/* otherwise it is a single date/time field... */
else if (DecodeNumber(flen, field[i], fmask,
- &tmask, tm, fsec, &is2digits, EuroDates) != 0)
+ &tmask, tm, fsec, &is2digits, EuroDates) != 0)
return -1;
}
break;
case MONTH:
/*
- * already have a (numeric) month? then see if we
- * can substitute...
+ * already have a (numeric) month? then see if we can
+ * substitute...
*/
if ((fmask & DTK_M(MONTH)) && !haveTextMonth &&
!(fmask & DTK_M(DAY)) && tm->tm_mon >= 1 && tm->tm_mon <= 31)
case DTZMOD:
/*
- * daylight savings time modifier (solves "MET
- * DST" syntax)
+ * daylight savings time modifier (solves "MET DST"
+ * syntax)
*/
tmask |= DTK_M(DTZ);
tm->tm_isdst = 1;
case DTZ:
/*
- * set mask for TZ here _or_ check for DTZ later
- * when getting default timezone
+ * set mask for TZ here _or_ check for DTZ later when
+ * getting default timezone
*/
tmask |= DTK_M(TZ);
tm->tm_isdst = 1;
case ISOTIME:
/*
- * This is a filler field "t" indicating that the
- * next field is time. Try to verify that this is
- * sensible.
+ * This is a filler field "t" indicating that the next
+ * field is time. Try to verify that this is sensible.
*/
tmask = 0;
***/
if (i >= nf - 1 ||
(ftype[i + 1] != DTK_NUMBER &&
- ftype[i + 1] != DTK_TIME &&
- ftype[i + 1] != DTK_DATE))
+ ftype[i + 1] != DTK_TIME &&
+ ftype[i + 1] != DTK_DATE))
return -1;
ptype = val;
return ((fmask & DTK_TIME_M) == DTK_TIME_M) ? 1 : -1;
/*
- * check for valid day of month, now that we know for sure the
- * month and year...
+ * check for valid day of month, now that we know for sure the month
+ * and year...
*/
if (tm->tm_mday < 1 || tm->tm_mday > day_tab[isleap(tm->tm_year)][tm->tm_mon - 1])
return -1;
if ((fmask & DTK_DATE_M) == DTK_DATE_M && tzp != NULL && !(fmask & DTK_M(TZ)))
{
/*
- * daylight savings time modifier but no standard timezone?
- * then error
+ * daylight savings time modifier but no standard timezone? then
+ * error
*/
if (fmask & DTK_M(DTZMOD))
return -1;
* functions gets called as find_end_token("28the day12the hour", "the
* day%hthehour")
*
- * fmt points to "the day%hthehour", next_percent points to %hthehour and
- * we have to find a match for everything between these positions
- * ("the day"). We look for "the day" in str and know that the pattern
- * we are about to scan ends where this string starts (right after the
- * "28")
+ * fmt points to "the day%hthehour", next_percent points to %hthehour and we
+ * have to find a match for everything between these positions ("the
+ * day"). We look for "the day" in str and know that the pattern we are
+ * about to scan ends where this string starts (right after the "28")
*
- * At the end, *fmt is '\0' and *str isn't. end_position then is
- * unchanged.
+ * At the end, *fmt is '\0' and *str isn't. end_position then is unchanged.
*/
char *end_position = NULL;
char *next_percent,
while (fmt[scan_offset] == '%' && fmt[scan_offset + 1])
{
/*
- * there is no delimiter, skip to the next delimiter if we're
- * reading a number and then something that is not a number
- * "9:15pm", we might be able to recover with the strtol end
- * pointer. Go for the next percent sign
+ * there is no delimiter, skip to the next delimiter if we're reading
+ * a number and then something that is not a number "9:15pm", we might
+ * be able to recover with the strtol end pointer. Go for the next
+ * percent sign
*/
scan_offset += 2;
}
if (next_percent)
{
/*
- * we don't want to allocate extra memory, so we temporarily set
- * the '%' sign to '\0' and call strstr However since we allow
- * whitespace to float around everything, we have to shorten the
- * pattern until we reach a non-whitespace character
+ * we don't want to allocate extra memory, so we temporarily set the
+ * '%' sign to '\0' and call strstr However since we allow whitespace
+ * to float around everything, we have to shorten the pattern until we
+ * reach a non-whitespace character
*/
subst_location = next_percent;
*subst_location = '\0';
/*
- * the haystack is the str and the needle is the original fmt but
- * it ends at the position where the next percent sign would be
+ * the haystack is the str and the needle is the original fmt but it
+ * ends at the position where the next percent sign would be
*/
/*
- * There is one special case. Imagine: str = " 2", fmt = "%d
- * %...", since we want to allow blanks as "dynamic" padding we
- * have to accept this. Now, we are called with a fmt of " %..."
- * and look for " " in str. We find it at the first position and
- * never read the 2...
+ * There is one special case. Imagine: str = " 2", fmt = "%d %...",
+ * since we want to allow blanks as "dynamic" padding we have to
+ * accept this. Now, we are called with a fmt of " %..." and look for
+ * " " in str. We find it at the first position and never read the
+ * 2...
*/
while (*str == ' ')
str++;
else
{
/*
- * there is no other percent sign. So everything up to the end has
- * to match.
+ * there is no other percent sign. So everything up to the end has to
+ * match.
*/
end_position = str + strlen(str);
}
*
* and have set fmt to " " because overwrote the % sign with a NULL
*
- * In this case where we would have to match a space but can't find
- * it, set end_position to the end of the string
+ * In this case where we would have to match a space but can't find it,
+ * set end_position to the end of the string
*/
if ((fmt + scan_offset)[0] == ' ' && fmt + scan_offset + 1 == subst_location)
end_position = str + strlen(str);
pgtypes_defmt_scan(union un_fmt_comb * scan_val, int scan_type, char **pstr, char *pfmt)
{
/*
- * scan everything between pstr and pstr_end. This is not including
- * the last character so we might set it to '\0' for the parsing
+ * scan everything between pstr and pstr_end. This is not including the
+ * last character so we might set it to '\0' for the parsing
*/
char last_char;
case PGTYPES_TYPE_UINT:
/*
- * numbers may be blank-padded, this is the only deviation
- * from the fmt-string we accept
+ * numbers may be blank-padded, this is the only deviation from
+ * the fmt-string we accept
*/
while (**pstr == ' ')
(*pstr)++;
int *, int *, int *, int *);
int
-PGTYPEStimestamp_defmt_scan(char **str, char *fmt, timestamp *d,
+PGTYPEStimestamp_defmt_scan(char **str, char *fmt, timestamp * d,
int *year, int *month, int *day,
int *hour, int *minute, int *second,
int *tz)
pfmt++;
/*
- * we parse the day and see if it is a week day but we do
- * not check if the week day really matches the date
+ * we parse the day and see if it is a week day but we do not
+ * check if the week day really matches the date
*/
err = 1;
j = 0;
while (pgtypes_date_weekdays_short[j])
{
if (strncmp(pgtypes_date_weekdays_short[j], pstr,
- strlen(pgtypes_date_weekdays_short[j])) == 0)
+ strlen(pgtypes_date_weekdays_short[j])) == 0)
{
/* found it */
err = 0;
case 'D':
/*
- * we have to concatenate the strings in order to be able
- * to find the end of the substitution
+ * we have to concatenate the strings in order to be able to
+ * find the end of the substitution
*/
pfmt++;
tmp = pgtypes_alloc(strlen("%m/%d/%y") + strlen(pstr) + 1);
/*
* XXX what should we do with that? We could say that it's
- * sufficient if we have the year and the day within the
- * year to get at least a specific day.
+ * sufficient if we have the year and the day within the year
+ * to get at least a specific day.
*/
break;
case 'M':
err = pgtypes_defmt_scan(&scan_val, scan_type, &pstr, pfmt);
/*
- * XXX use DecodeSpecial instead ? - it's declared static
- * but the arrays as well. :-(
+ * XXX use DecodeSpecial instead ? - it's declared static but
+ * the arrays as well. :-(
*/
for (j = 0; !err && j < szdatetktbl; j++)
{
{
/*
* tz calculates the offset for the seconds, the
- * timezone value of the datetktbl table is in
- * quarter hours
+ * timezone value of the datetktbl table is in quarter
+ * hours
*/
*tz = -15 * MINS_PER_HOUR * datetktbl[j].value;
break;
err = 1;
*minute = 0;
}
- if (*hour > 24 || /* test for > 24:00:00 */
+ if (*hour > 24 || /* test for > 24:00:00 */
(*hour == 24 && (*minute > 0 || *second > 0)))
{
err = 1;
#define PGTYPES_TYPE_DOUBLE_NF 4 /* no fractional part */
#define PGTYPES_TYPE_INT64 5
#define PGTYPES_TYPE_UINT 6
-#define PGTYPES_TYPE_UINT_2_LZ 7 /* 2 digits, pad with
- * leading zero */
-#define PGTYPES_TYPE_UINT_2_LS 8 /* 2 digits, pad with
- * leading space */
+#define PGTYPES_TYPE_UINT_2_LZ 7 /* 2 digits, pad with leading
+ * zero */
+#define PGTYPES_TYPE_UINT_2_LS 8 /* 2 digits, pad with leading
+ * space */
#define PGTYPES_TYPE_UINT_3_LZ 9
#define PGTYPES_TYPE_UINT_4_LZ 10
#define PGTYPES_TYPE_UINT_LONG 11
* can be used to represent time spans.
*/
static int
-DecodeTime(char *str, int fmask, int *tmask, struct tm *tm, fsec_t *fsec)
+DecodeTime(char *str, int fmask, int *tmask, struct tm * tm, fsec_t *fsec)
{
char *cp;
char fstr[MAXDATELEN + 1];
/*
- * OK, we have at most six digits to work with. Let's
- * construct a string and then do the conversion to an
- * integer.
+ * OK, we have at most six digits to work with. Let's construct a
+ * string and then do the conversion to an integer.
*/
strncpy(fstr, (cp + 1), 7);
strcpy(fstr + strlen(fstr), "000000");
* preceding an hh:mm:ss field. - thomas 1998-04-30
*/
int
-DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct tm *tm, fsec_t *fsec)
+DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct tm * tm, fsec_t *fsec)
{
int is_before = FALSE;
*/
/*
- * A single signed number ends up here, but will be
- * rejected by DecodeTime(). So, work this out to drop
- * through to DTK_NUMBER, which *can* tolerate this.
+ * A single signed number ends up here, but will be rejected
+ * by DecodeTime(). So, work this out to drop through to
+ * DTK_NUMBER, which *can* tolerate this.
*/
cp = field[i] + 1;
while (*cp != '\0' && *cp != ':' && *cp != '.')
/*
* Set the next type to be a day, if units are not
- * specified. This handles the case of '1 +02:03'
- * since we are reading right to left.
+ * specified. This handles the case of '1 +02:03' since we
+ * are reading right to left.
*/
type = DTK_DAY;
tmask = DTK_M(TZ);
* - thomas 1998-04-30
*/
int
-EncodeInterval(struct tm *tm, fsec_t fsec, int style, char *str)
+EncodeInterval(struct tm * tm, fsec_t fsec, int style, char *str)
{
int is_before = FALSE;
int is_nonzero = FALSE;
/*
* The sign of year and month are guaranteed to match, since they are
- * stored internally as "month". But we'll need to check for is_before
- * and is_nonzero when determining the signs of hour/minute/seconds
- * fields.
+ * stored internally as "month". But we'll need to check for is_before and
+ * is_nonzero when determining the signs of hour/minute/seconds fields.
*/
switch (style)
{
tm->tm_sec != 0 || fsec != 0)
{
int minus = tm->tm_hour < 0 || tm->tm_min < 0 ||
- tm->tm_sec < 0 || fsec < 0;
+ tm->tm_sec < 0 || fsec < 0;
sprintf(cp, "%s%s%02d:%02d", (is_nonzero ? " " : ""),
(minus ? "-" : (is_before ? "+" : "")),
sprintf(cp, ".%06d", Abs(fsec));
#else
fsec += tm->tm_sec;
- sprintf(cp, ":%012.9f", fabs(fsec));
+ sprintf(cp, ":%012.9f", fabs(fsec));
#endif
TrimTrailingZeros(cp);
cp += strlen(cp);
* Convert a interval data type to a tm structure.
*/
static int
-interval2tm(interval span, struct tm *tm, fsec_t *fsec)
+interval2tm(interval span, struct tm * tm, fsec_t *fsec)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 time;
*fsec = time - (tm->tm_sec * USECS_PER_SEC);
#else
recalc:
- TMODULO(time, tm->tm_mday, (double)SECS_PER_DAY);
- TMODULO(time, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(time, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(time, tm->tm_mday, (double) SECS_PER_DAY);
+ TMODULO(time, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(time, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(time, tm->tm_sec, 1.0);
time = TSROUND(time);
/* roundoff may need to propagate to higher-order fields */
} /* interval2tm() */
static int
-tm2interval(struct tm *tm, fsec_t fsec, interval *span)
+tm2interval(struct tm * tm, fsec_t fsec, interval * span)
{
span->month = tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
#ifdef HAVE_INT64_TIMESTAMP
span->time = (((((((tm->tm_mday * INT64CONST(24)) +
- tm->tm_hour) * INT64CONST(60)) +
- tm->tm_min) * INT64CONST(60)) +
- tm->tm_sec) * USECS_PER_SEC) + fsec;
+ tm->tm_hour) * INT64CONST(60)) +
+ tm->tm_min) * INT64CONST(60)) +
+ tm->tm_sec) * USECS_PER_SEC) + fsec;
#else
- span->time = (((((tm->tm_mday * (double)HOURS_PER_DAY) +
- tm->tm_hour) * (double)MINS_PER_HOUR) +
- tm->tm_min) * (double)SECS_PER_MINUTE) +
- tm->tm_sec + fsec;
+ span->time = (((((tm->tm_mday * (double) HOURS_PER_DAY) +
+ tm->tm_hour) * (double) MINS_PER_HOUR) +
+ tm->tm_min) * (double) SECS_PER_MINUTE) +
+ tm->tm_sec + fsec;
#endif
return 0;
}
char *
-PGTYPESinterval_to_asc(interval *span)
+PGTYPESinterval_to_asc(interval * span)
{
struct tm tt,
*tm = &tt;
}
int
-PGTYPESinterval_copy(interval *intvlsrc, interval *intrcldest)
+PGTYPESinterval_copy(interval * intvlsrc, interval * intrcldest)
{
intrcldest->time = intvlsrc->time;
intrcldest->month = intvlsrc->month;
* ----------
*/
static int
-apply_typmod(numeric *var, long typmod)
+apply_typmod(numeric * var, long typmod)
{
int precision;
int scale;
/*
* Check for overflow - note we can't do this before rounding, because
- * rounding could raise the weight. Also note that the var's weight
- * could be inflated by leading zeroes, which will be stripped before
- * storage but perhaps might not have been yet. In any case, we must
- * recognize a true zero, whose weight doesn't mean anything.
+ * rounding could raise the weight. Also note that the var's weight could
+ * be inflated by leading zeroes, which will be stripped before storage
+ * but perhaps might not have been yet. In any case, we must recognize a
+ * true zero, whose weight doesn't mean anything.
*/
if (var->weight >= maxweight)
{
* ----------
*/
static int
-alloc_var(numeric *var, int ndigits)
+alloc_var(numeric * var, int ndigits)
{
digitbuf_free(var->buf);
var->buf = digitbuf_alloc(ndigits + 1);
* ----------
*/
static int
-set_var_from_str(char *str, char **ptr, numeric *dest)
+set_var_from_str(char *str, char **ptr, numeric * dest)
{
bool have_dp = FALSE;
int i = 0;
* ----------
*/
static char *
-get_str_from_var(numeric *var, int dscale)
+get_str_from_var(numeric * var, int dscale)
{
char *str;
char *cp;
}
/*
- * If requested, output a decimal point and all the digits that follow
- * it.
+ * If requested, output a decimal point and all the digits that follow it.
*/
if (dscale > 0)
{
}
char *
-PGTYPESnumeric_to_asc(numeric *num, int dscale)
+PGTYPESnumeric_to_asc(numeric * num, int dscale)
{
if (dscale < 0)
dscale = num->dscale;
* ----------
*/
static void
-zero_var(numeric *var)
+zero_var(numeric * var)
{
digitbuf_free(var->buf);
var->buf = NULL;
}
void
-PGTYPESnumeric_free(numeric *var)
+PGTYPESnumeric_free(numeric * var)
{
digitbuf_free(var->buf);
free(var);
* ----------
*/
static int
-cmp_abs(numeric *var1, numeric *var2)
+cmp_abs(numeric * var1, numeric * var2)
{
int i1 = 0;
int i2 = 0;
* ----------
*/
static int
-add_abs(numeric *var1, numeric *var2, numeric *result)
+add_abs(numeric * var1, numeric * var2, numeric * result)
{
NumericDigit *res_buf;
NumericDigit *res_digits;
* ----------
*/
static int
-sub_abs(numeric *var1, numeric *var2, numeric *result)
+sub_abs(numeric * var1, numeric * var2, numeric * result)
{
NumericDigit *res_buf;
NumericDigit *res_digits;
* ----------
*/
int
-PGTYPESnumeric_add(numeric *var1, numeric *var2, numeric *result)
+PGTYPESnumeric_add(numeric * var1, numeric * var2, numeric * result)
{
/*
* Decide on the signs of the two variables what to do
else
{
/*
- * var1 is positive, var2 is negative Must compare absolute
- * values
+ * var1 is positive, var2 is negative Must compare absolute values
*/
switch (cmp_abs(var1, var2))
{
* ----------
*/
int
-PGTYPESnumeric_sub(numeric *var1, numeric *var2, numeric *result)
+PGTYPESnumeric_sub(numeric * var1, numeric * var2, numeric * result)
{
/*
* Decide on the signs of the two variables what to do
* ----------
*/
int
-PGTYPESnumeric_mul(numeric *var1, numeric *var2, numeric *result)
+PGTYPESnumeric_mul(numeric * var1, numeric * var2, numeric * result)
{
NumericDigit *res_buf;
NumericDigit *res_digits;
* Note that this must be called before div_var.
*/
static int
-select_div_scale(numeric *var1, numeric *var2, int *rscale)
+select_div_scale(numeric * var1, numeric * var2, int *rscale)
{
int weight1,
weight2,
int res_rscale;
/*
- * The result scale of a division isn't specified in any SQL standard.
- * For PostgreSQL we select a display scale that will give at least
+ * The result scale of a division isn't specified in any SQL standard. For
+ * PostgreSQL we select a display scale that will give at least
* NUMERIC_MIN_SIG_DIGITS significant digits, so that numeric gives a
* result no less accurate than float8; but use a scale not less than
* either input's display scale.
}
int
-PGTYPESnumeric_div(numeric *var1, numeric *var2, numeric *result)
+PGTYPESnumeric_div(numeric * var1, numeric * var2, numeric * result)
{
NumericDigit *res_digits;
int res_ndigits;
memcpy(dividend.digits, var1->digits, var1->ndigits);
/*
- * Setup the result. Do the allocation in a temporary buffer
- * first, so we don't free result->buf unless we have successfully
- * allocated a buffer to replace it with.
+ * Setup the result. Do the allocation in a temporary buffer first, so we
+ * don't free result->buf unless we have successfully allocated a buffer
+ * to replace it with.
*/
tmp_buf = digitbuf_alloc(res_ndigits + 2);
if (tmp_buf == NULL)
result->sign = NUMERIC_POS;
result->dscale = res_dscale;
- err = 0; /* if we've made it this far, return success */
+ err = 0; /* if we've made it this far, return success */
done:
+
/*
* Tidy up
*/
int
-PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
+PGTYPESnumeric_cmp(numeric * var1, numeric * var2)
{
/* use cmp_abs function to calculate the result */
if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG)
{
/*
- * instead of inverting the result, we invert the paramter
- * ordering
+ * instead of inverting the result, we invert the parameter ordering
*/
return cmp_abs(var2, var1);
}
}
int
-PGTYPESnumeric_from_int(signed int int_val, numeric *var)
+PGTYPESnumeric_from_int(signed int int_val, numeric * var)
{
/* implicit conversion */
signed long int long_int = int_val;
}
int
-PGTYPESnumeric_from_long(signed long int long_val, numeric *var)
+PGTYPESnumeric_from_long(signed long int long_val, numeric * var)
{
/* calculate the size of the long int number */
/* a number n needs log_10 n digits */
/*
- * however we multiply by 10 each time and compare instead of
- * calculating the logarithm
+ * however we multiply by 10 each time and compare instead of calculating
+ * the logarithm
*/
int size = 0;
/*
* we can abandon if abs_long_val reaches 0, because the memory is
- * initialized properly and filled with '0', so converting 10000
- * in only one step is no problem
+ * initialized properly and filled with '0', so converting 10000 in
+ * only one step is no problem
*/
} while (abs_long_val > 0);
}
int
-PGTYPESnumeric_copy(numeric *src, numeric *dst)
+PGTYPESnumeric_copy(numeric * src, numeric * dst)
{
int i;
}
int
-PGTYPESnumeric_from_double(double d, numeric *dst)
+PGTYPESnumeric_from_double(double d, numeric * dst)
{
char buffer[100];
numeric *tmp;
}
static int
-numericvar_to_double_no_overflow(numeric *var, double *dp)
+numericvar_to_double_no_overflow(numeric * var, double *dp)
{
char *tmp;
double val;
}
int
-PGTYPESnumeric_to_double(numeric *nv, double *dp)
+PGTYPESnumeric_to_double(numeric * nv, double *dp)
{
double tmp;
int i;
}
int
-PGTYPESnumeric_to_int(numeric *nv, int *ip)
+PGTYPESnumeric_to_int(numeric * nv, int *ip)
{
long l;
int i;
}
int
-PGTYPESnumeric_to_long(numeric *nv, long *lp)
+PGTYPESnumeric_to_long(numeric * nv, long *lp)
{
int i;
long l = 0;
}
int
-PGTYPESnumeric_to_decimal(numeric *src, decimal *dst)
+PGTYPESnumeric_to_decimal(numeric * src, decimal * dst)
{
int i;
}
int
-PGTYPESnumeric_from_decimal(decimal *src, numeric *dst)
+PGTYPESnumeric_from_decimal(decimal * src, numeric * dst)
{
int i;
{
return (((((hour * MINS_PER_HOUR) + min) * SECS_PER_MINUTE) + sec) * USECS_PER_SEC) + fsec;
} /* time2t() */
-
#else
static double
time2t(const int hour, const int min, const int sec, const fsec_t fsec)
* Returns -1 on failure (overflow).
*/
int
-tm2timestamp(struct tm *tm, fsec_t fsec, int *tzp, timestamp *result)
+tm2timestamp(struct tm * tm, fsec_t fsec, int *tzp, timestamp * result)
{
#ifdef HAVE_INT64_TIMESTAMP
int dDate;
int64 time;
-
#else
double dDate,
time;
* local time zone. If out of this range, leave as GMT. - tgl 97/05/27
*/
static int
-timestamp2tm(timestamp dt, int *tzp, struct tm *tm, fsec_t *fsec, char **tzn)
+timestamp2tm(timestamp dt, int *tzp, struct tm * tm, fsec_t *fsec, char **tzn)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 dDate,
dt2time(time, &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#else
time = dt;
- TMODULO(time, dDate, (double)SECS_PER_DAY);
+ TMODULO(time, dDate, (double) SECS_PER_DAY);
if (time < 0)
{
if (*fsec >= 1.0)
{
time = ceil(time);
- if (time >= (double)SECS_PER_DAY)
+ if (time >= (double) SECS_PER_DAY)
{
time = 0;
dDate += 1;
tm->tm_gmtoff = tx->tm_gmtoff;
tm->tm_zone = tx->tm_zone;
- *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */
+ *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */
if (tzn != NULL)
*tzn = (char *) tm->tm_zone;
#elif defined(HAVE_INT_TIMEZONE)
if (tzn != NULL)
*tzn = TZNAME_GLOBAL[(tm->tm_isdst > 0)];
#endif
-
#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
*tzp = 0;
/* Mark this as *no* time zone available */
/* AdjustTimestampForTypmod(&result, typmod); */
/*
- * Since it's difficult to test for noresult, make sure errno is 0 if
- * no error occured.
+ * Since it's difficult to test for noresult, make sure errno is 0 if no
+ * error occurred.
*/
errno = 0;
return result;
char buf[MAXDATELEN + 1];
char *tzn = NULL;
fsec_t fsec;
- int DateStyle = 1; /* this defaults to ISO_DATES, shall we
- * make it an option? */
+ int DateStyle = 1; /* this defaults to ISO_DATES, shall we make
+ * it an option? */
if (TIMESTAMP_NOT_FINITE(tstamp))
EncodeSpecialTimestamp(tstamp, buf);
}
void
-PGTYPEStimestamp_current(timestamp *ts)
+PGTYPEStimestamp_current(timestamp * ts)
{
struct tm tm;
}
static int
-dttofmtasc_replace(timestamp *ts, date dDate, int dow, struct tm *tm,
+dttofmtasc_replace(timestamp * ts, date dDate, int dow, struct tm * tm,
char *output, int *pstr_len, char *fmtstr)
{
union un_fmt_comb replace_val;
case 'D':
/*
- * ts, dDate, dow, tm is information about the
- * timestamp
+ * ts, dDate, dow, tm is information about the timestamp
*
* q is the start of the current output buffer
*
case 'g':
/* XXX: fall back to strftime */
{
- char *fmt = "%g"; /* Keep compiler quiet
- * about 2-digit year */
+ char *fmt = "%g"; /* Keep compiler quiet about
+ * 2-digit year */
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, fmt, tm);
case 'x':
/* XXX: fall back to strftime */
{
- char *fmt = "%x"; /* Keep compiler quiet
- * about 2-digit year */
+ char *fmt = "%x"; /* Keep compiler quiet about
+ * 2-digit year */
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, fmt, tm);
int
-PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmtstr)
+PGTYPEStimestamp_fmt_asc(timestamp * ts, char *output, int str_len, char *fmtstr)
{
struct tm tm;
fsec_t fsec;
}
int
-PGTYPEStimestamp_sub(timestamp *ts1, timestamp *ts2, interval *iv)
+PGTYPEStimestamp_sub(timestamp * ts1, timestamp * ts2, interval * iv)
{
if (TIMESTAMP_NOT_FINITE(*ts1) || TIMESTAMP_NOT_FINITE(*ts2))
return PGTYPES_TS_ERR_EINFTIME;
}
int
-PGTYPEStimestamp_defmt_asc(char *str, char *fmt, timestamp *d)
+PGTYPEStimestamp_defmt_asc(char *str, char *fmt, timestamp * d)
{
int year,
month,
/*
* add an interval to a time stamp
*
-* *tout = tin + span
+* *tout = tin + span
*
-* returns 0 if successful
-* returns -1 if it fails
+* returns 0 if successful
+* returns -1 if it fails
*
*/
-
+
int
-PGTYPEStimestamp_add_interval(timestamp *tin, interval *span, timestamp *tout)
+PGTYPEStimestamp_add_interval(timestamp * tin, interval * span, timestamp * tout)
{
-
-
- if (TIMESTAMP_NOT_FINITE(*tin))
- *tout = *tin;
-
-
- else
- {
- if (span->month != 0)
- {
- struct tm tt,
- *tm = &tt;
- fsec_t fsec;
-
-
- if (timestamp2tm(*tin, NULL, tm, &fsec, NULL) !=0)
- return -1;
- tm->tm_mon += span->month;
- if (tm->tm_mon > MONTHS_PER_YEAR)
- {
- tm->tm_year += (tm->tm_mon - 1) / MONTHS_PER_YEAR;
- tm->tm_mon = (tm->tm_mon - 1) % MONTHS_PER_YEAR + 1;
- }
- else if (tm->tm_mon < 1)
- {
- tm->tm_year += tm->tm_mon / MONTHS_PER_YEAR - 1;
- tm->tm_mon = tm->tm_mon % MONTHS_PER_YEAR + MONTHS_PER_YEAR;
- }
-
-
- /* adjust for end of month boundary problems... */
- if (tm->tm_mday > day_tab[isleap(tm->tm_year)][tm->tm_mon - 1])
- tm->tm_mday = (day_tab[isleap(tm->tm_year)][tm->tm_mon - 1]);
-
-
- if (tm2timestamp(tm, fsec, NULL, tin) !=0)
- return -1;
- }
-
-
- *tin +=span->time;
- *tout = *tin;
- }
- return 0;
-
+
+
+ if (TIMESTAMP_NOT_FINITE(*tin))
+ *tout = *tin;
+
+
+ else
+ {
+ if (span->month != 0)
+ {
+ struct tm tt,
+ *tm = &tt;
+ fsec_t fsec;
+
+
+ if (timestamp2tm(*tin, NULL, tm, &fsec, NULL) != 0)
+ return -1;
+ tm->tm_mon += span->month;
+ if (tm->tm_mon > MONTHS_PER_YEAR)
+ {
+ tm->tm_year += (tm->tm_mon - 1) / MONTHS_PER_YEAR;
+ tm->tm_mon = (tm->tm_mon - 1) % MONTHS_PER_YEAR + 1;
+ }
+ else if (tm->tm_mon < 1)
+ {
+ tm->tm_year += tm->tm_mon / MONTHS_PER_YEAR - 1;
+ tm->tm_mon = tm->tm_mon % MONTHS_PER_YEAR + MONTHS_PER_YEAR;
+ }
+
+
+ /* adjust for end of month boundary problems... */
+ if (tm->tm_mday > day_tab[isleap(tm->tm_year)][tm->tm_mon - 1])
+ tm->tm_mday = (day_tab[isleap(tm->tm_year)][tm->tm_mon - 1]);
+
+
+ if (tm2timestamp(tm, fsec, NULL, tin) != 0)
+ return -1;
+ }
+
+
+ *tin += span->time;
+ *tout = *tin;
+ }
+ return 0;
+
}
-
-
+
+
/*
* subtract an interval from a time stamp
*
-* *tout = tin - span
+* *tout = tin - span
*
-* returns 0 if successful
-* returns -1 if it fails
+* returns 0 if successful
+* returns -1 if it fails
*
*/
-
+
int
-PGTYPEStimestamp_sub_interval(timestamp *tin, interval *span, timestamp *tout)
+PGTYPEStimestamp_sub_interval(timestamp * tin, interval * span, timestamp * tout)
{
- interval tspan;
-
- tspan.month = -span->month;
- tspan.time = -span->time;
-
-
- return PGTYPEStimestamp_add_interval(tin, &tspan, tout );
-}
+ interval tspan;
+
+ tspan.month = -span->month;
+ tspan.time = -span->time;
+
+ return PGTYPEStimestamp_add_interval(tin, &tspan, tout);
+}
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.92 2005/08/29 01:32:00 tgl Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.93 2005/10/15 02:49:47 momjian Exp $ */
/* New main for ecpg, the PostgreSQL embedded SQL precompiler. */
/* (C) Michael Meskes Feb 5th, 1998 */
printf(" -c automatically generate C code from embedded SQL code;\n"
" currently this works for EXEC SQL TYPE\n");
printf(" -C MODE set compatibility mode;\n"
- " MODE may be one of \"INFORMIX\", \"INFORMIX_SE\"\n");
+ " MODE may be one of \"INFORMIX\", \"INFORMIX_SE\"\n");
#ifdef YYDEBUG
printf(" -d generate parser debug output\n");
#endif
if (!(ptr->opened))
{
/*
- * Does not really make sense to declare a cursor
- * but not open it
+ * Does not really make sense to declare a cursor but
+ * not open it
*/
snprintf(errortext, sizeof(errortext), "cursor \"%s\" has been declared but not opened\n", ptr->name);
mmerror(PARSE_ERROR, ET_WARNING, errortext);
* lexical token lookup for reserved words in postgres embedded SQL
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg_keywords.c,v 1.30 2004/09/27 09:59:17 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg_keywords.c,v 1.31 2005/10/15 02:49:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{"section", SQL_SECTION},
{"short", SQL_SHORT},
{"signed", SQL_SIGNED},
- {"sql", SQL_SQL}, /* strange thing, used for into sql
- * descriptor MYDESC; */
+ {"sql", SQL_SQL}, /* strange thing, used for into sql descriptor
+ * MYDESC; */
{"sqlerror", SQL_SQLERROR},
{"sqlprint", SQL_SQLPRINT},
{"sqlwarning", SQL_SQLWARNING},
return NULL;
/*
- * Apply an ASCII-only downcasing. We must not use tolower() since it
- * may produce the wrong translation in some locales (eg, Turkish),
- * and we don't trust isupper() very much either. In an ASCII-based
- * encoding the tests against A and Z are sufficient, but we also
- * check isupper() so that we will work correctly under EBCDIC. The
- * actual case conversion step should work for either ASCII or EBCDIC.
+ * Apply an ASCII-only downcasing. We must not use tolower() since it may
+ * produce the wrong translation in some locales (eg, Turkish), and we
+ * don't trust isupper() very much either. In an ASCII-based encoding the
+ * tests against A and Z are sufficient, but we also check isupper() so
+ * that we will work correctly under EBCDIC. The actual case conversion
+ * step should work for either ASCII or EBCDIC.
*/
for (i = 0; i < len; i++)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/keywords.c,v 1.69 2005/10/04 13:28:21 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/keywords.c,v 1.70 2005/10/15 02:49:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{"analyze", ANALYZE},
{"and", AND},
{"any", ANY},
- {"array", ARRAY},
- {"as", AS},
- {"asc", ASC},
- {"assertion", ASSERTION},
- {"assignment", ASSIGNMENT},
- {"asymmetric", ASYMMETRIC},
- {"at", AT},
- {"authorization", AUTHORIZATION},
- {"backward", BACKWARD},
- {"before", BEFORE},
- {"begin", BEGIN_P},
- {"between", BETWEEN},
- {"bigint", BIGINT},
- {"binary", BINARY},
- {"bit", BIT},
- {"boolean", BOOLEAN_P},
- {"both", BOTH},
- {"by", BY},
- {"cache", CACHE},
- {"called", CALLED},
- {"cascade", CASCADE},
- {"case", CASE},
- {"cast", CAST},
- {"chain", CHAIN},
- {"char", CHAR_P},
- {"character", CHARACTER},
- {"characteristics", CHARACTERISTICS},
- {"check", CHECK},
- {"checkpoint", CHECKPOINT},
- {"class", CLASS},
- {"close", CLOSE},
- {"cluster", CLUSTER},
- {"coalesce", COALESCE},
- {"collate", COLLATE},
- {"column", COLUMN},
- {"comment", COMMENT},
- {"commit", COMMIT},
- {"committed", COMMITTED},
- {"connection", CONNECTION},
- {"constraint", CONSTRAINT},
- {"constraints", CONSTRAINTS},
- {"conversion", CONVERSION_P},
- {"convert", CONVERT},
- {"copy", COPY},
- {"create", CREATE},
- {"createdb", CREATEDB},
- {"createrole", CREATEROLE},
- {"createuser", CREATEUSER},
- {"cross", CROSS},
- {"csv", CSV},
- {"current_date", CURRENT_DATE},
- {"current_role", CURRENT_ROLE},
- {"current_time", CURRENT_TIME},
- {"current_timestamp", CURRENT_TIMESTAMP},
- {"cursor", CURSOR},
- {"cycle", CYCLE},
- {"database", DATABASE},
- {"day", DAY_P},
- {"deallocate", DEALLOCATE},
- {"dec", DEC},
- {"decimal", DECIMAL_P},
- {"declare", DECLARE},
- {"default", DEFAULT},
- {"defaults", DEFAULTS},
- {"deferrable", DEFERRABLE},
- {"deferred", DEFERRED},
- {"definer", DEFINER},
- {"delete", DELETE_P},
- {"delimiter", DELIMITER},
- {"delimiters", DELIMITERS},
- {"desc", DESC},
- {"disable", DISABLE_P},
- {"distinct", DISTINCT},
- {"do", DO},
- {"domain", DOMAIN_P},
- {"double", DOUBLE_P},
- {"drop", DROP},
- {"each", EACH},
- {"else", ELSE},
- {"enable", ENABLE_P},
- {"encoding", ENCODING},
- {"encrypted", ENCRYPTED},
- {"end", END_P},
- {"escape", ESCAPE},
- {"except", EXCEPT},
- {"excluding", EXCLUDING},
- {"exclusive", EXCLUSIVE},
- {"execute", EXECUTE},
- {"exists", EXISTS},
- {"explain", EXPLAIN},
- {"external", EXTERNAL},
- {"extract", EXTRACT},
- {"false", FALSE_P},
- {"fetch", FETCH},
- {"first", FIRST_P},
- {"float", FLOAT_P},
- {"for", FOR},
- {"force", FORCE},
- {"foreign", FOREIGN},
- {"forward", FORWARD},
- {"freeze", FREEZE},
- {"from", FROM},
- {"full", FULL},
- {"function", FUNCTION},
- {"get", GET},
- {"global", GLOBAL},
- {"grant", GRANT},
- {"granted", GRANTED},
- {"greatest", GREATEST},
- {"group", GROUP_P},
- {"handler", HANDLER},
- {"having", HAVING},
- {"header", HEADER},
- {"hold", HOLD},
- {"hour", HOUR_P},
- {"ilike", ILIKE},
- {"immediate", IMMEDIATE},
- {"immutable", IMMUTABLE},
- {"implicit", IMPLICIT_P},
- {"in", IN_P},
- {"including", INCLUDING},
- {"increment", INCREMENT},
- {"index", INDEX},
- {"inherit", INHERIT},
- {"inherits", INHERITS},
- {"initially", INITIALLY},
- {"inner", INNER_P},
- {"inout", INOUT},
- {"input", INPUT_P},
- {"insensitive", INSENSITIVE},
- {"insert", INSERT},
- {"instead", INSTEAD},
- {"int", INT_P},
- {"integer", INTEGER},
- {"intersect", INTERSECT},
- {"interval", INTERVAL},
- {"into", INTO},
- {"invoker", INVOKER},
- {"is", IS},
- {"isnull", ISNULL},
- {"isolation", ISOLATION},
- {"join", JOIN},
- {"key", KEY},
- {"lancompiler", LANCOMPILER},
- {"language", LANGUAGE},
- {"large", LARGE_P},
- {"last", LAST_P},
- {"leading", LEADING},
- {"least", LEAST},
- {"left", LEFT},
- {"level", LEVEL},
- {"like", LIKE},
- {"limit", LIMIT},
- {"listen", LISTEN},
- {"load", LOAD},
- {"local", LOCAL},
- {"location", LOCATION},
- {"lock", LOCK_P},
- {"login", LOGIN_P},
- {"match", MATCH},
- {"maxvalue", MAXVALUE},
- {"minute", MINUTE_P},
- {"minvalue", MINVALUE},
- {"mode", MODE},
- {"month", MONTH_P},
- {"move", MOVE},
- {"names", NAMES},
- {"national", NATIONAL},
- {"natural", NATURAL},
- {"nchar", NCHAR},
- {"new", NEW},
- {"next", NEXT},
- {"no", NO},
- {"nocreatedb", NOCREATEDB},
- {"nocreaterole", NOCREATEROLE},
- {"nocreateuser", NOCREATEUSER},
- {"noinherit", NOINHERIT},
- {"nologin", NOLOGIN_P},
- {"none", NONE},
- {"nosuperuser", NOSUPERUSER},
- {"not", NOT},
- {"nothing", NOTHING},
- {"notify", NOTIFY},
- {"notnull", NOTNULL},
- {"nowait", NOWAIT},
- {"null", NULL_P},
- {"nullif", NULLIF},
- {"numeric", NUMERIC},
- {"object", OBJECT_P},
- {"of", OF},
- {"off", OFF},
- {"offset", OFFSET},
- {"oids", OIDS},
- {"old", OLD},
- {"on", ON},
- {"only", ONLY},
- {"operator", OPERATOR},
- {"option", OPTION},
- {"or", OR},
- {"order", ORDER},
- {"out", OUT_P},
- {"outer", OUTER_P},
- {"overlaps", OVERLAPS},
- {"owner", OWNER},
- {"partial", PARTIAL},
- {"password", PASSWORD},
- {"position", POSITION},
- {"precision", PRECISION},
- {"prepare", PREPARE},
- {"prepared", PREPARED},
- {"preserve", PRESERVE},
- {"primary", PRIMARY},
- {"prior", PRIOR},
- {"privileges", PRIVILEGES},
- {"procedural", PROCEDURAL},
- {"procedure", PROCEDURE},
- {"quote", QUOTE},
- {"read", READ},
- {"real", REAL},
- {"recheck", RECHECK},
- {"references", REFERENCES},
- {"reindex", REINDEX},
- {"relative", RELATIVE_P},
- {"release", RELEASE},
- {"rename", RENAME},
- {"repeatable", REPEATABLE},
- {"replace", REPLACE},
- {"reset", RESET},
- {"restart", RESTART},
- {"restrict", RESTRICT},
- {"returns", RETURNS},
- {"revoke", REVOKE},
- {"right", RIGHT},
- {"role", ROLE},
- {"rollback", ROLLBACK},
- {"row", ROW},
- {"rows", ROWS},
- {"rule", RULE},
- {"savepoint", SAVEPOINT},
- {"schema", SCHEMA},
- {"scroll", SCROLL},
- {"second", SECOND_P},
- {"security", SECURITY},
- {"select", SELECT},
- {"sequence", SEQUENCE},
- {"serializable", SERIALIZABLE},
- {"session", SESSION},
- {"session_user", SESSION_USER},
- {"set", SET},
- {"setof", SETOF},
- {"share", SHARE},
- {"show", SHOW},
- {"similar", SIMILAR},
- {"simple", SIMPLE},
- {"smallint", SMALLINT},
- {"some", SOME},
- {"stable", STABLE},
- {"start", START},
- {"statement", STATEMENT},
- {"statistics", STATISTICS},
- {"stdin", STDIN},
- {"stdout", STDOUT},
- {"storage", STORAGE},
- {"strict", STRICT_P},
- {"substring", SUBSTRING},
- {"superuser", SUPERUSER_P},
- {"symmetric", SYMMETRIC},
- {"sysid", SYSID},
- {"system", SYSTEM_P},
- {"table", TABLE},
- {"tablespace", TABLESPACE},
- {"temp", TEMP},
- {"template", TEMPLATE},
- {"temporary", TEMPORARY},
- {"then", THEN},
- {"time", TIME},
- {"timestamp", TIMESTAMP},
- {"to", TO},
- {"toast", TOAST},
- {"trailing", TRAILING},
- {"transaction", TRANSACTION},
- {"treat", TREAT},
- {"trigger", TRIGGER},
- {"trim", TRIM},
- {"true", TRUE_P},
- {"truncate", TRUNCATE},
- {"trusted", TRUSTED},
- {"type", TYPE_P},
- {"uncommitted", UNCOMMITTED},
- {"unencrypted", UNENCRYPTED},
- {"union", UNION},
- {"unique", UNIQUE},
- {"unknown", UNKNOWN},
- {"unlisten", UNLISTEN},
- {"until", UNTIL},
- {"update", UPDATE},
- {"user", USER},
- {"using", USING},
- {"vacuum", VACUUM},
- {"valid", VALID},
- {"validator", VALIDATOR},
- {"values", VALUES},
- {"varchar", VARCHAR},
- {"varying", VARYING},
- {"verbose", VERBOSE},
- {"view", VIEW},
- {"volatile", VOLATILE},
- {"when", WHEN},
- {"where", WHERE},
- {"with", WITH},
- {"without", WITHOUT},
- {"work", WORK},
- {"write", WRITE},
- {"year", YEAR_P},
- {"zone", ZONE},
- };
+ {"array", ARRAY},
+ {"as", AS},
+ {"asc", ASC},
+ {"assertion", ASSERTION},
+ {"assignment", ASSIGNMENT},
+ {"asymmetric", ASYMMETRIC},
+ {"at", AT},
+ {"authorization", AUTHORIZATION},
+ {"backward", BACKWARD},
+ {"before", BEFORE},
+ {"begin", BEGIN_P},
+ {"between", BETWEEN},
+ {"bigint", BIGINT},
+ {"binary", BINARY},
+ {"bit", BIT},
+ {"boolean", BOOLEAN_P},
+ {"both", BOTH},
+ {"by", BY},
+ {"cache", CACHE},
+ {"called", CALLED},
+ {"cascade", CASCADE},
+ {"case", CASE},
+ {"cast", CAST},
+ {"chain", CHAIN},
+ {"char", CHAR_P},
+ {"character", CHARACTER},
+ {"characteristics", CHARACTERISTICS},
+ {"check", CHECK},
+ {"checkpoint", CHECKPOINT},
+ {"class", CLASS},
+ {"close", CLOSE},
+ {"cluster", CLUSTER},
+ {"coalesce", COALESCE},
+ {"collate", COLLATE},
+ {"column", COLUMN},
+ {"comment", COMMENT},
+ {"commit", COMMIT},
+ {"committed", COMMITTED},
+ {"connection", CONNECTION},
+ {"constraint", CONSTRAINT},
+ {"constraints", CONSTRAINTS},
+ {"conversion", CONVERSION_P},
+ {"convert", CONVERT},
+ {"copy", COPY},
+ {"create", CREATE},
+ {"createdb", CREATEDB},
+ {"createrole", CREATEROLE},
+ {"createuser", CREATEUSER},
+ {"cross", CROSS},
+ {"csv", CSV},
+ {"current_date", CURRENT_DATE},
+ {"current_role", CURRENT_ROLE},
+ {"current_time", CURRENT_TIME},
+ {"current_timestamp", CURRENT_TIMESTAMP},
+ {"cursor", CURSOR},
+ {"cycle", CYCLE},
+ {"database", DATABASE},
+ {"day", DAY_P},
+ {"deallocate", DEALLOCATE},
+ {"dec", DEC},
+ {"decimal", DECIMAL_P},
+ {"declare", DECLARE},
+ {"default", DEFAULT},
+ {"defaults", DEFAULTS},
+ {"deferrable", DEFERRABLE},
+ {"deferred", DEFERRED},
+ {"definer", DEFINER},
+ {"delete", DELETE_P},
+ {"delimiter", DELIMITER},
+ {"delimiters", DELIMITERS},
+ {"desc", DESC},
+ {"disable", DISABLE_P},
+ {"distinct", DISTINCT},
+ {"do", DO},
+ {"domain", DOMAIN_P},
+ {"double", DOUBLE_P},
+ {"drop", DROP},
+ {"each", EACH},
+ {"else", ELSE},
+ {"enable", ENABLE_P},
+ {"encoding", ENCODING},
+ {"encrypted", ENCRYPTED},
+ {"end", END_P},
+ {"escape", ESCAPE},
+ {"except", EXCEPT},
+ {"excluding", EXCLUDING},
+ {"exclusive", EXCLUSIVE},
+ {"execute", EXECUTE},
+ {"exists", EXISTS},
+ {"explain", EXPLAIN},
+ {"external", EXTERNAL},
+ {"extract", EXTRACT},
+ {"false", FALSE_P},
+ {"fetch", FETCH},
+ {"first", FIRST_P},
+ {"float", FLOAT_P},
+ {"for", FOR},
+ {"force", FORCE},
+ {"foreign", FOREIGN},
+ {"forward", FORWARD},
+ {"freeze", FREEZE},
+ {"from", FROM},
+ {"full", FULL},
+ {"function", FUNCTION},
+ {"get", GET},
+ {"global", GLOBAL},
+ {"grant", GRANT},
+ {"granted", GRANTED},
+ {"greatest", GREATEST},
+ {"group", GROUP_P},
+ {"handler", HANDLER},
+ {"having", HAVING},
+ {"header", HEADER},
+ {"hold", HOLD},
+ {"hour", HOUR_P},
+ {"ilike", ILIKE},
+ {"immediate", IMMEDIATE},
+ {"immutable", IMMUTABLE},
+ {"implicit", IMPLICIT_P},
+ {"in", IN_P},
+ {"including", INCLUDING},
+ {"increment", INCREMENT},
+ {"index", INDEX},
+ {"inherit", INHERIT},
+ {"inherits", INHERITS},
+ {"initially", INITIALLY},
+ {"inner", INNER_P},
+ {"inout", INOUT},
+ {"input", INPUT_P},
+ {"insensitive", INSENSITIVE},
+ {"insert", INSERT},
+ {"instead", INSTEAD},
+ {"int", INT_P},
+ {"integer", INTEGER},
+ {"intersect", INTERSECT},
+ {"interval", INTERVAL},
+ {"into", INTO},
+ {"invoker", INVOKER},
+ {"is", IS},
+ {"isnull", ISNULL},
+ {"isolation", ISOLATION},
+ {"join", JOIN},
+ {"key", KEY},
+ {"lancompiler", LANCOMPILER},
+ {"language", LANGUAGE},
+ {"large", LARGE_P},
+ {"last", LAST_P},
+ {"leading", LEADING},
+ {"least", LEAST},
+ {"left", LEFT},
+ {"level", LEVEL},
+ {"like", LIKE},
+ {"limit", LIMIT},
+ {"listen", LISTEN},
+ {"load", LOAD},
+ {"local", LOCAL},
+ {"location", LOCATION},
+ {"lock", LOCK_P},
+ {"login", LOGIN_P},
+ {"match", MATCH},
+ {"maxvalue", MAXVALUE},
+ {"minute", MINUTE_P},
+ {"minvalue", MINVALUE},
+ {"mode", MODE},
+ {"month", MONTH_P},
+ {"move", MOVE},
+ {"names", NAMES},
+ {"national", NATIONAL},
+ {"natural", NATURAL},
+ {"nchar", NCHAR},
+ {"new", NEW},
+ {"next", NEXT},
+ {"no", NO},
+ {"nocreatedb", NOCREATEDB},
+ {"nocreaterole", NOCREATEROLE},
+ {"nocreateuser", NOCREATEUSER},
+ {"noinherit", NOINHERIT},
+ {"nologin", NOLOGIN_P},
+ {"none", NONE},
+ {"nosuperuser", NOSUPERUSER},
+ {"not", NOT},
+ {"nothing", NOTHING},
+ {"notify", NOTIFY},
+ {"notnull", NOTNULL},
+ {"nowait", NOWAIT},
+ {"null", NULL_P},
+ {"nullif", NULLIF},
+ {"numeric", NUMERIC},
+ {"object", OBJECT_P},
+ {"of", OF},
+ {"off", OFF},
+ {"offset", OFFSET},
+ {"oids", OIDS},
+ {"old", OLD},
+ {"on", ON},
+ {"only", ONLY},
+ {"operator", OPERATOR},
+ {"option", OPTION},
+ {"or", OR},
+ {"order", ORDER},
+ {"out", OUT_P},
+ {"outer", OUTER_P},
+ {"overlaps", OVERLAPS},
+ {"owner", OWNER},
+ {"partial", PARTIAL},
+ {"password", PASSWORD},
+ {"position", POSITION},
+ {"precision", PRECISION},
+ {"prepare", PREPARE},
+ {"prepared", PREPARED},
+ {"preserve", PRESERVE},
+ {"primary", PRIMARY},
+ {"prior", PRIOR},
+ {"privileges", PRIVILEGES},
+ {"procedural", PROCEDURAL},
+ {"procedure", PROCEDURE},
+ {"quote", QUOTE},
+ {"read", READ},
+ {"real", REAL},
+ {"recheck", RECHECK},
+ {"references", REFERENCES},
+ {"reindex", REINDEX},
+ {"relative", RELATIVE_P},
+ {"release", RELEASE},
+ {"rename", RENAME},
+ {"repeatable", REPEATABLE},
+ {"replace", REPLACE},
+ {"reset", RESET},
+ {"restart", RESTART},
+ {"restrict", RESTRICT},
+ {"returns", RETURNS},
+ {"revoke", REVOKE},
+ {"right", RIGHT},
+ {"role", ROLE},
+ {"rollback", ROLLBACK},
+ {"row", ROW},
+ {"rows", ROWS},
+ {"rule", RULE},
+ {"savepoint", SAVEPOINT},
+ {"schema", SCHEMA},
+ {"scroll", SCROLL},
+ {"second", SECOND_P},
+ {"security", SECURITY},
+ {"select", SELECT},
+ {"sequence", SEQUENCE},
+ {"serializable", SERIALIZABLE},
+ {"session", SESSION},
+ {"session_user", SESSION_USER},
+ {"set", SET},
+ {"setof", SETOF},
+ {"share", SHARE},
+ {"show", SHOW},
+ {"similar", SIMILAR},
+ {"simple", SIMPLE},
+ {"smallint", SMALLINT},
+ {"some", SOME},
+ {"stable", STABLE},
+ {"start", START},
+ {"statement", STATEMENT},
+ {"statistics", STATISTICS},
+ {"stdin", STDIN},
+ {"stdout", STDOUT},
+ {"storage", STORAGE},
+ {"strict", STRICT_P},
+ {"substring", SUBSTRING},
+ {"superuser", SUPERUSER_P},
+ {"symmetric", SYMMETRIC},
+ {"sysid", SYSID},
+ {"system", SYSTEM_P},
+ {"table", TABLE},
+ {"tablespace", TABLESPACE},
+ {"temp", TEMP},
+ {"template", TEMPLATE},
+ {"temporary", TEMPORARY},
+ {"then", THEN},
+ {"time", TIME},
+ {"timestamp", TIMESTAMP},
+ {"to", TO},
+ {"toast", TOAST},
+ {"trailing", TRAILING},
+ {"transaction", TRANSACTION},
+ {"treat", TREAT},
+ {"trigger", TRIGGER},
+ {"trim", TRIM},
+ {"true", TRUE_P},
+ {"truncate", TRUNCATE},
+ {"trusted", TRUSTED},
+ {"type", TYPE_P},
+ {"uncommitted", UNCOMMITTED},
+ {"unencrypted", UNENCRYPTED},
+ {"union", UNION},
+ {"unique", UNIQUE},
+ {"unknown", UNKNOWN},
+ {"unlisten", UNLISTEN},
+ {"until", UNTIL},
+ {"update", UPDATE},
+ {"user", USER},
+ {"using", USING},
+ {"vacuum", VACUUM},
+ {"valid", VALID},
+ {"validator", VALIDATOR},
+ {"values", VALUES},
+ {"varchar", VARCHAR},
+ {"varying", VARYING},
+ {"verbose", VERBOSE},
+ {"view", VIEW},
+ {"volatile", VOLATILE},
+ {"when", WHEN},
+ {"where", WHERE},
+ {"with", WITH},
+ {"without", WITHOUT},
+ {"work", WORK},
+ {"write", WRITE},
+ {"year", YEAR_P},
+ {"zone", ZONE},
+};
/*
* ScanKeywordLookup - see if a given word is a keyword
* keywords are to be matched in this way even though non-keyword identifiers
* receive a different case-normalization mapping.
*/
- ScanKeyword *
- ScanKeywordLookup(char *text)
- {
- int len,
- i;
- char word[NAMEDATALEN];
- ScanKeyword *low;
- ScanKeyword *high;
-
- len = strlen(text);
- /* We assume all keywords are shorter than NAMEDATALEN. */
- if (len >= NAMEDATALEN)
- return NULL;
+ScanKeyword *
+ScanKeywordLookup(char *text)
+{
+ int len,
+ i;
+ char word[NAMEDATALEN];
+ ScanKeyword *low;
+ ScanKeyword *high;
- /*
- * Apply an ASCII-only downcasing. We must not use tolower()
- * since it may produce the wrong translation in some locales (eg,
- * Turkish).
- */
- for (i = 0; i < len; i++)
- {
- char ch = text[i];
+ len = strlen(text);
+ /* We assume all keywords are shorter than NAMEDATALEN. */
+ if (len >= NAMEDATALEN)
+ return NULL;
- if (ch >= 'A' && ch <= 'Z')
- ch += 'a' - 'A';
- word[i] = ch;
- }
- word[len] = '\0';
+ /*
+ * Apply an ASCII-only downcasing. We must not use tolower() since it may
+ * produce the wrong translation in some locales (eg, Turkish).
+ */
+ for (i = 0; i < len; i++)
+ {
+ char ch = text[i];
- /*
- * Now do a binary search using plain strcmp() comparison.
- */
- low = &ScanKeywords[0];
- high = endof(ScanKeywords) - 1;
- while (low <= high)
- {
- ScanKeyword *middle;
- int difference;
+ if (ch >= 'A' && ch <= 'Z')
+ ch += 'a' - 'A';
+ word[i] = ch;
+ }
+ word[len] = '\0';
- middle = low + (high - low) / 2;
- difference = strcmp(middle->name, word);
- if (difference == 0)
- return middle;
- else if (difference < 0)
- low = middle + 1;
- else
- high = middle - 1;
- }
+ /*
+ * Now do a binary search using plain strcmp() comparison.
+ */
+ low = &ScanKeywords[0];
+ high = endof(ScanKeywords) - 1;
+ while (low <= high)
+ {
+ ScanKeyword *middle;
+ int difference;
- return NULL;
+ middle = low + (high - low) / 2;
+ difference = strcmp(middle->name, word);
+ if (difference == 0)
+ return middle;
+ else if (difference < 0)
+ low = middle + 1;
+ else
+ high = middle - 1;
}
+
+ return NULL;
+}
case ECPGt_NO_INDICATOR: /* no indicator */
return ("ECPGt_NO_INDICATOR");
break;
- case ECPGt_char_variable: /* string that should not be
- * quoted */
+ case ECPGt_char_variable: /* string that should not be quoted */
return ("ECPGt_char_variable");
break;
case ECPGt_const: /* constant string quoted */
ECPGdump_a_simple(o, name,
type->u.element->type,
- type->u.element->size, type->size, NULL, prefix);
+ type->u.element->size, type->size, NULL, prefix);
if (ind_type != NULL)
{
*/
if ((atoi(varcharsize) > 1 ||
(atoi(arrsize) > 0) ||
- (atoi(varcharsize) == 0 && strcmp(varcharsize, "0") != 0) ||
+ (atoi(varcharsize) == 0 && strcmp(varcharsize, "0") != 0) ||
(atoi(arrsize) == 0 && strcmp(arrsize, "0") != 0))
&& siz == NULL)
sprintf(variable, "(%s%s)", prefix ? prefix : "", name);
case ECPGt_date:
/*
- * we have to use a pointer and translate the variable
- * type
+ * we have to use a pointer and translate the variable type
*/
sprintf(variable, "&(%s%s)", prefix ? prefix : "", name);
sprintf(offset, "sizeof(date)");
case ECPGt_timestamp:
/*
- * we have to use a pointer and translate the variable
- * type
+ * we have to use a pointer and translate the variable type
*/
sprintf(variable, "&(%s%s)", prefix ? prefix : "", name);
sprintf(offset, "sizeof(timestamp)");
ECPGdump_a_struct(FILE *o, const char *name, const char *ind_name, char *arrsiz, struct ECPGtype * type, struct ECPGtype * ind_type, const char *offsetarg, const char *prefix, const char *ind_prefix)
{
/*
- * If offset is NULL, then this is the first recursive level. If not
- * then we are in a struct in a struct and the offset is used as
- * offset.
+ * If offset is NULL, then this is the first recursive level. If not then
+ * we are in a struct in a struct and the offset is used as offset.
*/
struct ECPGstruct_member *p,
*ind_p = NULL;
struct ECPGtype
{
enum ECPGttype type;
- char *size; /* For array it is the number of elements.
- * For varchar it is the maxsize of the
- * area. */
- char *struct_sizeof; /* For a struct this is the sizeof() type
- * as string */
+ char *size; /* For array it is the number of elements. For
+ * varchar it is the maxsize of the area. */
+ char *struct_sizeof; /* For a struct this is the sizeof() type as
+ * string */
union
{
- struct ECPGtype *element; /* For an array this is the type
- * of the element */
+ struct ECPGtype *element; /* For an array this is the type of
+ * the element */
struct ECPGstruct_member *members; /* A pointer to a list of
* members. */
} u;
int count;
/*
- * We don't care about what's inside the array braces
- * so just eat up the character
+ * We don't care about what's inside the array braces so
+ * just eat up the character
*/
for (count = 1, end = next + 1; count; end++)
{
switch (*end)
{
- case '\0': /* found the end, but this time it has to
- * be an array element */
+ case '\0': /* found the end, but this time it has to be
+ * an array element */
if (members->type->type != ECPGt_array)
mmerror(PARSE_ERROR, ET_FATAL, "incorrectly formed variable %s", name);
if (*next == '[')
{
/*
- * We don't care about what's inside the array braces so just
- * eat up the characters
+ * We don't care about what's inside the array braces so just eat
+ * up the characters
*/
for (count = 1, end = next + 1; count; end++)
{
return;
/*
- * The list is build up from the beginning so lets first dump the end
- * of the list:
+ * The list is built up from the beginning so let's first dump the end of
+ * the list:
*/
dump_variables(list->next, mode);
if (atoi(*length) < 0)
{
/*
- * make sure we return length = -1 for arrays without
- * given bounds
+ * make sure we return length = -1 for arrays without given
+ * bounds
*/
if (atoi(*dimension) < 0 && !type_definition)
* exceed INITIAL_EXPBUFFER_SIZE (currently 256 bytes).
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.c,v 1.104 2005/10/08 19:32:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.c,v 1.105 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
#define STARTUP_MSG 7 /* Initialise a connection */
-#define STARTUP_KRB4_MSG 10 /* krb4 session follows. Not supported any more. */
+#define STARTUP_KRB4_MSG 10 /* krb4 session follows. Not supported any
+ * more. */
#define STARTUP_KRB5_MSG 11 /* krb5 session follows */
#define STARTUP_PASSWORD_MSG 14 /* Password follows */
if ((p = strchr(aname, '/')) || (p = strchr(aname, '@')))
*p = '\0';
#ifdef WIN32
- for (p = aname; *p ; p++)
+ for (p = aname; *p; p++)
*p = pg_tolower(*p);
#endif
}
/*
- * libpq uses a non-blocking socket. But kerberos needs a blocking
- * socket, and we have to block somehow to do mutual authentication
- * anyway. So we temporarily make it blocking.
+ * libpq uses a non-blocking socket. But kerberos needs a blocking socket,
+ * and we have to block somehow to do mutual authentication anyway. So we
+ * temporarily make it blocking.
*/
if (!pg_set_block(sock))
{
{
#if defined(HAVE_KRB5_ERROR_TEXT_DATA)
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- libpq_gettext("Kerberos 5 authentication rejected: %*s\n"),
+ libpq_gettext("Kerberos 5 authentication rejected: %*s\n"),
(int) err_ret->text.length, err_ret->text.data);
#elif defined(HAVE_KRB5_ERROR_E_DATA)
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- libpq_gettext("Kerberos 5 authentication rejected: %*s\n"),
+ libpq_gettext("Kerberos 5 authentication rejected: %*s\n"),
(int) err_ret->e_data->length,
(const char *) err_ret->e_data->data);
#else
char sebuf[256];
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- libpq_gettext("could not restore non-blocking mode on socket: %s\n"),
+ libpq_gettext("could not restore non-blocking mode on socket: %s\n"),
pqStrerror(errno, sebuf, sizeof(sebuf)));
ret = STATUS_ERROR;
}
#endif
/*
- * The backend doesn't care what we send here, but it wants exactly
- * one character to force recvmsg() to block and wait for us.
+ * The backend doesn't care what we send here, but it wants exactly one
+ * character to force recvmsg() to block and wait for us.
*/
buf = '\0';
iov.iov_base = &buf;
return STATUS_OK;
#else
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- libpq_gettext("SCM_CRED authentication method not supported\n"));
+ libpq_gettext("SCM_CRED authentication method not supported\n"));
return STATUS_ERROR;
#endif
}
case AUTH_REQ_KRB4:
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- libpq_gettext("Kerberos 4 authentication not supported\n"));
+ libpq_gettext("Kerberos 4 authentication not supported\n"));
return STATUS_ERROR;
case AUTH_REQ_KRB5:
break;
#else
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- libpq_gettext("Kerberos 5 authentication not supported\n"));
+ libpq_gettext("Kerberos 5 authentication not supported\n"));
return STATUS_ERROR;
#endif
if (pg_password_sendauth(conn, password, areq) != STATUS_OK)
{
(void) snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- "fe_sendauth: error sending password authentication\n");
+ "fe_sendauth: error sending password authentication\n");
return STATUS_ERROR;
}
break;
default:
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- libpq_gettext("authentication method %u not supported\n"), areq);
+ libpq_gettext("authentication method %u not supported\n"), areq);
return STATUS_ERROR;
}
const char *name = NULL;
char *authn;
MsgType authsvc;
+
#ifdef WIN32
char username[128];
DWORD namesize = sizeof(username) - 1;
if (authsvc != STARTUP_MSG && authsvc != STARTUP_KRB5_MSG)
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- libpq_gettext("fe_getauthname: invalid authentication system: %d\n"),
+ libpq_gettext("fe_getauthname: invalid authentication system: %d\n"),
authsvc);
authn = name ? strdup(name) : NULL;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.h,v 1.21 2005/06/27 02:04:26 neilc Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.h,v 1.22 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void fe_setauthsvc(const char *name, char *PQerrormsg);
extern char *fe_getauthname(char *PQerrormsg);
-#define PG_KRB5_VERSION "PGVER5.1" /* at most KRB_SENDAUTH_VLEN chars */
+#define PG_KRB5_VERSION "PGVER5.1" /* at most KRB_SENDAUTH_VLEN chars */
#endif /* FE_AUTH_H */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.321 2005/09/26 17:49:09 petere Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.322 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
static const PQconninfoOption PQconninfoOptions[] = {
/*
- * "authtype" is no longer used, so mark it "don't show". We keep it
- * in the array so as not to reject conninfo strings from old apps
- * that might still try to set it.
+ * "authtype" is no longer used, so mark it "don't show". We keep it in
+ * the array so as not to reject conninfo strings from old apps that might
+ * still try to set it.
*/
{"authtype", "PGAUTHTYPE", DefaultAuthtype, NULL,
"Database-Authtype", "D", 20},
#endif
/*
- * "sslmode" option is allowed even without client SSL support because
- * the client can still handle SSL modes "disable" and "allow".
+ * "sslmode" option is allowed even without client SSL support because the
+ * client can still handle SSL modes "disable" and "allow".
*/
{"sslmode", "PGSSLMODE", DefaultSSLMode, NULL,
"SSL-Mode", "", 8}, /* sizeof("disable") == 8 */
#ifdef KRB5
/* Kerberos authentication supports specifying the service name */
{"krbsrvname", "PGKRBSRVNAME", PG_KRB_SRVNAM, NULL,
- "Kerberos-service-name", "", 20},
+ "Kerberos-service-name", "", 20},
#endif
/* Terminating entry --- MUST BE LAST */
/*
* Move option values into conn structure
*
- * Don't put anything cute here --- intelligence should be in
- * connectOptions2 ...
+ * Don't put anything cute here --- intelligence should be in connectOptions2
+ * ...
*
* XXX: probably worth checking strdup() return value here...
*/
{
conn->status = CONNECTION_BAD;
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("invalid sslmode value: \"%s\"\n"),
+ libpq_gettext("invalid sslmode value: \"%s\"\n"),
conn->sslmode);
return false;
}
case 'p': /* "prefer" */
/*
- * warn user that an SSL connection will never be
- * negotiated since SSL was not compiled in?
+ * warn user that an SSL connection will never be negotiated
+ * since SSL was not compiled in?
*/
break;
char sebuf[256];
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not set socket to TCP no delay mode: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not set socket to TCP no delay mode: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
return 0;
}
#endif
NI_NUMERICSERV);
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "could not connect to server: %s\n"
- "\tIs the server running locally and accepting\n"
- "\tconnections on Unix domain socket \"%s\"?\n"
+ "could not connect to server: %s\n"
+ "\tIs the server running locally and accepting\n"
+ "\tconnections on Unix domain socket \"%s\"?\n"
),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)),
service);
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "could not connect to server: %s\n"
- "\tIs the server running on host \"%s\" and accepting\n"
- "\tTCP/IP connections on port %s?\n"
+ "could not connect to server: %s\n"
+ "\tIs the server running on host \"%s\" and accepting\n"
+ "\tTCP/IP connections on port %s?\n"
),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)),
conn->pghostaddr
conn->status = CONNECTION_NEEDED;
/*
- * The code for processing CONNECTION_NEEDED state is in
- * PQconnectPoll(), so that it can easily be re-executed if needed
- * again during the asynchronous startup process. However, we must
- * run it once here, because callers expect a success return from this
- * routine to mean that we are in PGRES_POLLING_WRITING connection
- * state.
+ * The code for processing CONNECTION_NEEDED state is in PQconnectPoll(),
+ * so that it can easily be re-executed if needed again during the
+ * asynchronous startup process. However, we must run it once here,
+ * because callers expect a success return from this routine to mean that
+ * we are in PGRES_POLLING_WRITING connection state.
*/
if (PQconnectPoll(conn) == PGRES_POLLING_WRITING)
return 1;
if (timeout > 0)
{
/*
- * Rounding could cause connection to fail; need at least 2
- * secs
+ * Rounding could cause connection to fail; need at least 2 secs
*/
if (timeout < 2)
timeout = 2;
{
/*
* Wait, if necessary. Note that the initial state (just after
- * PQconnectStart) is to wait for the socket to select for
- * writing.
+ * PQconnectStart) is to wait for the socket to select for writing.
*/
switch (flag)
{
switch (conn->status)
{
/*
- * We really shouldn't have been polled in these two cases,
- * but we can handle it.
+ * We really shouldn't have been polled in these two cases, but we
+ * can handle it.
*/
case CONNECTION_BAD:
return PGRES_POLLING_FAILED;
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
"invalid connection state, "
- "probably indicative of memory corruption\n"
+ "probably indicative of memory corruption\n"
));
goto error_return;
}
-keep_going: /* We will come back to here until there
- * is nothing left to do. */
+keep_going: /* We will come back to here until there is
+ * nothing left to do. */
switch (conn->status)
{
case CONNECTION_NEEDED:
{
/*
* Try to initiate a connection to one of the addresses
- * returned by getaddrinfo_all(). conn->addr_cur is the
- * next one to try. We fail when we run out of addresses
- * (reporting the error returned for the *last*
- * alternative, which may not be what users expect :-().
+ * returned by getaddrinfo_all(). conn->addr_cur is the next
+ * one to try. We fail when we run out of addresses
+ * (reporting the error returned for the *last* alternative,
+ * which may not be what users expect :-().
*/
while (conn->addr_cur != NULL)
{
if (conn->sock < 0)
{
/*
- * ignore socket() failure if we have more
- * addresses to try
+ * ignore socket() failure if we have more addresses
+ * to try
*/
if (addr_cur->ai_next != NULL)
{
continue;
}
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not create socket: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not create socket: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
break;
}
/*
- * Select socket options: no delay of outgoing data
- * for TCP sockets, nonblock mode, close-on-exec.
- * Fail if any of this fails.
+ * Select socket options: no delay of outgoing data for
+ * TCP sockets, nonblock mode, close-on-exec. Fail if any
+ * of this fails.
*/
if (!IS_AF_UNIX(addr_cur->ai_family))
{
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not set socket to non-blocking mode: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
closesocket(conn->sock);
conn->sock = -1;
conn->addr_cur = addr_cur->ai_next;
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not set socket to close-on-exec mode: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
closesocket(conn->sock);
conn->sock = -1;
conn->addr_cur = addr_cur->ai_next;
continue;
}
-#endif /* F_SETFD */
+#endif /* F_SETFD */
/*
- * Start/make connection. This should not block,
- * since we are in nonblock mode. If it does, well,
- * too bad.
+ * Start/make connection. This should not block, since we
+ * are in nonblock mode. If it does, well, too bad.
*/
if (connect(conn->sock, addr_cur->ai_addr,
addr_cur->ai_addrlen) < 0)
SOCK_ERRNO == 0)
{
/*
- * This is fine - we're in non-blocking mode,
- * and the connection is in progress. Tell
- * caller to wait for write-ready on socket.
+ * This is fine - we're in non-blocking mode, and
+ * the connection is in progress. Tell caller to
+ * wait for write-ready on socket.
*/
conn->status = CONNECTION_STARTED;
return PGRES_POLLING_WRITING;
else
{
/*
- * Hm, we're connected already --- seems the
- * "nonblock connection" wasn't. Advance the
- * state machine and go do the next stuff.
+ * Hm, we're connected already --- seems the "nonblock
+ * connection" wasn't. Advance the state machine and
+ * go do the next stuff.
*/
conn->status = CONNECTION_STARTED;
goto keep_going;
}
/*
- * This connection failed --- set up error report,
- * then close socket (do it this way in case close()
- * affects the value of errno...). We will ignore the
- * connect() failure and keep going if there are more
- * addresses.
+ * This connection failed --- set up error report, then
+ * close socket (do it this way in case close() affects
+ * the value of errno...). We will ignore the connect()
+ * failure and keep going if there are more addresses.
*/
connectFailureMessage(conn, SOCK_ERRNO);
if (conn->sock >= 0)
} /* loop over addresses */
/*
- * Ooops, no more addresses. An appropriate error message
- * is already set up, so just set the right status.
+ * Ooops, no more addresses. An appropriate error message is
+ * already set up, so just set the right status.
*/
goto error_return;
}
ACCEPT_TYPE_ARG3 optlen = sizeof(optval);
/*
- * Write ready, since we've made it here, so the
- * connection has been made ... or has failed.
+ * Write ready, since we've made it here, so the connection
+ * has been made ... or has failed.
*/
/*
(char *) &optval, &optlen) == -1)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not get socket error status: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not get socket error status: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
else if (optval != 0)
{
/*
- * When using a nonblocking connect, we will typically
- * see connect failures at this point, so provide a
- * friendly error message.
+ * When using a nonblocking connect, we will typically see
+ * connect failures at this point, so provide a friendly
+ * error message.
*/
connectFailureMessage(conn, optval);
/*
- * If more addresses remain, keep trying, just as in
- * the case where connect() returned failure
- * immediately.
+ * If more addresses remain, keep trying, just as in the
+ * case where connect() returned failure immediately.
*/
if (conn->addr_cur->ai_next != NULL)
{
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get client address from socket: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
#ifdef USE_SSL
/*
- * If SSL is enabled and we haven't already got it
- * running, request it instead of sending the startup
- * message.
+ * If SSL is enabled and we haven't already got it running,
+ * request it instead of sending the startup message.
*/
if (IS_AF_UNIX(conn->raddr.addr.ss_family))
{
/*
* Send the SSL request packet.
*
- * Theoretically, this could block, but it really
- * shouldn't since we only got here if the socket is
- * write-ready.
+ * Theoretically, this could block, but it really shouldn't
+ * since we only got here if the socket is write-ready.
*/
pv = htonl(NEGOTIATE_SSL_CODE);
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send SSL negotiation packet: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
goto error_return;
}
/* Ok, wait for response */
*/
if (PG_PROTOCOL_MAJOR(conn->pversion) >= 3)
startpacket = pqBuildStartupPacket3(conn, &packetlen,
- EnvironmentOptions);
+ EnvironmentOptions);
else
startpacket = pqBuildStartupPacket2(conn, &packetlen,
- EnvironmentOptions);
+ EnvironmentOptions);
if (!startpacket)
{
printfPQExpBuffer(&conn->errorMessage,
/*
* Send the startup packet.
*
- * Theoretically, this could block, but it really shouldn't
- * since we only got here if the socket is write-ready.
+ * Theoretically, this could block, but it really shouldn't since
+ * we only got here if the socket is write-ready.
*/
if (pqPacketSend(conn, 0, startpacket, packetlen) != STATUS_OK)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not send startup packet: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not send startup packet: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
free(startpacket);
goto error_return;
}
PostgresPollingStatusType pollres;
/*
- * On first time through, get the postmaster's response to
- * our SSL negotiation packet.
+ * On first time through, get the postmaster's response to our
+ * SSL negotiation packet.
*/
if (conn->ssl == NULL)
{
/*
* We use pqReadData here since it has the logic to
- * distinguish no-data-yet from connection closure.
- * Since conn->ssl isn't set, a plain recv() will occur.
+ * distinguish no-data-yet from connection closure. Since
+ * conn->ssl isn't set, a plain recv() will occur.
*/
char SSLok;
int rdresult;
}
/*
- * Handle authentication exchange: wait for postmaster
- * messages and respond as necessary.
+ * Handle authentication exchange: wait for postmaster messages
+ * and respond as necessary.
*/
case CONNECTION_AWAITING_RESPONSE:
{
AuthRequest areq;
/*
- * Scan the message from current point (note that if we
- * find the message is incomplete, we will return without
- * advancing inStart, and resume here next time).
+ * Scan the message from current point (note that if we find
+ * the message is incomplete, we will return without advancing
+ * inStart, and resume here next time).
*/
conn->inCursor = conn->inStart;
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "expected authentication request from "
- "server, but received %c\n"),
+ "expected authentication request from "
+ "server, but received %c\n"),
beresp);
goto error_return;
}
/*
* Try to validate message length before using it.
- * Authentication requests can't be very large. Errors
- * can be a little larger, but not huge. If we see a
- * large apparent length in an error, it means we're
- * really talking to a pre-3.0-protocol server; cope.
+ * Authentication requests can't be very large. Errors can be
+ * a little larger, but not huge. If we see a large apparent
+ * length in an error, it means we're really talking to a
+ * pre-3.0-protocol server; cope.
*/
if (beresp == 'R' && (msgLength < 8 || msgLength > 100))
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "expected authentication request from "
- "server, but received %c\n"),
+ "expected authentication request from "
+ "server, but received %c\n"),
beresp);
goto error_return;
}
conn->inStart = conn->inCursor;
/*
- * The postmaster typically won't end its message with
- * a newline, so add one to conform to libpq
- * conventions.
+ * The postmaster typically won't end its message with a
+ * newline, so add one to conform to libpq conventions.
*/
appendPQExpBufferChar(&conn->errorMessage, '\n');
/*
* Can't process if message body isn't all here yet.
*
- * (In protocol 2.0 case, we are assuming messages carry at
- * least 4 bytes of data.)
+ * (In protocol 2.0 case, we are assuming messages carry at least
+ * 4 bytes of data.)
*/
msgLength -= 4;
avail = conn->inEnd - conn->inCursor;
if (avail < msgLength)
{
/*
- * Before returning, try to enlarge the input buffer
- * if needed to hold the whole message; see notes in
+ * Before returning, try to enlarge the input buffer if
+ * needed to hold the whole message; see notes in
* pqParseInput3.
*/
if (pqCheckInBufferSpace(conn->inCursor + msgLength, conn))
/*
* if sslmode is "allow" and we haven't tried an SSL
- * connection already, then retry with an SSL
- * connection
+ * connection already, then retry with an SSL connection
*/
if (conn->sslmode[0] == 'a' /* "allow" */
&& conn->ssl == NULL
}
/*
- * if sslmode is "prefer" and we're in an SSL
- * connection, then do a non-SSL retry
+ * if sslmode is "prefer" and we're in an SSL connection,
+ * then do a non-SSL retry
*/
if (conn->sslmode[0] == 'p' /* "prefer" */
&& conn->ssl
}
/*
- * OK, we successfully read the message; mark data
- * consumed
+ * OK, we successfully read the message; mark data consumed
*/
conn->inStart = conn->inCursor;
/* Respond to the request if necessary. */
/*
- * Note that conn->pghost must be non-NULL if we are going
- * to avoid the Kerberos code doing a hostname look-up.
+ * Note that conn->pghost must be non-NULL if we are going to
+ * avoid the Kerberos code doing a hostname look-up.
*/
/*
- * XXX fe-auth.c has not been fixed to support
- * PQExpBuffers, so:
+ * XXX fe-auth.c has not been fixed to support PQExpBuffers,
+ * so:
*/
if (fe_sendauth(areq, conn, conn->pghost, conn->pgpass,
conn->errorMessage.data) != STATUS_OK)
conn->errorMessage.len = strlen(conn->errorMessage.data);
/*
- * Just make sure that any data sent by fe_sendauth is
- * flushed out. Although this theoretically could block,
- * it really shouldn't since we don't send large auth
- * responses.
+ * Just make sure that any data sent by fe_sendauth is flushed
+ * out. Although this theoretically could block, it really
+ * shouldn't since we don't send large auth responses.
*/
if (pqFlush(conn))
goto error_return;
{
/*
* Now we expect to hear from the backend. A ReadyForQuery
- * message indicates that startup is successful, but we
- * might also get an Error message indicating failure.
- * (Notice messages indicating nonfatal warnings are also
- * allowed by the protocol, as are ParameterStatus and
- * BackendKeyData messages.) Easiest way to handle this is
- * to let PQgetResult() read the messages. We just have to
- * fake it out about the state of the connection, by
- * setting asyncStatus = PGASYNC_BUSY (done above).
+ * message indicates that startup is successful, but we might
+ * also get an Error message indicating failure. (Notice
+ * messages indicating nonfatal warnings are also allowed by
+ * the protocol, as are ParameterStatus and BackendKeyData
+ * messages.) Easiest way to handle this is to let
+ * PQgetResult() read the messages. We just have to fake it
+ * out about the state of the connection, by setting
+ * asyncStatus = PGASYNC_BUSY (done above).
*/
if (PQisBusy(conn))
libpq_gettext("unexpected message from server during startup\n"));
/*
- * if the resultStatus is FATAL, then
- * conn->errorMessage already has a copy of the error;
- * needn't copy it back. But add a newline if it's not
- * there already, since postmaster error messages may
- * not have one.
+ * if the resultStatus is FATAL, then conn->errorMessage
+ * already has a copy of the error; needn't copy it back.
+ * But add a newline if it's not there already, since
+ * postmaster error messages may not have one.
*/
if (conn->errorMessage.len <= 0 ||
conn->errorMessage.data[conn->errorMessage.len - 1] != '\n')
case CONNECTION_SETENV:
/*
- * Do post-connection housekeeping (only needed in protocol
- * 2.0).
+ * Do post-connection housekeeping (only needed in protocol 2.0).
*
* We pretend that the connection is OK for the duration of these
* queries.
default:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "invalid connection state %c, "
- "probably indicative of memory corruption\n"
+ "invalid connection state %c, "
+ "probably indicative of memory corruption\n"
),
conn->status);
goto error_return;
error_return:
/*
- * We used to close the socket at this point, but that makes it
- * awkward for those above us if they wish to remove this socket from
- * their own records (an fd_set for example). We'll just have this
- * socket closed when PQfinish is called (which is compulsory even
- * after an error, since the connection structure must be freed).
+ * We used to close the socket at this point, but that makes it awkward
+ * for those above us if they wish to remove this socket from their own
+ * records (an fd_set for example). We'll just have this socket closed
+ * when PQfinish is called (which is compulsory even after an error, since
+ * the connection structure must be freed).
*/
conn->status = CONNECTION_BAD;
return PGRES_POLLING_FAILED;
PGconn *conn;
#ifdef WIN32
+
/*
- * Make sure socket support is up and running.
- * Even though this is done in libpqdll.c, that is only for MSVC and
- * BCC builds and doesn't work for static builds at all, so we have
- * to do it in the main code too.
+ * Make sure socket support is up and running. Even though this is done in
+ * libpqdll.c, that is only for MSVC and BCC builds and doesn't work for
+ * static builds at all, so we have to do it in the main code too.
*/
WSADATA wsaData;
#endif
/*
- * We try to send at least 8K at a time, which is the usual size of
- * pipe buffers on Unix systems. That way, when we are sending a
- * large amount of data, we avoid incurring extra kernel context swaps
- * for partial bufferloads. The output buffer is initially made 16K
- * in size, and we try to dump it after accumulating 8K.
+ * We try to send at least 8K at a time, which is the usual size of pipe
+ * buffers on Unix systems. That way, when we are sending a large amount
+ * of data, we avoid incurring extra kernel context swaps for partial
+ * bufferloads. The output buffer is initially made 16K in size, and we
+ * try to dump it after accumulating 8K.
*
- * With the same goal of minimizing context swaps, the input buffer will
- * be enlarged anytime it has less than 8K free, so we initially
- * allocate twice that.
+ * With the same goal of minimizing context swaps, the input buffer will be
+ * enlarged anytime it has less than 8K free, so we initially allocate
+ * twice that.
*/
conn->inBufSize = 16 * 1024;
conn->inBuffer = (char *) malloc(conn->inBufSize);
notify = conn->notifyHead;
while (notify != NULL)
{
- PGnotify *prev = notify;
+ PGnotify *prev = notify;
notify = notify->next;
free(prev);
}
/*
- * must reset the blocking status so a possible reconnect will work
- * don't call PQsetnonblocking() because it will fail if it's unable
- * to flush the connection.
+ * must reset the blocking status so a possible reconnect will work don't
+ * call PQsetnonblocking() because it will fail if it's unable to flush
+ * the connection.
*/
conn->nonblocking = FALSE;
notify = conn->notifyHead;
while (notify != NULL)
{
- PGnotify *prev = notify;
+ PGnotify *prev = notify;
notify = notify->next;
free(prev);
PGcancel *
PQgetCancel(PGconn *conn)
{
- PGcancel *cancel;
+ PGcancel *cancel;
if (!conn)
return NULL;
int save_errno = SOCK_ERRNO;
int tmpsock = -1;
char sebuf[256];
- int maxlen;
+ int maxlen;
struct
{
uint32 packetlen;
} crp;
/*
- * We need to open a temporary connection to the postmaster. Do
- * this with only kernel calls.
+ * We need to open a temporary connection to the postmaster. Do this with
+ * only kernel calls.
*/
if ((tmpsock = socket(raddr->addr.ss_family, SOCK_STREAM, 0)) < 0)
{
}
/*
- * Wait for the postmaster to close the connection, which indicates
- * that it's processed the request. Without this delay, we might
- * issue another command only to find that our cancel zaps that
- * command instead of the one we thought we were canceling. Note we
- * don't actually expect this read to obtain any data, we are just
- * waiting for EOF to be signaled.
+ * Wait for the postmaster to close the connection, which indicates that
+ * it's processed the request. Without this delay, we might issue another
+ * command only to find that our cancel zaps that command instead of the
+ * one we thought we were canceling. Note we don't actually expect this
+ * read to obtain any data, we are just waiting for EOF to be signaled.
*/
retry5:
if (recv(tmpsock, (char *) &crp, 1, 0) < 0)
return TRUE;
cancel_errReturn:
+
/*
- * Make sure we don't overflow the error buffer. Leave space for
- * the \n at the end, and for the terminating zero.
+ * Make sure we don't overflow the error buffer. Leave space for the \n at
+ * the end, and for the terminating zero.
*/
maxlen = errbufsize - strlen(errbuf) - 2;
if (maxlen >= 0)
* Returns TRUE if able to send the cancel request, FALSE if not.
*
* On failure, an error message is stored in *errbuf, which must be of size
- * errbufsize (recommended size is 256 bytes). *errbuf is not changed on
+ * errbufsize (recommended size is 256 bytes). *errbuf is not changed on
* success return.
*/
int
int
PQrequestCancel(PGconn *conn)
{
- int r;
+ int r;
/* Check we have an open connection */
if (!conn)
i;
/*
- * We have to special-case the environment variable PGSERVICE here,
- * since this is and should be called before inserting environment
- * defaults for other connection options.
+ * We have to special-case the environment variable PGSERVICE here, since
+ * this is and should be called before inserting environment defaults for
+ * other connection options.
*/
if (service == NULL)
service = getenv("PGSERVICE");
{
fclose(f);
printfPQExpBuffer(errorMessage,
- libpq_gettext("ERROR: line %d too long in service file \"%s\"\n"),
+ libpq_gettext("ERROR: line %d too long in service file \"%s\"\n"),
linenr,
serviceFile);
return 2;
if (group_found)
{
/*
- * Finally, we are in the right group and can parse
- * the line
+ * Finally, we are in the right group and can parse the
+ * line
*/
char *key,
*val;
*val++ = '\0';
/*
- * Set the parameter --- but don't override any
- * previous explicit setting.
+ * Set the parameter --- but don't override any previous
+ * explicit setting.
*/
found_keyword = false;
for (i = 0; options[i].keyword; i++)
}
/*
- * Now we have the name and the value. Search for the param
- * record.
+ * Now we have the name and the value. Search for the param record.
*/
for (option = options; option->keyword != NULL; option++)
{
if (option->keyword == NULL)
{
printfPQExpBuffer(errorMessage,
- libpq_gettext("invalid connection option \"%s\"\n"),
+ libpq_gettext("invalid connection option \"%s\"\n"),
pname);
PQconninfoFree(options);
free(buf);
free(buf);
/*
- * If there's a service spec, use it to obtain any
- * not-explicitly-given parameters.
+ * If there's a service spec, use it to obtain any not-explicitly-given
+ * parameters.
*/
if (parseServiceInfo(options, errorMessage))
{
}
/*
- * Get the fallback resources for parameters not specified in the
- * conninfo string nor the service.
+ * Get the fallback resources for parameters not specified in the conninfo
+ * string nor the service.
*/
for (option = options; option->keyword != NULL; option++)
{
FILE *fp;
char pgpassfile[MAXPGPATH];
struct stat stat_buf;
- char *passfile_env;
+ char *passfile_env;
#define LINELEN NAMEDATALEN*5
char buf[LINELEN];
if (!S_ISREG(stat_buf.st_mode))
{
fprintf(stderr,
- libpq_gettext("WARNING: password file \"%s\" is not a plain file\n"),
+ libpq_gettext("WARNING: password file \"%s\" is not a plain file\n"),
pgpassfile);
free(pgpassfile);
return NULL;
return false;
StrNCpy(buf, pwd->pw_dir, bufsize);
return true;
-
#else
char tmppath[MAX_PATH];
#ifdef ENABLE_THREAD_SAFETY
#ifndef WIN32
static pthread_mutex_t singlethread_lock = PTHREAD_MUTEX_INITIALIZER;
-
#else
static pthread_mutex_t singlethread_lock = NULL;
static long mutex_initlock = 0;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.175 2005/09/24 17:53:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.176 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#endif
/* keep this in same order as ExecStatusType in libpq-fe.h */
-char *const pgresStatus[] = {
+char *const pgresStatus[] = {
"PGRES_EMPTY_QUERY",
"PGRES_COMMAND_OK",
"PGRES_TUPLES_OK",
return res->null_field;
/*
- * If alignment is needed, round up the current position to an
- * alignment boundary.
+ * If alignment is needed, round up the current position to an alignment
+ * boundary.
*/
if (isBinary)
{
/*
* If the requested object is very large, give it its own block; this
- * avoids wasting what might be most of the current block to start a
- * new block. (We'd have to special-case requests bigger than the
- * block size anyway.) The object is always given binary alignment in
- * this case.
+ * avoids wasting what might be most of the current block to start a new
+ * block. (We'd have to special-case requests bigger than the block size
+ * anyway.) The object is always given binary alignment in this case.
*/
if (nBytes >= PGRESULT_SEP_ALLOC_THRESHOLD)
{
pqSaveErrorResult(PGconn *conn)
{
/*
- * If no old async result, just let PQmakeEmptyPGresult make one.
- * Likewise if old result is not an error message.
+ * If no old async result, just let PQmakeEmptyPGresult make one. Likewise
+ * if old result is not an error message.
*/
if (conn->result == NULL ||
conn->result->resultStatus != PGRES_FATAL_ERROR ||
PGresult *res;
/*
- * conn->result is the PGresult to return. If it is NULL (which
- * probably shouldn't happen) we assume there is an appropriate error
- * message in conn->errorMessage.
+ * conn->result is the PGresult to return. If it is NULL (which probably
+ * shouldn't happen) we assume there is an appropriate error message in
+ * conn->errorMessage.
*/
res = conn->result;
conn->result = NULL; /* handing over ownership to caller */
else
{
/*
- * Make sure PQerrorMessage agrees with result; it could be
- * different if we have concatenated messages.
+ * Make sure PQerrorMessage agrees with result; it could be different
+ * if we have concatenated messages.
*/
resetPQExpBuffer(&conn->errorMessage);
appendPQExpBufferStr(&conn->errorMessage,
* a trailing newline, and should not be more than one line).
*/
void
-pqInternalNotice(const PGNoticeHooks *hooks, const char *fmt, ...)
+pqInternalNotice(const PGNoticeHooks * hooks, const char *fmt,...)
{
char msgBuf[1024];
va_list args;
/* XXX should provide a SQLSTATE too? */
/*
- * Result text is always just the primary message + newline. If we
- * can't allocate it, don't bother invoking the receiver.
+ * Result text is always just the primary message + newline. If we can't
+ * allocate it, don't bother invoking the receiver.
*/
res->errMsg = (char *) pqResultAlloc(res, strlen(msgBuf) + 2, FALSE);
if (res->errMsg)
* Returns TRUE if OK, FALSE if not enough memory to add the row
*/
int
-pqAddTuple(PGresult *res, PGresAttValue *tup)
+pqAddTuple(PGresult *res, PGresAttValue * tup)
{
if (res->ntups >= res->tupArrSize)
{
/*
* Try to grow the array.
*
- * We can use realloc because shallow copying of the structure is
- * okay. Note that the first time through, res->tuples is NULL.
- * While ANSI says that realloc() should act like malloc() in that
- * case, some old C libraries (like SunOS 4.1.x) coredump instead.
- * On failure realloc is supposed to return NULL without damaging
- * the existing allocation. Note that the positions beyond
- * res->ntups are garbage, not necessarily NULL.
+ * We can use realloc because shallow copying of the structure is okay.
+ * Note that the first time through, res->tuples is NULL. While ANSI
+ * says that realloc() should act like malloc() in that case, some old
+ * C libraries (like SunOS 4.1.x) coredump instead. On failure realloc
+ * is supposed to return NULL without damaging the existing
+ * allocation. Note that the positions beyond res->ntups are garbage,
+ * not necessarily NULL.
*/
int newSize = (res->tupArrSize > 0) ? res->tupArrSize * 2 : 128;
PGresAttValue **newTuples;
* Store new info as a single malloc block
*/
pstatus = (pgParameterStatus *) malloc(sizeof(pgParameterStatus) +
- strlen(name) +strlen(value) + 2);
+ strlen(name) + strlen(value) + 2);
if (pstatus)
{
char *ptr;
}
/*
- * Special hacks: remember client_encoding as a numeric value, and
- * convert server version to a numeric form as well.
+ * Special hacks: remember client_encoding as a numeric value, and convert
+ * server version to a numeric form as well.
*/
if (strcmp(name, "client_encoding") == 0)
conn->client_encoding = pg_char_to_encoding(value);
if (!query)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("command string is a null pointer\n"));
+ libpq_gettext("command string is a null pointer\n"));
return 0;
}
conn->queryclass = PGQUERY_SIMPLE;
/*
- * Give the data a push. In nonblock mode, don't complain if we're
- * unable to send it all; PQgetResult() will do any additional
- * flushing needed.
+ * Give the data a push. In nonblock mode, don't complain if we're unable
+ * to send it all; PQgetResult() will do any additional flushing needed.
*/
if (pqFlush(conn) < 0)
{
if (!command)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("command string is a null pointer\n"));
+ libpq_gettext("command string is a null pointer\n"));
return 0;
}
/*
* PQsendPrepare
- * Submit a Parse message, but don't wait for it to finish
+ * Submit a Parse message, but don't wait for it to finish
*
* Returns: 1 if successfully submitted
- * 0 if error (conn->errorMessage is set)
+ * 0 if error (conn->errorMessage is set)
*/
int
PQsendPrepare(PGconn *conn,
if (!stmtName)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("statement name is a null pointer\n"));
+ libpq_gettext("statement name is a null pointer\n"));
return 0;
}
if (!query)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("command string is a null pointer\n"));
+ libpq_gettext("command string is a null pointer\n"));
return 0;
}
if (PG_PROTOCOL_MAJOR(conn->pversion) < 3)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("function requires at least protocol version 3.0\n"));
+ libpq_gettext("function requires at least protocol version 3.0\n"));
return 0;
}
if (nParams > 0 && paramTypes)
{
- int i;
+ int i;
if (pqPutInt(nParams, 2, conn) < 0)
goto sendFailed;
conn->queryclass = PGQUERY_PREPARE;
/*
- * Give the data a push. In nonblock mode, don't complain if we're
- * unable to send it all; PQgetResult() will do any additional
- * flushing needed.
+ * Give the data a push. In nonblock mode, don't complain if we're unable
+ * to send it all; PQgetResult() will do any additional flushing needed.
*/
if (pqFlush(conn) < 0)
goto sendFailed;
if (!stmtName)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("statement name is a null pointer\n"));
+ libpq_gettext("statement name is a null pointer\n"));
return 0;
}
if (conn->asyncStatus != PGASYNC_IDLE)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("another command is already in progress\n"));
+ libpq_gettext("another command is already in progress\n"));
return false;
}
if (PG_PROTOCOL_MAJOR(conn->pversion) < 3)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("function requires at least protocol version 3.0\n"));
+ libpq_gettext("function requires at least protocol version 3.0\n"));
return 0;
}
/*
- * We will send Parse (if needed), Bind, Describe Portal, Execute,
- * Sync, using specified statement name and the unnamed portal.
+ * We will send Parse (if needed), Bind, Describe Portal, Execute, Sync,
+ * using specified statement name and the unnamed portal.
*/
if (command)
conn->queryclass = PGQUERY_EXTENDED;
/*
- * Give the data a push. In nonblock mode, don't complain if we're
- * unable to send it all; PQgetResult() will do any additional
- * flushing needed.
+ * Give the data a push. In nonblock mode, don't complain if we're unable
+ * to send it all; PQgetResult() will do any additional flushing needed.
*/
if (pqFlush(conn) < 0)
goto sendFailed;
{
/*
* Accept any available input data, ignoring errors. Note that if
- * pqReadData decides the backend has closed the channel, it will
- * close our side of the socket --- that's just what we want here.
+ * pqReadData decides the backend has closed the channel, it will close
+ * our side of the socket --- that's just what we want here.
*/
while (pqReadData(conn) > 0)
/* loop until no more data readable */ ;
return 0;
/*
- * for non-blocking connections try to flush the send-queue, otherwise
- * we may never get a response for something that may not have already
- * been sent because it's in our write buffer!
+ * for non-blocking connections try to flush the send-queue, otherwise we
+ * may never get a response for something that may not have already been
+ * sent because it's in our write buffer!
*/
if (pqIsnonblocking(conn))
{
}
/*
- * Load more data, if available. We do this no matter what state we
- * are in, since we are probably getting called because the
- * application wants to get rid of a read-select condition. Note that
- * we will NOT block waiting for more input.
+ * Load more data, if available. We do this no matter what state we are
+ * in, since we are probably getting called because the application wants
+ * to get rid of a read-select condition. Note that we will NOT block
+ * waiting for more input.
*/
if (pqReadData(conn) < 0)
return 0;
int flushResult;
/*
- * If data remains unsent, send it. Else we might be waiting for
- * the result of a command the backend hasn't even got yet.
+ * If data remains unsent, send it. Else we might be waiting for the
+ * result of a command the backend hasn't even got yet.
*/
while ((flushResult = pqFlush(conn)) > 0)
{
break;
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("unexpected asyncStatus: %d\n"),
+ libpq_gettext("unexpected asyncStatus: %d\n"),
(int) conn->asyncStatus);
res = PQmakeEmptyPGresult(conn, PGRES_FATAL_ERROR);
break;
/*
* PQprepare
- * Creates a prepared statement by issuing a v3.0 parse message.
+ * Creates a prepared statement by issuing a v3.0 parse message.
*
* If the query was not even sent, return NULL; conn->errorMessage is set to
* a relevant message.
return false;
/*
- * Silently discard any prior query result that application didn't
- * eat. This is probably poor design, but it's here for backward
- * compatibility.
+ * Silently discard any prior query result that application didn't eat.
+ * This is probably poor design, but it's here for backward compatibility.
*/
while ((result = PQgetResult(conn)) != NULL)
{
{
/* In protocol 3, we can get out of a COPY IN state */
if (PQputCopyEnd(conn,
- libpq_gettext("COPY terminated by new PQexec")) < 0)
+ libpq_gettext("COPY terminated by new PQexec")) < 0)
return false;
/* keep waiting to swallow the copy's failure message */
}
{
/* In older protocols we have to punt */
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("COPY IN state must be terminated first\n"));
+ libpq_gettext("COPY IN state must be terminated first\n"));
return false;
}
}
if (PG_PROTOCOL_MAJOR(conn->pversion) >= 3)
{
/*
- * In protocol 3, we can get out of a COPY OUT state: we
- * just switch back to BUSY and allow the remaining COPY
- * data to be dropped on the floor.
+ * In protocol 3, we can get out of a COPY OUT state: we just
+ * switch back to BUSY and allow the remaining COPY data to be
+ * dropped on the floor.
*/
conn->asyncStatus = PGASYNC_BUSY;
/* keep waiting to swallow the copy's completion message */
{
/* In older protocols we have to punt */
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("COPY OUT state must be terminated first\n"));
+ libpq_gettext("COPY OUT state must be terminated first\n"));
return false;
}
}
PGresult *lastResult;
/*
- * For backwards compatibility, return the last result if there are
- * more than one --- but merge error messages if we get more than one
- * error result.
+ * For backwards compatibility, return the last result if there are more
+ * than one --- but merge error messages if we get more than one error
+ * result.
*
* We have to stop if we see copy in/out, however. We will resume parsing
* after application performs the data transfer.
result = lastResult;
/*
- * Make sure PQerrorMessage agrees with concatenated
- * result
+ * Make sure PQerrorMessage agrees with concatenated result
*/
resetPQExpBuffer(&conn->errorMessage);
appendPQExpBufferStr(&conn->errorMessage, result->errMsg);
if (nbytes > 0)
{
/*
- * Try to flush any previously sent data in preference to growing
- * the output buffer. If we can't enlarge the buffer enough to
- * hold the data, return 0 in the nonblock case, else hard error.
- * (For simplicity, always assume 5 bytes of overhead even in
- * protocol 2.0 case.)
+ * Try to flush any previously sent data in preference to growing the
+ * output buffer. If we can't enlarge the buffer enough to hold the
+ * data, return 0 in the nonblock case, else hard error. (For
+ * simplicity, always assume 5 bytes of overhead even in protocol 2.0
+ * case.)
*/
if ((conn->outBufSize - conn->outCount - 5) < nbytes)
{
}
/*
- * If we sent the COPY command in extended-query mode, we must
- * issue a Sync as well.
+ * If we sent the COPY command in extended-query mode, we must issue a
+ * Sync as well.
*/
if (conn->queryclass != PGQUERY_SIMPLE)
{
return -1;
/*
- * Note: it is correct to reject a zero-length input string; the
- * proper input to match a zero-length field name would be "".
+ * Note: it is correct to reject a zero-length input string; the proper
+ * input to match a zero-length field name would be "".
*/
if (field_name == NULL ||
field_name[0] == '\0' ||
/*
* Note: this code will not reject partially quoted strings, eg
- * foo"BAR"foo will become fooBARfoo when it probably ought to be an
- * error condition.
+ * foo"BAR"foo will become fooBARfoo when it probably ought to be an error
+ * condition.
*/
field_case = strdup(field_name);
if (field_case == NULL)
char *endptr = NULL;
unsigned long result;
- if (!res ||
- !res->cmdStatus ||
- strncmp(res->cmdStatus, "INSERT ", 7) != 0 ||
- res->cmdStatus[7] < '0' ||
- res->cmdStatus[7] > '9')
+ if (!res ||
+ !res->cmdStatus ||
+ strncmp(res->cmdStatus, "INSERT ", 7) != 0 ||
+ res->cmdStatus[7] < '0' ||
+ res->cmdStatus[7] > '9')
return InvalidOid;
result = strtoul(res->cmdStatus + 7, &endptr, 10);
return (0);
/*
- * to guarantee constancy for flushing/query/result-polling behavior
- * we need to flush the send queue at this point in order to guarantee
- * proper behavior. this is ok because either they are making a
- * transition _from_ or _to_ blocking mode, either way we can block
- * them.
+ * to guarantee constancy for flushing/query/result-polling behavior we
+ * need to flush the send queue at this point in order to guarantee proper
+ * behavior. this is ok because either they are making a transition _from_
+ * or _to_ blocking mode, either way we can block them.
*/
/* if we are going from blocking to non-blocking flush here */
if (pqFlush(conn))
/*
* Note: if we see '\' followed by something that isn't a
* recognized escape sequence, we loop around having done
- * nothing except advance i. Therefore the something will
- * be emitted as ordinary data on the next cycle. Corner
- * case: '\' at end of string will just be discarded.
+ * nothing except advance i. Therefore the something will be
+ * emitted as ordinary data on the next cycle. Corner case:
+ * '\' at end of string will just be discarded.
*/
break;
break;
}
}
- buflen = j; /* buflen is the length of the dequoted
- * data */
+ buflen = j; /* buflen is the length of the dequoted data */
/* Shrink the buffer to be no larger than necessary */
/* +1 avoids unportable behavior when buflen==0 */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-lobj.c,v 1.53 2005/06/13 02:26:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-lobj.c,v 1.54 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (conn->lobjfuncs->fn_lo_create == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function lo_create\n"));
+ libpq_gettext("cannot determine OID of function lo_create\n"));
return InvalidOid;
}
char sebuf[256];
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not open file \"%s\": %s\n"),
- filename, pqStrerror(errno, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not open file \"%s\": %s\n"),
+ filename, pqStrerror(errno, sebuf, sizeof(sebuf)));
return InvalidOid;
}
if (lobjOid == InvalidOid)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not create large object for file \"%s\"\n"),
+ libpq_gettext("could not create large object for file \"%s\"\n"),
filename);
(void) close(fd);
return InvalidOid;
if (lobj == -1)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not open large object %u\n"),
+ libpq_gettext("could not open large object %u\n"),
lobjOid);
(void) close(fd);
return InvalidOid;
if (tmp < nbytes)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("error while reading file \"%s\"\n"),
+ libpq_gettext("error while reading file \"%s\"\n"),
filename);
(void) close(fd);
(void) lo_close(conn, lobj);
if (lobj == -1)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not open large object %u\n"), lobjId);
+ libpq_gettext("could not open large object %u\n"), lobjId);
return -1;
}
char sebuf[256];
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not open file \"%s\": %s\n"),
- filename, pqStrerror(errno, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not open file \"%s\": %s\n"),
+ filename, pqStrerror(errno, sebuf, sizeof(sebuf)));
(void) lo_close(conn, lobj);
return -1;
}
if (tmp < nbytes)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("error while writing to file \"%s\"\n"),
+ libpq_gettext("error while writing to file \"%s\"\n"),
filename);
(void) lo_close(conn, lobj);
(void) close(fd);
if (close(fd))
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("error while writing to file \"%s\"\n"),
+ libpq_gettext("error while writing to file \"%s\"\n"),
filename);
return -1;
}
MemSet((char *) lobjfuncs, 0, sizeof(PGlobjfuncs));
/*
- * Execute the query to get all the functions at once. In 7.3 and
- * later we need to be schema-safe. lo_create only exists in 8.1
- * and up.
+ * Execute the query to get all the functions at once. In 7.3 and later
+ * we need to be schema-safe. lo_create only exists in 8.1 and up.
*/
if (conn->sversion >= 70300)
query = "select proname, oid from pg_catalog.pg_proc "
PQclear(res);
/*
- * Finally check that we really got all large object interface
- * functions --- except lo_create, which may not exist.
+ * Finally check that we really got all large object interface functions
+ * --- except lo_create, which may not exist.
*/
if (lobjfuncs->fn_lo_open == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function lo_open\n"));
+ libpq_gettext("cannot determine OID of function lo_open\n"));
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_close == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function lo_close\n"));
+ libpq_gettext("cannot determine OID of function lo_close\n"));
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_creat == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function lo_creat\n"));
+ libpq_gettext("cannot determine OID of function lo_creat\n"));
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_unlink == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function lo_unlink\n"));
+ libpq_gettext("cannot determine OID of function lo_unlink\n"));
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_lseek == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function lo_lseek\n"));
+ libpq_gettext("cannot determine OID of function lo_lseek\n"));
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_tell == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function lo_tell\n"));
+ libpq_gettext("cannot determine OID of function lo_tell\n"));
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_read == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function loread\n"));
+ libpq_gettext("cannot determine OID of function loread\n"));
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_write == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function lowrite\n"));
+ libpq_gettext("cannot determine OID of function lowrite\n"));
free(lobjfuncs);
return -1;
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.121 2005/09/26 17:49:09 petere Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.122 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
break;
default:
pqInternalNotice(&conn->noticeHooks,
- "integer of size %lu not supported by pqGetInt",
+ "integer of size %lu not supported by pqGetInt",
(unsigned long) bytes);
return EOF;
}
break;
default:
pqInternalNotice(&conn->noticeHooks,
- "integer of size %lu not supported by pqPutInt",
+ "integer of size %lu not supported by pqPutInt",
(unsigned long) bytes);
return EOF;
}
return 0;
/*
- * If we need to enlarge the buffer, we first try to double it in
- * size; if that doesn't work, enlarge in multiples of 8K. This
- * avoids thrashing the malloc pool by repeated small enlargements.
+ * If we need to enlarge the buffer, we first try to double it in size; if
+ * that doesn't work, enlarge in multiples of 8K. This avoids thrashing
+ * the malloc pool by repeated small enlargements.
*
* Note: tests for newsize > 0 are to catch integer overflow.
*/
return 0;
/*
- * If we need to enlarge the buffer, we first try to double it in
- * size; if that doesn't work, enlarge in multiples of 8K. This
- * avoids thrashing the malloc pool by repeated small enlargements.
+ * If we need to enlarge the buffer, we first try to double it in size; if
+ * that doesn't work, enlarge in multiples of 8K. This avoids thrashing
+ * the malloc pool by repeated small enlargements.
*
* Note: tests for newsize > 0 are to catch integer overflow.
*/
}
/*
- * If the buffer is fairly full, enlarge it. We need to be able to
- * enlarge the buffer in case a single message exceeds the initial
- * buffer size. We enlarge before filling the buffer entirely so as
- * to avoid asking the kernel for a partial packet. The magic constant
- * here should be large enough for a TCP packet or Unix pipe
- * bufferload. 8K is the usual pipe buffer size, so...
+ * If the buffer is fairly full, enlarge it. We need to be able to enlarge
+ * the buffer in case a single message exceeds the initial buffer size.
+ * We enlarge before filling the buffer entirely so as to avoid asking the
+ * kernel for a partial packet. The magic constant here should be large
+ * enough for a TCP packet or Unix pipe bufferload. 8K is the usual pipe
+ * buffer size, so...
*/
if (conn->inBufSize - conn->inEnd < 8192)
{
if (pqCheckInBufferSpace(conn->inEnd + 8192, conn))
{
/*
- * We don't insist that the enlarge worked, but we need some
- * room
+ * We don't insist that the enlarge worked, but we need some room
*/
if (conn->inBufSize - conn->inEnd < 100)
return -1; /* errorMessage already set */
goto definitelyFailed;
#endif
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not receive data from server: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not receive data from server: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
return -1;
}
if (nread > 0)
conn->inEnd += nread;
/*
- * Hack to deal with the fact that some kernels will only give us
- * back 1 packet per recv() call, even if we asked for more and
- * there is more available. If it looks like we are reading a
- * long message, loop back to recv() again immediately, until we
- * run out of data or buffer space. Without this, the
- * block-and-restart behavior of libpq's higher levels leads to
- * O(N^2) performance on long messages.
+ * Hack to deal with the fact that some kernels will only give us back
+ * 1 packet per recv() call, even if we asked for more and there is
+ * more available. If it looks like we are reading a long message,
+ * loop back to recv() again immediately, until we run out of data or
+ * buffer space. Without this, the block-and-restart behavior of
+ * libpq's higher levels leads to O(N^2) performance on long messages.
*
- * Since we left-justified the data above, conn->inEnd gives the
- * amount of data already read in the current message. We
- * consider the message "long" once we have acquired 32k ...
+ * Since we left-justified the data above, conn->inEnd gives the amount
+ * of data already read in the current message. We consider the
+ * message "long" once we have acquired 32k ...
*/
if (conn->inEnd > 32768 &&
(conn->inBufSize - conn->inEnd) >= 8192)
return 1; /* got a zero read after successful tries */
/*
- * A return value of 0 could mean just that no data is now available,
- * or it could mean EOF --- that is, the server has closed the
- * connection. Since we have the socket in nonblock mode, the only way
- * to tell the difference is to see if select() is saying that the
- * file is ready. Grumble. Fortunately, we don't expect this path to
- * be taken much, since in normal practice we should not be trying to
- * read data unless the file selected for reading already.
+ * A return value of 0 could mean just that no data is now available, or
+ * it could mean EOF --- that is, the server has closed the connection.
+ * Since we have the socket in nonblock mode, the only way to tell the
+ * difference is to see if select() is saying that the file is ready.
+ * Grumble. Fortunately, we don't expect this path to be taken much,
+ * since in normal practice we should not be trying to read data unless
+ * the file selected for reading already.
*
- * In SSL mode it's even worse: SSL_read() could say WANT_READ and then
- * data could arrive before we make the pqReadReady() test. So we
- * must play dumb and assume there is more data, relying on the SSL
- * layer to detect true EOF.
+ * In SSL mode it's even worse: SSL_read() could say WANT_READ and then data
+ * could arrive before we make the pqReadReady() test. So we must play
+ * dumb and assume there is more data, relying on the SSL layer to detect
+ * true EOF.
*/
#ifdef USE_SSL
goto definitelyFailed;
#endif
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not receive data from server: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not receive data from server: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
return -1;
}
if (nread > 0)
}
/*
- * OK, we are getting a zero read even though select() says ready.
- * This means the connection has been closed. Cope.
+ * OK, we are getting a zero read even though select() says ready. This
+ * means the connection has been closed. Cope.
*/
definitelyFailed:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.\n"));
+ "server closed the connection unexpectedly\n"
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.\n"));
conn->status = CONNECTION_BAD; /* No more connection to backend */
pqsecure_close(conn);
closesocket(conn->sock);
if (sent < 0)
{
/*
- * Anything except EAGAIN/EWOULDBLOCK/EINTR is trouble. If
- * it's EPIPE or ECONNRESET, assume we've lost the backend
- * connection permanently.
+ * Anything except EAGAIN/EWOULDBLOCK/EINTR is trouble. If it's
+ * EPIPE or ECONNRESET, assume we've lost the backend connection
+ * permanently.
*/
switch (SOCK_ERRNO)
{
#endif
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.\n"));
+ "server closed the connection unexpectedly\n"
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.\n"));
/*
- * We used to close the socket here, but that's a bad
- * idea since there might be unread data waiting
- * (typically, a NOTICE message from the backend
- * telling us it's committing hara-kiri...). Leave
- * the socket open until pqReadData finds no more data
- * can be read. But abandon attempt to send data.
+ * We used to close the socket here, but that's a bad idea
+ * since there might be unread data waiting (typically, a
+ * NOTICE message from the backend telling us it's
+ * committing hara-kiri...). Leave the socket open until
+ * pqReadData finds no more data can be read. But abandon
+ * attempt to send data.
*/
conn->outCount = 0;
return -1;
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not send data to server: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not send data to server: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
/* We don't assume it's a fatal error... */
conn->outCount = 0;
return -1;
/*
* There are scenarios in which we can't send data because the
- * communications channel is full, but we cannot expect the
- * server to clear the channel eventually because it's blocked
- * trying to send data to us. (This can happen when we are
- * sending a large amount of COPY data, and the server has
- * generated lots of NOTICE responses.) To avoid a deadlock
- * situation, we must be prepared to accept and buffer
- * incoming data before we try again. Furthermore, it is
- * possible that such incoming data might not arrive until
- * after we've gone to sleep. Therefore, we wait for either
- * read ready or write ready.
+ * communications channel is full, but we cannot expect the server
+ * to clear the channel eventually because it's blocked trying to
+ * send data to us. (This can happen when we are sending a large
+ * amount of COPY data, and the server has generated lots of
+ * NOTICE responses.) To avoid a deadlock situation, we must be
+ * prepared to accept and buffer incoming data before we try
+ * again. Furthermore, it is possible that such incoming data
+ * might not arrive until after we've gone to sleep. Therefore,
+ * we wait for either read ready or write ready.
*/
if (pqReadData(conn) < 0)
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("select() failed: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
}
return result;
}
return poll(&input_fd, 1, timeout_ms);
-
#else /* !HAVE_POLL */
fd_set input_mask;
{
/* dgettext() preserves errno, but bindtextdomain() doesn't */
#ifdef WIN32
- int save_errno = GetLastError();
+ int save_errno = GetLastError();
#else
- int save_errno = errno;
+ int save_errno = errno;
#endif
const char *ldir;
* didn't really belong there.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-print.c,v 1.63 2005/08/23 21:02:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-print.c,v 1.64 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
unsigned char *fieldNotNum, int *fieldMax,
const int fieldMaxLen, FILE *fout);
static char *do_header(FILE *fout, const PQprintOpt *po, const int nFields,
- int *fieldMax, const char **fieldNames, unsigned char *fieldNotNum,
+ int *fieldMax, const char **fieldNames, unsigned char *fieldNotNum,
const int fs_len, const PGresult *res);
static void output_row(FILE *fout, const PQprintOpt *po, const int nFields, char **fields,
unsigned char *fieldNotNum, int *fieldMax, char *border,
int total_line_length = 0;
int usePipe = 0;
char *pagerenv;
+
#if defined(ENABLE_THREAD_SAFETY) && !defined(WIN32)
sigset_t osigset;
bool sigpipe_masked = false;
#ifdef TIOCGWINSZ
struct winsize screen_size;
-
#else
struct winsize
{
)
{
/*
- * If we think there'll be more than one screen of output, try
- * to pipe to the pager program.
+ * If we think there'll be more than one screen of output, try to
+ * pipe to the pager program.
*/
#ifdef TIOCGWINSZ
if (ioctl(fileno(stdout), TIOCGWINSZ, &screen_size) == -1 ||
sigpipe_masked = true;
#else
oldsigpipehandler = pqsignal(SIGPIPE, SIG_IGN);
-#endif /* ENABLE_THREAD_SAFETY */
-#endif /* WIN32 */
+#endif /* ENABLE_THREAD_SAFETY */
+#endif /* WIN32 */
}
else
fout = stdout;
{
if (po->html3)
fprintf(fout,
- " %d\n",
+ " %d\n",
po->tableOpt ? po->tableOpt : "", i);
else
fprintf(fout, libpq_gettext("-- RECORD %d --\n"), i);
{
if (po->caption)
fprintf(fout,
- " %s\n",
+ " %s\n",
po->tableOpt ? po->tableOpt : "",
po->caption);
else
" "
"Retrieved %d rows * %d fields"
"\n",
- po->tableOpt ? po->tableOpt : "", nTups, nFields);
+ po->tableOpt ? po->tableOpt : "", nTups, nFields);
}
else
fprintf(fout, " ", po->tableOpt ? po->tableOpt : "");
_pclose(fout);
#else
pclose(fout);
-
+
#ifdef ENABLE_THREAD_SAFETY
/* we can't easily verify if EPIPE occurred, so say it did */
if (sigpipe_masked)
pq_reset_sigpipe(&osigset, sigpipe_pending, true);
#else
pqsignal(SIGPIPE, oldsigpipehandler);
-#endif /* ENABLE_THREAD_SAFETY */
-#endif /* WIN32 */
+#endif /* ENABLE_THREAD_SAFETY */
+#endif /* WIN32 */
}
if (po->html3 && !po->expanded)
fputs(" \n", fout);
}
/*
- * Above loop will believe E in first column is numeric; also,
- * we insist on a digit in the last column for a numeric. This
- * test is still not bulletproof but it handles most cases.
+ * Above loop will believe E in first column is numeric; also, we
+ * insist on a digit in the last column for a numeric. This test
+ * is still not bulletproof but it handles most cases.
*/
if (*pval == 'E' || *pval == 'e' ||
!(ch >= '0' && ch <= '9'))
if (po->html3)
fprintf(fout, " %s | ",
- fieldNotNum[field_index] ? "left" : "right", p ? p : "");
+ fieldNotNum[field_index] ? "left" : "right", p ? p : "");
else
{
fprintf(fout,
FILE *fout, /* output stream */
int PrintAttNames, /* print attribute names or not */
int TerseOutput, /* delimiter bars or not? */
- int colWidth /* width of column, if 0, use variable
- * width */
+ int colWidth /* width of column, if 0, use variable width */
)
{
int nFields;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol2.c,v 1.18 2005/06/12 00:00:21 neilc Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol2.c,v 1.19 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
"invalid setenv state %c, "
- "probably indicative of memory corruption\n"
+ "probably indicative of memory corruption\n"
),
conn->setenv_state);
goto error_return;
{
/*
* Send SET commands for stuff directed by Environment
- * Options. Note: we assume that SET commands won't
- * start transaction blocks, even in a 7.3 server with
+ * Options. Note: we assume that SET commands won't start
+ * transaction blocks, even in a 7.3 server with
* autocommit off.
*/
char setQuery[100]; /* note length limit in
conn->next_eo->pgName, val);
#ifdef CONNECTDEBUG
fprintf(stderr,
- "Use environment variable %s to send %s\n",
+ "Use environment variable %s to send %s\n",
conn->next_eo->envName, setQuery);
#endif
if (!PQsendQuery(conn, setQuery))
case SETENV_STATE_QUERY1_SEND:
{
/*
- * Issue query to get information we need. Here we
- * must use begin/commit in case autocommit is off by
- * default in a 7.3 server.
+ * Issue query to get information we need. Here we must
+ * use begin/commit in case autocommit is off by default
+ * in a 7.3 server.
*
* Note: version() exists in all protocol-2.0-supporting
* backends. In 7.3 it would be safer to write
val += 11;
/*
- * strip off platform part (scribbles on
- * result, naughty naughty)
+ * strip off platform part (scribbles on result,
+ * naughty naughty)
*/
ptr = strchr(val, ' ');
if (ptr)
const char *query;
/*
- * pg_client_encoding does not exist in pre-7.2
- * servers. So we need to be prepared for an error
- * here. Do *not* start a transaction block, except
- * in 7.3 servers where we need to prevent
- * autocommit-off from starting a transaction anyway.
+ * pg_client_encoding does not exist in pre-7.2 servers.
+ * So we need to be prepared for an error here. Do *not*
+ * start a transaction block, except in 7.3 servers where
+ * we need to prevent autocommit-off from starting a
+ * transaction anyway.
*/
if (conn->sversion >= 70300 &&
conn->sversion < 70400)
{
/* Extract client encoding and save it */
val = PQgetvalue(res, 0, 0);
- if (val && *val) /* null should not happen,
- * but */
+ if (val && *val) /* null should not happen, but */
pqSaveParameterStatus(conn, "client_encoding",
val);
}
else
{
/*
- * Error: presumably function not available,
- * so use PGCLIENTENCODING or SQL_ASCII as the
+ * Error: presumably function not available, so
+ * use PGCLIENTENCODING or SQL_ASCII as the
* fallback.
*/
val = getenv("PGCLIENTENCODING");
default:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("invalid state %c, "
- "probably indicative of memory corruption\n"),
+ "probably indicative of memory corruption\n"),
conn->setenv_state);
goto error_return;
}
for (;;)
{
/*
- * Quit if in COPY_OUT state: we expect raw data from the server
- * until PQendcopy is called. Don't try to parse it according to
- * the normal protocol. (This is bogus. The data lines ought to
- * be part of the protocol and have identifying leading
- * characters.)
+ * Quit if in COPY_OUT state: we expect raw data from the server until
+ * PQendcopy is called. Don't try to parse it according to the normal
+ * protocol. (This is bogus. The data lines ought to be part of the
+ * protocol and have identifying leading characters.)
*/
if (conn->asyncStatus == PGASYNC_COPY_OUT)
return;
* NOTIFY and NOTICE messages can happen in any state besides COPY
* OUT; always process them right away.
*
- * Most other messages should only be processed while in BUSY state.
- * (In particular, in READY state we hold off further parsing
- * until the application collects the current PGresult.)
+ * Most other messages should only be processed while in BUSY state. (In
+ * particular, in READY state we hold off further parsing until the
+ * application collects the current PGresult.)
*
* However, if the state is IDLE then we got trouble; we need to deal
* with the unexpected message somehow.
/*
* Unexpected message in IDLE state; need to recover somehow.
* ERROR messages are displayed using the notice processor;
- * anything else is just dropped on the floor after displaying
- * a suitable warning notice. (An ERROR is very possibly the
- * backend telling us why it is about to close the connection,
- * so we don't want to just discard it...)
+ * anything else is just dropped on the floor after displaying a
+ * suitable warning notice. (An ERROR is very possibly the
+ * backend telling us why it is about to close the connection, so
+ * we don't want to just discard it...)
*/
if (id == 'E')
{
else
{
pqInternalNotice(&conn->noticeHooks,
- "message type 0x%02x arrived from server while idle",
+ "message type 0x%02x arrived from server while idle",
id);
/* Discard the unexpected message; good idea?? */
conn->inStart = conn->inEnd;
if (conn->result == NULL)
{
conn->result = PQmakeEmptyPGresult(conn,
- PGRES_COMMAND_OK);
+ PGRES_COMMAND_OK);
if (!conn->result)
return;
}
id);
if (conn->result == NULL)
conn->result = PQmakeEmptyPGresult(conn,
- PGRES_EMPTY_QUERY);
+ PGRES_EMPTY_QUERY);
conn->asyncStatus = PGASYNC_READY;
break;
case 'K': /* secret key data from the backend */
/*
- * This is expected only during backend startup, but
- * it's just as easy to handle it as part of the main
- * loop. Save the data and continue processing.
+ * This is expected only during backend startup, but it's
+ * just as easy to handle it as part of the main loop.
+ * Save the data and continue processing.
*/
if (pqGetInt(&(conn->be_pid), 4, conn))
return;
return;
/* We pretty much ignore this message type... */
break;
- case 'T': /* row descriptions (start of query
- * results) */
+ case 'T': /* row descriptions (start of query results) */
if (conn->result == NULL)
{
/* First 'T' in a query sequence */
{
/*
* A new 'T' message is treated as the start of
- * another PGresult. (It is not clear that this
- * is really possible with the current backend.)
- * We stop parsing until the application accepts
- * the current result.
+ * another PGresult. (It is not clear that this is
+ * really possible with the current backend.) We stop
+ * parsing until the application accepts the current
+ * result.
*/
conn->asyncStatus = PGASYNC_READY;
return;
MemSet(conn->curTuple, 0, nfields * sizeof(PGresAttValue));
/*
- * If it's binary, fix the column format indicators. We assume
- * the backend will consistently send either B or D, not a mix.
+ * If it's binary, fix the column format indicators. We assume the
+ * backend will consistently send either B or D, not a mix.
*/
if (binary)
{
/* Replace partially constructed result with an error result */
/*
- * we do NOT use pqSaveErrorResult() here, because of the likelihood
- * that there's not enough memory to concatenate messages...
+ * we do NOT use pqSaveErrorResult() here, because of the likelihood that
+ * there's not enough memory to concatenate messages...
*/
pqClearAsyncResult(conn);
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("out of memory for query result\n"));
/*
- * XXX: if PQmakeEmptyPGresult() fails, there's probably not much
- * we can do to recover...
+ * XXX: if PQmakeEmptyPGresult() fails, there's probably not much we can
+ * do to recover...
*/
conn->result = PQmakeEmptyPGresult(conn, PGRES_FATAL_ERROR);
conn->asyncStatus = PGASYNC_READY;
/*
* Since the message might be pretty long, we create a temporary
- * PQExpBuffer rather than using conn->workBuffer. workBuffer is
- * intended for stuff that is expected to be short.
+ * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
+ * for stuff that is expected to be short.
*/
initPQExpBuffer(&workBuf);
if (pqGets(&workBuf, conn))
goto failure;
/*
- * Break the message into fields. We can't do very much here, but we
- * can split the severity code off, and remove trailing newlines.
- * Also, we use the heuristic that the primary message extends only to
- * the first newline --- anything after that is detail message. (In
- * some cases it'd be better classed as hint, but we can hardly be
- * expected to guess that here.)
+ * Break the message into fields. We can't do very much here, but we can
+ * split the severity code off, and remove trailing newlines. Also, we use
+ * the heuristic that the primary message extends only to the first
+ * newline --- anything after that is detail message. (In some cases it'd
+ * be better classed as hint, but we can hardly be expected to guess that
+ * here.)
*/
while (workBuf.len > 0 && workBuf.data[workBuf.len - 1] == '\n')
workBuf.data[--workBuf.len] = '\0';
/*
* Either save error as current async result, or just emit the notice.
- * Also, if it's an error and we were in a transaction block, assume
- * the server has now gone to error-in-transaction state.
+ * Also, if it's an error and we were in a transaction block, assume the
+ * server has now gone to error-in-transaction state.
*/
if (isError)
{
/*
* Normally we get into INERROR state by detecting an Error message.
- * However, if we see one of these tags then we know for sure the
- * server is in abort state ...
+ * However, if we see one of these tags then we know for sure the server
+ * is in abort state ...
*/
else if (strcmp(cmdTag, "*ABORT STATE*") == 0) /* pre-7.3 only */
conn->xactStatus = PQTRANS_INERROR;
return EOF;
/*
- * Store the relation name right after the PQnotify structure so it
- * can all be freed at once. We don't use NAMEDATALEN because we
- * don't want to tie this interface to a specific server name length.
+ * Store the relation name right after the PQnotify structure so it can
+ * all be freed at once. We don't use NAMEDATALEN because we don't want
+ * to tie this interface to a specific server name length.
*/
nmlen = strlen(conn->workBuffer.data);
newNotify = (PGnotify *) malloc(sizeof(PGnotify) + nmlen + 1);
msgLength = conn->inCursor - conn->inStart;
/*
- * If it's the end-of-data marker, consume it, exit COPY_OUT mode,
- * and let caller read status with PQgetResult().
+ * If it's the end-of-data marker, consume it, exit COPY_OUT mode, and
+ * let caller read status with PQgetResult().
*/
if (msgLength == 3 &&
strncmp(&conn->inBuffer[conn->inStart], "\\.\n", 3) == 0)
}
/*
- * Since this is a purely synchronous routine, we don't bother to
- * maintain conn->inCursor; there is no need to back up.
+ * Since this is a purely synchronous routine, we don't bother to maintain
+ * conn->inCursor; there is no need to back up.
*/
while (maxlen > 1)
{
return -1; /* we are not doing a copy... */
/*
- * Move data from libpq's buffer to the caller's. We want to accept
- * data only in units of whole lines, not partial lines. This ensures
- * that we can recognize the terminator line "\\.\n". (Otherwise, if
- * it happened to cross a packet/buffer boundary, we might hand the
- * first one or two characters off to the caller, which we shouldn't.)
+ * Move data from libpq's buffer to the caller's. We want to accept data
+ * only in units of whole lines, not partial lines. This ensures that we
+ * can recognize the terminator line "\\.\n". (Otherwise, if it happened
+ * to cross a packet/buffer boundary, we might hand the first one or two
+ * characters off to the caller, which we shouldn't.)
*/
conn->inCursor = conn->inStart;
/*
* We don't have a complete line. We'd prefer to leave it in libpq's
- * buffer until the rest arrives, but there is a special case: what if
- * the line is longer than the buffer the caller is offering us? In
- * that case we'd better hand over a partial line, else we'd get into
- * an infinite loop. Do this in a way that ensures we can't
- * misrecognize a terminator line later: leave last 3 characters in
- * libpq buffer.
+ * buffer until the rest arrives, but there is a special case: what if the
+ * line is longer than the buffer the caller is offering us? In that case
+ * we'd better hand over a partial line, else we'd get into an infinite
+ * loop. Do this in a way that ensures we can't misrecognize a terminator
+ * line later: leave last 3 characters in libpq buffer.
*/
if (avail == 0 && bufsize > 3)
{
}
/*
- * make sure no data is waiting to be sent, abort if we are
- * non-blocking and the flush fails
+ * make sure no data is waiting to be sent, abort if we are non-blocking
+ * and the flush fails
*/
if (pqFlush(conn) && pqIsnonblocking(conn))
return (1);
* Trouble. For backwards-compatibility reasons, we issue the error
* message as if it were a notice (would be nice to get rid of this
* silliness, but too many apps probably don't handle errors from
- * PQendcopy reasonably). Note that the app can still obtain the
- * error status from the PGconn object.
+ * PQendcopy reasonably). Note that the app can still obtain the error
+ * status from the PGconn object.
*/
if (conn->errorMessage.len > 0)
{
PQclear(result);
/*
- * The worst case is that we've lost sync with the backend entirely
- * due to application screwup of the copy in/out protocol. To recover,
- * reset the connection (talk about using a sledgehammer...)
+ * The worst case is that we've lost sync with the backend entirely due to
+ * application screwup of the copy in/out protocol. To recover, reset the
+ * connection (talk about using a sledgehammer...)
*/
pqInternalNotice(&conn->noticeHooks,
- "lost synchronization with server, resetting connection");
+ "lost synchronization with server, resetting connection");
/*
* Users doing non-blocking connections need to handle the reset
- * themselves, they'll need to check the connection status if we
- * return an error.
+ * themselves, they'll need to check the connection status if we return an
+ * error.
*/
if (pqIsnonblocking(conn))
PQresetStart(conn);
}
/*
- * Scan the message. If we run out of data, loop around to try
- * again.
+ * Scan the message. If we run out of data, loop around to try again.
*/
conn->inCursor = conn->inStart;
needInput = true;
/*
* We should see V or E response to the command, but might get N
- * and/or A notices first. We also need to swallow the final Z
- * before returning.
+ * and/or A notices first. We also need to swallow the final Z before
+ * returning.
*/
switch (id)
{
{
/* The backend violates the protocol. */
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("protocol error: id=0x%x\n"),
+ libpq_gettext("protocol error: id=0x%x\n"),
id);
pqSaveErrorResult(conn);
conn->inStart = conn->inCursor;
default:
/* The backend violates the protocol. */
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("protocol error: id=0x%x\n"),
+ libpq_gettext("protocol error: id=0x%x\n"),
id);
pqSaveErrorResult(conn);
conn->inStart = conn->inCursor;
*/
char *
pqBuildStartupPacket2(PGconn *conn, int *packetlen,
- const PQEnvironmentOption *options)
+ const PQEnvironmentOption * options)
{
StartupPacket *startpacket;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol3.c,v 1.21 2005/06/12 00:00:21 neilc Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol3.c,v 1.22 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int getNotify(PGconn *conn);
static int getCopyStart(PGconn *conn, ExecStatusType copytype);
static int getReadyForQuery(PGconn *conn);
-static int build_startup_packet(const PGconn *conn, char *packet,
- const PQEnvironmentOption *options);
+static int build_startup_packet(const PGconn *conn, char *packet,
+ const PQEnvironmentOption * options);
/*
for (;;)
{
/*
- * Try to read a message. First get the type code and length.
- * Return if not enough data.
+ * Try to read a message. First get the type code and length. Return
+ * if not enough data.
*/
conn->inCursor = conn->inStart;
if (pqGetc(&id, conn))
return;
/*
- * Try to validate message type/length here. A length less than 4
- * is definitely broken. Large lengths should only be believed
- * for a few message types.
+ * Try to validate message type/length here. A length less than 4 is
+ * definitely broken. Large lengths should only be believed for a few
+ * message types.
*/
if (msgLength < 4)
{
if (avail < msgLength)
{
/*
- * Before returning, enlarge the input buffer if needed to
- * hold the whole message. This is better than leaving it to
- * pqReadData because we can avoid multiple cycles of
- * realloc() when the message is large; also, we can implement
- * a reasonable recovery strategy if we are unable to make the
- * buffer big enough.
+ * Before returning, enlarge the input buffer if needed to hold
+ * the whole message. This is better than leaving it to
+ * pqReadData because we can avoid multiple cycles of realloc()
+ * when the message is large; also, we can implement a reasonable
+ * recovery strategy if we are unable to make the buffer big
+ * enough.
*/
if (pqCheckInBufferSpace(conn->inCursor + msgLength, conn))
{
/*
- * XXX add some better recovery code... plan is to skip
- * over the message using its length, then report an
- * error. For the moment, just treat this like loss of
- * sync (which indeed it might be!)
+ * XXX add some better recovery code... plan is to skip over
+ * the message using its length, then report an error. For the
+ * moment, just treat this like loss of sync (which indeed it
+ * might be!)
*/
handleSyncLoss(conn, id, msgLength);
}
}
/*
- * NOTIFY and NOTICE messages can happen in any state; always
- * process them right away.
+ * NOTIFY and NOTICE messages can happen in any state; always process
+ * them right away.
*
- * Most other messages should only be processed while in BUSY state.
- * (In particular, in READY state we hold off further parsing
- * until the application collects the current PGresult.)
+ * Most other messages should only be processed while in BUSY state. (In
+ * particular, in READY state we hold off further parsing until the
+ * application collects the current PGresult.)
*
* However, if the state is IDLE then we got trouble; we need to deal
* with the unexpected message somehow.
*
- * ParameterStatus ('S') messages are a special case: in IDLE state
- * we must process 'em (this case could happen if a new value was
- * adopted from config file due to SIGHUP), but otherwise we hold
- * off until BUSY state.
+ * ParameterStatus ('S') messages are a special case: in IDLE state we
+ * must process 'em (this case could happen if a new value was adopted
+ * from config file due to SIGHUP), but otherwise we hold off until
+ * BUSY state.
*/
if (id == 'A')
{
* ERROR messages are displayed using the notice processor;
* ParameterStatus is handled normally; anything else is just
* dropped on the floor after displaying a suitable warning
- * notice. (An ERROR is very possibly the backend telling us
- * why it is about to close the connection, so we don't want
- * to just discard it...)
+ * notice. (An ERROR is very possibly the backend telling us why
+ * it is about to close the connection, so we don't want to just
+ * discard it...)
*/
if (id == 'E')
{
else
{
pqInternalNotice(&conn->noticeHooks,
- "message type 0x%02x arrived from server while idle",
+ "message type 0x%02x arrived from server while idle",
id);
/* Discard the unexpected message */
conn->inCursor += msgLength;
if (conn->result == NULL)
{
conn->result = PQmakeEmptyPGresult(conn,
- PGRES_COMMAND_OK);
+ PGRES_COMMAND_OK);
if (!conn->result)
return;
}
if (conn->result == NULL)
{
conn->result = PQmakeEmptyPGresult(conn,
- PGRES_EMPTY_QUERY);
+ PGRES_EMPTY_QUERY);
if (!conn->result)
return;
}
if (conn->result == NULL)
{
conn->result = PQmakeEmptyPGresult(conn,
- PGRES_COMMAND_OK);
+ PGRES_COMMAND_OK);
if (!conn->result)
return;
}
case 'K': /* secret key data from the backend */
/*
- * This is expected only during backend startup, but
- * it's just as easy to handle it as part of the main
- * loop. Save the data and continue processing.
+ * This is expected only during backend startup, but it's
+ * just as easy to handle it as part of the main loop.
+ * Save the data and continue processing.
*/
if (pqGetInt(&(conn->be_pid), 4, conn))
return;
{
/*
* A new 'T' message is treated as the start of
- * another PGresult. (It is not clear that this
- * is really possible with the current backend.)
- * We stop parsing until the application accepts
- * the current result.
+ * another PGresult. (It is not clear that this is
+ * really possible with the current backend.) We stop
+ * parsing until the application accepts the current
+ * result.
*/
conn->asyncStatus = PGASYNC_READY;
return;
/*
* NoData indicates that we will not be seeing a
- * RowDescription message because the statement or
- * portal inquired about doesn't return rows. Set up a
- * COMMAND_OK result, instead of TUPLES_OK.
+ * RowDescription message because the statement or portal
+ * inquired about doesn't return rows. Set up a COMMAND_OK
+ * result, instead of TUPLES_OK.
*/
if (conn->result == NULL)
conn->result = PQmakeEmptyPGresult(conn,
- PGRES_COMMAND_OK);
+ PGRES_COMMAND_OK);
break;
case 'D': /* Data Row */
if (conn->result != NULL &&
return;
}
else if (conn->result != NULL &&
- conn->result->resultStatus == PGRES_FATAL_ERROR)
+ conn->result->resultStatus == PGRES_FATAL_ERROR)
{
/*
- * We've already choked for some reason. Just
- * discard tuples till we get to the end of the
- * query.
+ * We've already choked for some reason. Just discard
+ * tuples till we get to the end of the query.
*/
conn->inCursor += msgLength;
}
case 'd': /* Copy Data */
/*
- * If we see Copy Data, just silently drop it. This
- * would only occur if application exits COPY OUT mode
- * too early.
+ * If we see Copy Data, just silently drop it. This would
+ * only occur if application exits COPY OUT mode too
+ * early.
*/
conn->inCursor += msgLength;
break;
case 'c': /* Copy Done */
/*
- * If we see Copy Done, just silently drop it. This
- * is the normal case during PQendcopy. We will keep
- * swallowing data, expecting to see command-complete
- * for the COPY command.
+ * If we see Copy Done, just silently drop it. This is
+ * the normal case during PQendcopy. We will keep
+ * swallowing data, expecting to see command-complete for
+ * the COPY command.
*/
break;
default:
{
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "lost synchronization with server: got message type \"%c\", length %d\n"),
+ "lost synchronization with server: got message type \"%c\", length %d\n"),
id, msgLength);
/* build an error result holding the error message */
pqSaveErrorResult(conn);
/*
* Since the fields might be pretty long, we create a temporary
- * PQExpBuffer rather than using conn->workBuffer. workBuffer is
- * intended for stuff that is expected to be short. We shouldn't use
+ * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
+ * for stuff that is expected to be short. We shouldn't use
* conn->errorMessage either, since this might be only a notice.
*/
initPQExpBuffer(&workBuf);
/*
* Make a PGresult to hold the accumulated fields. We temporarily lie
- * about the result status, so that PQmakeEmptyPGresult doesn't
- * uselessly copy conn->errorMessage.
+ * about the result status, so that PQmakeEmptyPGresult doesn't uselessly
+ * copy conn->errorMessage.
*/
res = PQmakeEmptyPGresult(conn, PGRES_EMPTY_QUERY);
if (!res)
}
/*
- * Store the strings right after the PQnotify structure so it can all
- * be freed at once. We don't use NAMEDATALEN because we don't want
- * to tie this interface to a specific server name length.
+ * Store the strings right after the PQnotify structure so it can all be
+ * freed at once. We don't use NAMEDATALEN because we don't want to tie
+ * this interface to a specific server name length.
*/
nmlen = strlen(svname);
extralen = strlen(conn->workBuffer.data);
for (;;)
{
/*
- * Do we have the next input message? To make life simpler for
- * async callers, we keep returning 0 until the next message is
- * fully available, even if it is not Copy Data.
+ * Do we have the next input message? To make life simpler for async
+ * callers, we keep returning 0 until the next message is fully
+ * available, even if it is not Copy Data.
*/
conn->inCursor = conn->inStart;
if (pqGetc(&id, conn))
conn->copy_is_binary)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("PQgetline: not doing text COPY OUT\n"));
+ libpq_gettext("PQgetline: not doing text COPY OUT\n"));
*s = '\0';
return EOF;
}
/*
* Recognize the next input message. To make life simpler for async
- * callers, we keep returning 0 until the next message is fully
- * available even if it is not Copy Data. This should keep PQendcopy
- * from blocking.
+ * callers, we keep returning 0 until the next message is fully available
+ * even if it is not Copy Data. This should keep PQendcopy from blocking.
*/
conn->inCursor = conn->inStart;
if (pqGetc(&id, conn))
return 0;
/*
- * Cannot proceed unless it's a Copy Data message. Anything else
- * means end of copy mode.
+ * Cannot proceed unless it's a Copy Data message. Anything else means
+ * end of copy mode.
*/
if (id != 'd')
return -1;
return 1;
/*
- * If we sent the COPY command in extended-query mode, we must
- * issue a Sync as well.
+ * If we sent the COPY command in extended-query mode, we must issue a
+ * Sync as well.
*/
if (conn->queryclass != PGQUERY_SIMPLE)
{
}
/*
- * make sure no data is waiting to be sent, abort if we are
- * non-blocking and the flush fails
+ * make sure no data is waiting to be sent, abort if we are non-blocking
+ * and the flush fails
*/
if (pqFlush(conn) && pqIsnonblocking(conn))
return (1);
resetPQExpBuffer(&conn->errorMessage);
/*
- * Non blocking connections may have to abort at this point. If
- * everyone played the game there should be no problem, but in error
- * scenarios the expected messages may not have arrived yet. (We are
- * assuming that the backend's packetizing will ensure that
- * CommandComplete arrives along with the CopyDone; are there corner
- * cases where that doesn't happen?)
+ * Non blocking connections may have to abort at this point. If everyone
+ * played the game there should be no problem, but in error scenarios the
+ * expected messages may not have arrived yet. (We are assuming that the
+ * backend's packetizing will ensure that CommandComplete arrives along
+ * with the CopyDone; are there corner cases where that doesn't happen?)
*/
if (pqIsnonblocking(conn) && PQisBusy(conn))
return (1);
* Trouble. For backwards-compatibility reasons, we issue the error
* message as if it were a notice (would be nice to get rid of this
* silliness, but too many apps probably don't handle errors from
- * PQendcopy reasonably). Note that the app can still obtain the
- * error status from the PGconn object.
+ * PQendcopy reasonably). Note that the app can still obtain the error
+ * status from the PGconn object.
*/
if (conn->errorMessage.len > 0)
{
}
/*
- * Scan the message. If we run out of data, loop around to try
- * again.
+ * Scan the message. If we run out of data, loop around to try again.
*/
needInput = true;
continue;
/*
- * Try to validate message type/length here. A length less than 4
- * is definitely broken. Large lengths should only be believed
- * for a few message types.
+ * Try to validate message type/length here. A length less than 4 is
+ * definitely broken. Large lengths should only be believed for a few
+ * message types.
*/
if (msgLength < 4)
{
if (avail < msgLength)
{
/*
- * Before looping, enlarge the input buffer if needed to hold
- * the whole message. See notes in parseInput.
+ * Before looping, enlarge the input buffer if needed to hold the
+ * whole message. See notes in parseInput.
*/
if (pqCheckInBufferSpace(conn->inCursor + msgLength, conn))
{
/*
- * XXX add some better recovery code... plan is to skip
- * over the message using its length, then report an
- * error. For the moment, just treat this like loss of
- * sync (which indeed it might be!)
+ * XXX add some better recovery code... plan is to skip over
+ * the message using its length, then report an error. For the
+ * moment, just treat this like loss of sync (which indeed it
+ * might be!)
*/
handleSyncLoss(conn, id, msgLength);
break;
/*
* We should see V or E response to the command, but might get N
- * and/or A notices first. We also need to swallow the final Z
- * before returning.
+ * and/or A notices first. We also need to swallow the final Z before
+ * returning.
*/
switch (id)
{
default:
/* The backend violates the protocol. */
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("protocol error: id=0x%x\n"),
+ libpq_gettext("protocol error: id=0x%x\n"),
id);
pqSaveErrorResult(conn);
/* trust the specified message length as what to skip */
*/
char *
pqBuildStartupPacket3(PGconn *conn, int *packetlen,
- const PQEnvironmentOption *options)
+ const PQEnvironmentOption * options)
{
char *startpacket;
*/
static int
build_startup_packet(const PGconn *conn, char *packet,
- const PQEnvironmentOption *options)
+ const PQEnvironmentOption * options)
{
int packet_len = 0;
const PQEnvironmentOption *next_eo;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.71 2005/08/28 16:37:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.72 2005/10/15 02:49:48 momjian Exp $
*
* NOTES
* [ Most of these notes are wrong/obsolete, but perhaps not all ]
char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not establish SSL connection: %s\n"),
+ libpq_gettext("could not establish SSL connection: %s\n"),
err);
SSLerrfree(err);
close_SSL(conn);
return PGRES_POLLING_FAILED;
}
+
/*
- * Initialize errorMessage to empty. This allows open_client_SSL()
- * to detect whether client_cert_cb() has stored a message.
+ * Initialize errorMessage to empty. This allows open_client_SSL() to
+ * detect whether client_cert_cb() has stored a message.
*/
resetPQExpBuffer(&conn->errorMessage);
}
case SSL_ERROR_WANT_WRITE:
/*
- * Returning 0 here would cause caller to wait for
- * read-ready, which is not correct since what SSL wants
- * is wait for write-ready. The former could get us stuck
- * in an infinite wait, so don't risk it; busy-loop
- * instead.
+ * Returning 0 here would cause caller to wait for read-ready,
+ * which is not correct since what SSL wants is wait for
+ * write-ready. The former could get us stuck in an infinite
+ * wait, so don't risk it; busy-loop instead.
*/
goto rloop;
case SSL_ERROR_SYSCALL:
if (n == -1)
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("SSL SYSCALL error: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
else
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: EOF detected\n"));
+ libpq_gettext("SSL SYSCALL error: EOF detected\n"));
SOCK_ERRNO_SET(ECONNRESET);
n = -1;
char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL error: %s\n"), err);
+ libpq_gettext("SSL error: %s\n"), err);
SSLerrfree(err);
}
/* fall through */
break;
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("unrecognized SSL error code: %d\n"),
+ libpq_gettext("unrecognized SSL error code: %d\n"),
err);
n = -1;
break;
pqsecure_write(PGconn *conn, const void *ptr, size_t len)
{
ssize_t n;
-
-#ifndef WIN32
+
+#ifndef WIN32
#ifdef ENABLE_THREAD_SAFETY
sigset_t osigmask;
bool sigpipe_pending;
bool got_epipe = false;
-
+
if (pq_block_sigpipe(&osigmask, &sigpipe_pending) < 0)
return -1;
#else
pqsigfunc oldsighandler = pqsignal(SIGPIPE, SIG_IGN);
-#endif /* ENABLE_THREAD_SAFETY */
-#endif /* WIN32 */
-
+#endif /* ENABLE_THREAD_SAFETY */
+#endif /* WIN32 */
+
#ifdef USE_SSL
if (conn->ssl)
{
/*
* Returning 0 here causes caller to wait for write-ready,
- * which is not really the right thing, but it's the best
- * we can do.
+ * which is not really the right thing, but it's the best we
+ * can do.
*/
n = 0;
break;
got_epipe = true;
#endif
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("SSL SYSCALL error: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
}
else
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: EOF detected\n"));
+ libpq_gettext("SSL SYSCALL error: EOF detected\n"));
SOCK_ERRNO_SET(ECONNRESET);
n = -1;
}
char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL error: %s\n"), err);
+ libpq_gettext("SSL error: %s\n"), err);
SSLerrfree(err);
}
/* fall through */
break;
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("unrecognized SSL error code: %d\n"),
+ libpq_gettext("unrecognized SSL error code: %d\n"),
err);
n = -1;
break;
got_epipe = true;
#endif
}
-
+
#ifndef WIN32
#ifdef ENABLE_THREAD_SAFETY
pq_reset_sigpipe(&osigmask, sigpipe_pending, got_epipe);
#else
pqsignal(SIGPIPE, oldsighandler);
-#endif /* ENABLE_THREAD_SAFETY */
-#endif /* WIN32 */
+#endif /* ENABLE_THREAD_SAFETY */
+#endif /* WIN32 */
return n;
}
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("error querying socket: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
return -1;
}
int herrno = 0;
/*
- * Currently, pqGethostbyname() is used only on platforms that
- * don't have getaddrinfo(). If you enable this function, you
- * should convert the pqGethostbyname() function call to use
- * getaddrinfo().
+ * Currently, pqGethostbyname() is used only on platforms that don't
+ * have getaddrinfo(). If you enable this function, you should
+ * convert the pqGethostbyname() function call to use getaddrinfo().
*/
pqGethostbyname(conn->peer_cn, &hpstr, buf, sizeof(buf),
&h, &herrno);
if (h == NULL)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not get information about host \"%s\": %s\n"),
+ libpq_gettext("could not get information about host \"%s\": %s\n"),
conn->peer_cn, hstrerror(h_errno));
return -1;
}
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
"server common name \"%s\" does not resolve to %ld.%ld.%ld.%ld\n"),
- conn->peer_cn, (l >> 24) % 0x100, (l >> 16) % 0x100,
+ conn->peer_cn, (l >> 24) % 0x100, (l >> 16) % 0x100,
(l >> 8) % 0x100, l % 0x100);
break;
default:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "server common name \"%s\" does not resolve to peer address\n"),
+ "server common name \"%s\" does not resolve to peer address\n"),
conn->peer_cn);
}
return -1;
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/*
* Load precomputed DH parameters.
{
char homedir[MAXPGPATH];
struct stat buf;
+
#ifndef WIN32
struct stat buf2;
#endif
if (!pqGetHomeDirectory(homedir, sizeof(homedir)))
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not get user information\n"));
+ libpq_gettext("could not get user information\n"));
return 0;
}
if ((fp = fopen(fnbuf, "r")) == NULL)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not open certificate file \"%s\": %s\n"),
+ libpq_gettext("could not open certificate file \"%s\": %s\n"),
fnbuf, pqStrerror(errno, sebuf, sizeof(sebuf)));
return 0;
}
char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not read certificate file \"%s\": %s\n"),
+ libpq_gettext("could not read certificate file \"%s\": %s\n"),
fnbuf, err);
SSLerrfree(err);
fclose(fp);
if (stat(fnbuf, &buf) == -1)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("certificate present, but not private key file \"%s\"\n"),
+ libpq_gettext("certificate present, but not private key file \"%s\"\n"),
fnbuf);
return 0;
}
buf.st_uid != geteuid())
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("private key file \"%s\" has wrong permissions\n"),
+ libpq_gettext("private key file \"%s\" has wrong permissions\n"),
fnbuf);
return 0;
}
if ((fp = fopen(fnbuf, "r")) == NULL)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not open private key file \"%s\": %s\n"),
+ libpq_gettext("could not open private key file \"%s\": %s\n"),
fnbuf, pqStrerror(errno, sebuf, sizeof(sebuf)));
return 0;
}
char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not read private key file \"%s\": %s\n"),
+ libpq_gettext("could not read private key file \"%s\": %s\n"),
fnbuf, err);
SSLerrfree(err);
fclose(fp);
char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("certificate does not match private key file \"%s\": %s\n"),
+ libpq_gettext("certificate does not match private key file \"%s\": %s\n"),
fnbuf, err);
SSLerrfree(err);
return 0;
pq_threadidcallback(void)
{
/*
- * This is not starndard-compliant. pthread_self() returns
- * pthread_t, and shouldn't be cast to unsigned long, but
- * CRYPTO_set_id_callback requires it, so we have to do it.
+ * This is not starndard-compliant. pthread_self() returns pthread_t, and
+ * shouldn't be cast to unsigned long, but CRYPTO_set_id_callback requires
+ * it, so we have to do it.
*/
return (unsigned long) pthread_self();
}
else
pthread_mutex_unlock(&pq_lockarray[n]);
}
-
#endif /* ENABLE_THREAD_SAFETY */
static int
#ifdef ENABLE_THREAD_SAFETY
#ifndef WIN32
static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
-
#else
static pthread_mutex_t init_mutex = NULL;
static long mutex_initlock = 0;
char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not create SSL context: %s\n"),
+ libpq_gettext("could not create SSL context: %s\n"),
err);
SSLerrfree(err);
#ifdef ENABLE_THREAD_SAFETY
r = SSL_connect(conn->ssl);
if (r <= 0)
{
- int err = SSL_get_error(conn->ssl, r);
+ int err = SSL_get_error(conn->ssl, r);
switch (err)
{
if (r == -1)
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ libpq_gettext("SSL SYSCALL error: %s\n"),
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
else
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: EOF detected\n"));
+ libpq_gettext("SSL SYSCALL error: EOF detected\n"));
close_SSL(conn);
return PGRES_POLLING_FAILED;
}
* these will be detected by client_cert_cb() which is
* called from SSL_connect(). We want to return that
* error message and not the rather unhelpful error that
- * OpenSSL itself returns. So check to see if an error
+ * OpenSSL itself returns. So check to see if an error
* message was already stored.
*/
if (conn->errorMessage.len == 0)
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("unrecognized SSL error code: %d\n"),
+ libpq_gettext("unrecognized SSL error code: %d\n"),
err);
close_SSL(conn);
return PGRES_POLLING_FAILED;
if (r != X509_V_OK)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("certificate could not be validated: %s\n"),
+ libpq_gettext("certificate could not be validated: %s\n"),
X509_verify_cert_error_string(r));
close_SSL(conn);
return PGRES_POLLING_FAILED;
char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("certificate could not be obtained: %s\n"),
+ libpq_gettext("certificate could not be obtained: %s\n"),
err);
SSLerrfree(err);
close_SSL(conn);
/*
* this is necessary to eliminate man-in-the-middle attacks and
- * impersonations where the attacker somehow learned the server's
- * private key
+ * impersonations where the attacker somehow learned the server's private
+ * key
*/
if (verify_peer(conn) == -1)
{
return NULL;
return conn->ssl;
}
-
-#else /* !USE_SSL */
+#else /* !USE_SSL */
void *
PQgetssl(PGconn *conn)
{
return NULL;
}
-
#endif /* USE_SSL */
#if defined(ENABLE_THREAD_SAFETY) && !defined(WIN32)
/*
- * Block SIGPIPE for this thread. This prevents send()/write() from exiting
+ * Block SIGPIPE for this thread. This prevents send()/write() from exiting
* the application.
*/
int
pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending)
{
- sigset_t sigpipe_sigset;
- sigset_t sigset;
-
+ sigset_t sigpipe_sigset;
+ sigset_t sigset;
+
sigemptyset(&sigpipe_sigset);
sigaddset(&sigpipe_sigset, SIGPIPE);
/* Is there a pending SIGPIPE? */
if (sigpending(&sigset) != 0)
return -1;
-
+
if (sigismember(&sigset, SIGPIPE))
*sigpipe_pending = true;
else
}
else
*sigpipe_pending = false;
-
+
return 0;
}
-
+
/*
* Discard any pending SIGPIPE and reset the signal mask.
*
* Note: we are effectively assuming here that the C library doesn't queue
- * up multiple SIGPIPE events. If it did, then we'd accidentally leave
+ * up multiple SIGPIPE events. If it did, then we'd accidentally leave
* ours in the queue when an event was already pending and we got another.
* As long as it doesn't queue multiple events, we're OK because the caller
* can't tell the difference.
* gotten one, pass got_epipe = TRUE.
*
* We do not want this to change errno, since if it did that could lose
- * the error code from a preceding send(). We essentially assume that if
+ * the error code from a preceding send(). We essentially assume that if
* we were able to do pq_block_sigpipe(), this can't fail.
*/
void
pq_reset_sigpipe(sigset_t *osigset, bool sigpipe_pending, bool got_epipe)
{
- int save_errno = SOCK_ERRNO;
- int signo;
- sigset_t sigset;
+ int save_errno = SOCK_ERRNO;
+ int signo;
+ sigset_t sigset;
/* Clear SIGPIPE only if none was pending */
if (got_epipe && !sigpipe_pending)
if (sigpending(&sigset) == 0 &&
sigismember(&sigset, SIGPIPE))
{
- sigset_t sigpipe_sigset;
-
+ sigset_t sigpipe_sigset;
+
sigemptyset(&sigpipe_sigset);
sigaddset(&sigpipe_sigset, SIGPIPE);
sigwait(&sigpipe_sigset, &signo);
}
}
-
+
/* Restore saved block mask */
pthread_sigmask(SIG_SETMASK, osigset, NULL);
SOCK_ERRNO_SET(save_errno);
}
-#endif /* ENABLE_THREAD_SAFETY */
+#endif /* ENABLE_THREAD_SAFETY */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.119 2005/09/24 17:53:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.120 2005/10/15 02:49:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/*
* Although it is okay to add to this list, values which become unused
- * should never be removed, nor should constants be redefined - that
- * would break compatibility with existing code.
+ * should never be removed, nor should constants be redefined - that would
+ * break compatibility with existing code.
*/
CONNECTION_OK,
CONNECTION_BAD,
/* Non-blocking mode only below here */
/*
- * The existence of these should never be relied upon - they should
- * only be used for user feedback or similar purposes.
+ * The existence of these should never be relied upon - they should only
+ * be used for user feedback or similar purposes.
*/
CONNECTION_STARTED, /* Waiting for connection to be made. */
CONNECTION_MADE, /* Connection OK; waiting to send. */
* anything was executed properly by the
* backend */
PGRES_TUPLES_OK, /* a query command that returns tuples was
- * executed properly by the backend,
- * PGresult contains the result tuples */
+ * executed properly by the backend, PGresult
+ * contains the result tuples */
PGRES_COPY_OUT, /* Copy Out data transfer in progress */
PGRES_COPY_IN, /* Copy In data transfer in progress */
- PGRES_BAD_RESPONSE, /* an unexpected response was recv'd from
- * the backend */
+ PGRES_BAD_RESPONSE, /* an unexpected response was recv'd from the
+ * backend */
PGRES_NONFATAL_ERROR, /* notice or warning message */
PGRES_FATAL_ERROR /* query failed */
} ExecStatusType;
typedef struct _PQprintOpt
{
- pqbool header; /* print output field headings and row
- * count */
+ pqbool header; /* print output field headings and row count */
pqbool align; /* fill align the fields */
pqbool standard; /* old brain dead format */
pqbool html3; /* output html tables */
char *fieldSep; /* field separator */
char *tableOpt; /* insert to HTML */
char *caption; /* HTML */
- char **fieldName; /* null terminated array of replacement
- * field names */
+ char **fieldName; /* null terminated array of replacement field
+ * names */
} PQprintOpt;
/* ----------------
char *compiled; /* Fallback compiled in default value */
char *val; /* Option's current value, or NULL */
char *label; /* Label for field in connect dialog */
- char *dispchar; /* Character to display for this field in
- * a connect dialog. Values are: ""
- * Display entered value as is "*"
- * Password field - hide value "D" Debug
- * option - don't show by default */
+ char *dispchar; /* Character to display for this field in a
+ * connect dialog. Values are: "" Display
+ * entered value as is "*" Password field -
+ * hide value "D" Debug option - don't show
+ * by default */
int dispsize; /* Field size in characters for dialog */
} PQconninfoOption;
const int *paramFormats,
int resultFormat);
extern PGresult *PQprepare(PGconn *conn, const char *stmtName,
- const char *query, int nParams,
- const Oid *paramTypes);
+ const char *query, int nParams,
+ const Oid *paramTypes);
extern PGresult *PQexecPrepared(PGconn *conn,
const char *stmtName,
int nParams,
const int *paramFormats,
int resultFormat);
extern int PQsendPrepare(PGconn *conn, const char *stmtName,
- const char *query, int nParams,
- const Oid *paramTypes);
+ const char *query, int nParams,
+ const Oid *paramTypes);
extern int PQsendQueryPrepared(PGconn *conn,
const char *stmtName,
int nParams,
FILE *fout, /* output stream */
int printAttName, /* print attribute names */
int terseOutput, /* delimiter bars */
- int width); /* width of column, if 0, use variable
- * width */
+ int width); /* width of column, if 0, use variable width */
/* === in fe-lobj.c === */
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.107 2005/08/23 21:02:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.108 2005/10/15 02:49:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#endif
#ifdef WIN32_CLIENT_ONLY
-typedef int ssize_t; /* ssize_t doesn't exist in VC (at least
- * not VC6) */
+typedef int ssize_t; /* ssize_t doesn't exist in VC (at least not
+ * VC6) */
#endif
/* include stuff common to fe and be */
Oid typid; /* type id */
int typlen; /* type size */
int atttypmod; /* type-specific modifier info */
-} PGresAttDesc;
+} PGresAttDesc;
/*
* Data for a single attribute of a single tuple
typedef struct pgresAttValue
{
int len; /* length in bytes of the value */
- char *value; /* actual value, plus terminating zero
- * byte */
-} PGresAttValue;
+ char *value; /* actual value, plus terminating zero byte */
+} PGresAttValue;
/* Typedef for message-field list entries */
typedef struct pgMessageField
struct pgMessageField *next; /* list link */
char code; /* field code */
char contents[1]; /* field value (VARIABLE LENGTH) */
-} PGMessageField;
+} PGMessageField;
/* Fields needed for notice handling */
typedef struct
void *noticeRecArg;
PQnoticeProcessor noticeProc; /* notice message processor */
void *noticeProcArg;
-} PGNoticeHooks;
+} PGNoticeHooks;
struct pg_result
{
* PGresAttValue's */
int tupArrSize; /* allocated size of tuples array */
ExecStatusType resultStatus;
- char cmdStatus[CMDSTATUS_LEN]; /* cmd status from the
- * query */
+ char cmdStatus[CMDSTATUS_LEN]; /* cmd status from the query */
int binary; /* binary tuple values if binary == 1,
* otherwise text */
/*
- * These fields are copied from the originating PGconn, so that
- * operations on the PGresult don't have to reference the PGconn.
+ * These fields are copied from the originating PGconn, so that operations
+ * on the PGresult don't have to reference the PGconn.
*/
PGNoticeHooks noticeHooks;
int client_encoding; /* encoding id */
/*
* Error information (all NULL if not an error result). errMsg is the
- * "overall" error message returned by PQresultErrorMessage. If we
- * have per-field info then it is stored in a linked list.
+ * "overall" error message returned by PQresultErrorMessage. If we have
+ * per-field info then it is stored in a linked list.
*/
char *errMsg; /* error message, or NULL if no error */
PGMessageField *errFields; /* message broken into fields */
char null_field[1];
/*
- * Space management information. Note that attDescs and error stuff,
- * if not null, point into allocated blocks. But tuples points to a
+ * Space management information. Note that attDescs and error stuff, if
+ * not null, point into allocated blocks. But tuples points to a
* separately malloc'd block, so that we can realloc it.
*/
PGresult_data *curBlock; /* most recently allocated block */
PGASYNC_READY, /* result ready for PQgetResult */
PGASYNC_COPY_IN, /* Copy In data transfer in progress */
PGASYNC_COPY_OUT /* Copy Out data transfer in progress */
-} PGAsyncStatusType;
+} PGAsyncStatusType;
/* PGQueryClass tracks which query protocol we are now executing */
typedef enum
PGQUERY_SIMPLE, /* simple Query protocol (PQexec) */
PGQUERY_EXTENDED, /* full Extended protocol (PQexecParams) */
PGQUERY_PREPARE /* Parse only (PQprepare) */
-} PGQueryClass;
+} PGQueryClass;
/* PGSetenvStatusType defines the state of the PQSetenv state machine */
/* (this is used only for 2.0-protocol connections) */
SETENV_STATE_QUERY2_SEND, /* About to send a status query */
SETENV_STATE_QUERY2_WAIT, /* Waiting for query to complete */
SETENV_STATE_IDLE
-} PGSetenvStatusType;
+} PGSetenvStatusType;
/* Typedef for the EnvironmentOptions[] array */
typedef struct PQEnvironmentOption
{
const char *envName, /* name of an environment variable */
*pgName; /* name of corresponding SET variable */
-} PQEnvironmentOption;
+} PQEnvironmentOption;
/* Typedef for parameter-status list entries */
typedef struct pgParameterStatus
char *name; /* parameter name */
char *value; /* parameter value */
/* Note: name and value are stored in same malloc block as struct is */
-} pgParameterStatus;
+} pgParameterStatus;
/* large-object-access data ... allocated only if large-object code is used. */
typedef struct pgLobjfuncs
Oid fn_lo_tell; /* OID of backend function lo_tell */
Oid fn_lo_read; /* OID of backend function LOread */
Oid fn_lo_write; /* OID of backend function LOwrite */
-} PGlobjfuncs;
+} PGlobjfuncs;
/*
* PGconn stores all the state data associated with a single connection
struct pg_conn
{
/* Saved values of connection options */
- char *pghost; /* the machine on which the server is
- * running */
- char *pghostaddr; /* the IPv4 address of the machine on
- * which the server is running, in IPv4
- * numbers-and-dots notation. Takes
- * precedence over above. */
+ char *pghost; /* the machine on which the server is running */
+ char *pghostaddr; /* the IPv4 address of the machine on which
+ * the server is running, in IPv4
+ * numbers-and-dots notation. Takes precedence
+ * over above. */
char *pgport; /* the server's communication port */
- char *pgunixsocket; /* the Unix-domain socket that the server
- * is listening on; if NULL, uses a
- * default constructed from pgport */
+ char *pgunixsocket; /* the Unix-domain socket that the server is
+ * listening on; if NULL, uses a default
+ * constructed from pgport */
char *pgtty; /* tty on which the backend messages is
* displayed (OBSOLETE, NOT USED) */
char *connect_timeout; /* connection timeout (numeric string) */
char *pgpass;
char *sslmode; /* SSL mode (require,prefer,allow,disable) */
#ifdef KRB5
- char *krbsrvname; /* Kerberos service name */
+ char *krbsrvname; /* Kerberos service name */
#endif
/* Optional file to write trace info to */
PGTransactionStatusType xactStatus;
/* note: xactStatus never changes to ACTIVE */
PGQueryClass queryclass;
- bool nonblocking; /* whether this connection is using
- * nonblock sending semantics */
+ bool nonblocking; /* whether this connection is using nonblock
+ * sending semantics */
char copy_is_binary; /* 1 = copy binary, 0 = copy text */
- int copy_already_done; /* # bytes already returned in
- * COPY OUT */
+ int copy_already_done; /* # bytes already returned in COPY
+ * OUT */
PGnotify *notifyHead; /* oldest unreported Notify msg */
PGnotify *notifyTail; /* newest unreported Notify msg */
pgParameterStatus *pstatus; /* ParameterStatus data */
int client_encoding; /* encoding id */
PGVerbosity verbosity; /* error/notice message verbosity */
- PGlobjfuncs *lobjfuncs; /* private state for large-object access
- * fns */
+ PGlobjfuncs *lobjfuncs; /* private state for large-object access fns */
/* Buffer for data received from backend and not yet processed */
char *inBuffer; /* currently allocated buffer */
int inBufSize; /* allocated size of buffer */
- int inStart; /* offset to first unconsumed data in
- * buffer */
+ int inStart; /* offset to first unconsumed data in buffer */
int inCursor; /* next byte to tentatively consume */
- int inEnd; /* offset to first position after avail
- * data */
+ int inEnd; /* offset to first position after avail data */
/* Buffer for data not yet sent to backend */
char *outBuffer; /* currently allocated buffer */
int outCount; /* number of chars waiting in buffer */
/* State for constructing messages in outBuffer */
- int outMsgStart; /* offset to msg start (length word); if
- * -1, msg has no length word */
+ int outMsgStart; /* offset to msg start (length word); if -1,
+ * msg has no length word */
int outMsgEnd; /* offset to msg end (so far) */
/* Status for asynchronous result construction */
extern void pqSaveErrorResult(PGconn *conn);
extern PGresult *pqPrepareAsyncResult(PGconn *conn);
extern void
-pqInternalNotice(const PGNoticeHooks *hooks, const char *fmt, ...)
+pqInternalNotice(const PGNoticeHooks * hooks, const char *fmt,...)
/* This lets gcc check the format string for consistency. */
__attribute__((format(printf, 2, 3)));
-extern int pqAddTuple(PGresult *res, PGresAttValue *tup);
+extern int pqAddTuple(PGresult *res, PGresAttValue * tup);
extern void pqSaveMessageField(PGresult *res, char code,
const char *value);
extern void pqSaveParameterStatus(PGconn *conn, const char *name,
extern PostgresPollingStatusType pqSetenvPoll(PGconn *conn);
extern char *pqBuildStartupPacket2(PGconn *conn, int *packetlen,
- const PQEnvironmentOption *options);
+ const PQEnvironmentOption * options);
extern void pqParseInput2(PGconn *conn);
extern int pqGetCopyData2(PGconn *conn, char **buffer, int async);
extern int pqGetline2(PGconn *conn, char *s, int maxlen);
/* === in fe-protocol3.c === */
extern char *pqBuildStartupPacket3(PGconn *conn, int *packetlen,
- const PQEnvironmentOption *options);
+ const PQEnvironmentOption * options);
extern void pqParseInput3(PGconn *conn);
extern int pqGetErrorNotice3(PGconn *conn, bool isError);
extern int pqGetCopyData3(PGconn *conn, char **buffer, int async);
/* === in fe-misc.c === */
/*
- * "Get" and "Put" routines return 0 if successful, EOF if not. Note that
- * for Get, EOF merely means the buffer is exhausted, not that there is
+ * "Get" and "Put" routines return 0 if successful, EOF if not. Note that for
+ * Get, EOF merely means the buffer is exhausted, not that there is
* necessarily any error.
*/
extern int pqCheckOutBufferSpace(int bytes_needed, PGconn *conn);
#if defined(ENABLE_THREAD_SAFETY) && !defined(WIN32)
extern int pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending);
extern void pq_reset_sigpipe(sigset_t *osigset, bool sigpipe_pending,
- bool got_epipe);
+ bool got_epipe);
#endif
/*
extern char *
libpq_gettext(const char *msgid)
__attribute__((format_arg(1)));
-
#else
#define libpq_gettext(x) (x)
#endif
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/pqexpbuffer.c,v 1.20 2004/12/31 22:03:50 pgsql Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/pqexpbuffer.c,v 1.21 2005/10/15 02:49:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *newdata;
/*
- * Guard against ridiculous "needed" values, which can occur if we're
- * fed bogus data. Without this, we can get an overflow or infinite
- * loop in the following.
+ * Guard against ridiculous "needed" values, which can occur if we're fed
+ * bogus data. Without this, we can get an overflow or infinite loop in
+ * the following.
*/
if (needed >= ((size_t) INT_MAX - str->len))
return 0;
return 1; /* got enough space already */
/*
- * We don't want to allocate just a little more space with each
- * append; for efficiency, double the buffer size each time it
- * overflows. Actually, we might need to more than double it if
- * 'needed' is big...
+ * We don't want to allocate just a little more space with each append;
+ * for efficiency, double the buffer size each time it overflows.
+ * Actually, we might need to more than double it if 'needed' is big...
*/
newlen = (str->maxlen > 0) ? (2 * str->maxlen) : 64;
while (needed > newlen)
newlen = 2 * newlen;
/*
- * Clamp to INT_MAX in case we went past it. Note we are assuming
- * here that INT_MAX <= UINT_MAX/2, else the above loop could
- * overflow. We will still have newlen >= needed.
+ * Clamp to INT_MAX in case we went past it. Note we are assuming here
+ * that INT_MAX <= UINT_MAX/2, else the above loop could overflow. We
+ * will still have newlen >= needed.
*/
if (newlen > (size_t) INT_MAX)
newlen = (size_t) INT_MAX;
{
/*
* Try to format the given string into the available space; but if
- * there's hardly any space, don't bother trying, just fall
- * through to enlarge the buffer first.
+ * there's hardly any space, don't bother trying, just fall through to
+ * enlarge the buffer first.
*/
if (str->maxlen > str->len + 16)
{
{
/*
* Try to format the given string into the available space; but if
- * there's hardly any space, don't bother trying, just fall
- * through to enlarge the buffer first.
+ * there's hardly any space, don't bother trying, just fall through to
+ * enlarge the buffer first.
*/
if (str->maxlen > str->len + 16)
{
str->len += datalen;
/*
- * Keep a trailing null in place, even though it's probably useless
- * for binary data...
+ * Keep a trailing null in place, even though it's probably useless for
+ * binary data...
*/
str->data[str->len] = '\0';
}
typedef HANDLE pthread_mutex_t;
typedef int pthread_once_t;
-DWORD pthread_self();
+DWORD pthread_self();
-void pthread_setspecific(pthread_key_t, void*);
-void* pthread_getspecific(pthread_key_t);
+void pthread_setspecific(pthread_key_t, void *);
+void *pthread_getspecific(pthread_key_t);
-void pthread_mutex_init(pthread_mutex_t *, void *attr);
-void pthread_mutex_lock(pthread_mutex_t*); // blocking
-void pthread_mutex_unlock(pthread_mutex_t*);
+void pthread_mutex_init(pthread_mutex_t *, void *attr);
+void pthread_mutex_lock(pthread_mutex_t *);
+
+//blocking
+void pthread_mutex_unlock(pthread_mutex_t *);
#endif
dlls[i].handle = (void *) LoadLibraryEx(
dlls[i].dll_name,
0,
- LOAD_LIBRARY_AS_DATAFILE);
+ LOAD_LIBRARY_AS_DATAFILE);
}
if (dlls[i].dll_name && !dlls[i].handle)
success = 0 != FormatMessage(
flags,
dlls[i].handle, err,
- MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
strerrbuf, buflen - 64,
0
);
* ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.92 2005/08/24 19:06:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.93 2005/10/15 02:49:49 momjian Exp $
*
**********************************************************************/
bool lanpltrusted;
bool fn_retistuple; /* true, if function returns tuple */
bool fn_retisset; /* true, if function returns set */
- bool fn_retisarray; /* true if function returns array */
+ bool fn_retisarray; /* true if function returns array */
Oid result_oid; /* Oid of result type */
- FmgrInfo result_in_func; /* I/O function and arg for result type */
+ FmgrInfo result_in_func; /* I/O function and arg for result type */
Oid result_typioparam;
int nargs;
FmgrInfo arg_out_func[FUNC_MAX_ARGS];
bool arg_is_rowtype[FUNC_MAX_ARGS];
SV *reference;
-} plperl_proc_desc;
+} plperl_proc_desc;
/**********************************************************************
static void plperl_init_shared_libs(pTHX);
static HV *plperl_spi_execute_fetch_result(SPITupleTable *, int, int);
-void plperl_return_next(SV *);
+void plperl_return_next(SV *);
/*
* This routine is a crock, and so is everyplace that calls it. The problem
return;
DefineCustomBoolVariable(
- "plperl.use_strict",
- "If true, will compile trusted and untrusted perl code in strict mode",
- NULL,
- &plperl_use_strict,
- PGC_USERSET,
- NULL, NULL);
+ "plperl.use_strict",
+ "If true, will compile trusted and untrusted perl code in strict mode",
+ NULL,
+ &plperl_use_strict,
+ PGC_USERSET,
+ NULL, NULL);
EmitWarningsOnPlaceholders("plperl");
"$PLContainer->deny('require');" \
"sub ::mk_strict_safefunc {" \
" my $ret = $PLContainer->reval(qq[sub { BEGIN { strict->import(); } $_[0] $_[1] }]); " \
- " $@ =~ s/\\(eval \\d+\\) //g if $@; return $ret; }"
+ " $@ =~ s/\\(eval \\d+\\) //g if $@; return $ret; }"
#define SAFE_BAD \
"use vars qw($PLContainer); $PLContainer = new Safe('PLPerl');" \
static void
plperl_init_interp(void)
{
- static char *embedding[3] = {
+ static char *embedding[3] = {
"", "-e", PERLBOOT
};
* assume that floating-point comparisons are exact, so use a slightly
* smaller comparison value.
*/
- if (safe_version < 2.0899 )
+ if (safe_version < 2.0899)
{
/* not safe, so disallow all trusted funcs */
eval_pv(SAFE_BAD, FALSE);
static char *
strip_trailing_ws(const char *msg)
{
- char *res = pstrdup(msg);
- int len = strlen(res);
+ char *res = pstrdup(msg);
+ int len = strlen(res);
- while (len > 0 && isspace((unsigned char) res[len-1]))
+ while (len > 0 && isspace((unsigned char) res[len - 1]))
res[--len] = '\0';
return res;
}
/* Build a tuple from a hash. */
static HeapTuple
-plperl_build_tuple_result(HV *perlhash, AttInMetadata *attinmeta)
+plperl_build_tuple_result(HV * perlhash, AttInMetadata *attinmeta)
{
TupleDesc td = attinmeta->tupdesc;
char **values;
hv_iterinit(perlhash);
while ((val = hv_iternextsv(perlhash, &key, &klen)))
{
- int attn = SPI_fnumber(td, key);
+ int attn = SPI_fnumber(td, key);
if (attn <= 0 || td->attrs[attn - 1]->attisdropped)
ereport(ERROR,
/*
* convert perl array to postgres string representation
*/
-static SV*
-plperl_convert_to_pg_array(SV *src)
+static SV *
+plperl_convert_to_pg_array(SV * src)
{
- SV* rv;
- int count;
- dSP ;
+ SV *rv;
+ int count;
+
+ dSP;
- PUSHMARK(SP) ;
+ PUSHMARK(SP);
XPUSHs(src);
- PUTBACK ;
+ PUTBACK;
count = call_pv("::_plperl_to_pg_array", G_SCALAR);
- SPAGAIN ;
+ SPAGAIN;
if (count != 1)
elog(ERROR, "unexpected _plperl_to_pg_array failure");
rv = POPs;
-
- PUTBACK ;
- return rv;
+ PUTBACK;
+
+ return rv;
}
tupdesc = tdata->tg_relation->rd_att;
relid = DatumGetCString(
- DirectFunctionCall1(oidout,
- ObjectIdGetDatum(tdata->tg_relation->rd_id)
- )
- );
+ DirectFunctionCall1(oidout,
+ ObjectIdGetDatum(tdata->tg_relation->rd_id)
+ )
+ );
hv_store(hv, "name", 4, newSVpv(tdata->tg_trigger->tgname, 0), 0);
hv_store(hv, "relid", 5, newSVpv(relid, 0), 0);
if (tdata->tg_trigger->tgnargs > 0)
{
- AV *av = newAV();
- for (i=0; i < tdata->tg_trigger->tgnargs; i++)
+ AV *av = newAV();
+
+ for (i = 0; i < tdata->tg_trigger->tgnargs; i++)
av_push(av, newSVpv(tdata->tg_trigger->tgargs[i], 0));
- hv_store(hv, "args", 4, newRV_noinc((SV *)av), 0);
+ hv_store(hv, "args", 4, newRV_noinc((SV *) av), 0);
}
hv_store(hv, "relname", 7,
level = "UNKNOWN";
hv_store(hv, "level", 5, newSVpv(level, 0), 0);
- return newRV_noinc((SV*)hv);
+ return newRV_noinc((SV *) hv);
}
/* Set up the new tuple returned from a trigger. */
static HeapTuple
-plperl_modify_tuple(HV *hvTD, TriggerData *tdata, HeapTuple otup)
+plperl_modify_tuple(HV * hvTD, TriggerData *tdata, HeapTuple otup)
{
SV **svp;
HV *hvNew;
&typinput, &typioparam);
fmgr_info(typinput, &finfo);
modvalues[slotsused] = FunctionCall3(&finfo,
- CStringGetDatum(SvPV(val, PL_na)),
- ObjectIdGetDatum(typioparam),
+ CStringGetDatum(SvPV(val, PL_na)),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(tupdesc->attrs[attn - 1]->atttypmod));
modnulls[slotsused] = ' ';
}
Datum
plperl_call_handler(PG_FUNCTION_ARGS)
{
- Datum retval;
+ Datum retval;
plperl_proc_desc *save_prodesc;
FunctionCallInfo save_caller_info;
Tuplestorestate *save_tuple_store;
- TupleDesc save_tuple_desc;
+ TupleDesc save_tuple_desc;
plperl_init_all();
dSP;
SV *subref;
int count;
- char *compile_sub;
+ char *compile_sub;
if (trusted && !plperl_safe_init_done)
{
/*
* G_KEEPERR seems to be needed here, else we don't recognize compile
- * errors properly. Perhaps it's because there's another level of
- * eval inside mksafefunc?
+ * errors properly. Perhaps it's because there's another level of eval
+ * inside mksafefunc?
*/
if (trusted && plperl_use_strict)
*
**********************************************************************/
-EXTERN_C void boot_DynaLoader(pTHX_ CV *cv);
-EXTERN_C void boot_SPI(pTHX_ CV *cv);
+EXTERN_C void boot_DynaLoader(pTHX_ CV * cv);
+EXTERN_C void boot_SPI(pTHX_ CV * cv);
static void
plperl_init_shared_libs(pTHX)
static SV *
-plperl_call_perl_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo)
+plperl_call_perl_func(plperl_proc_desc * desc, FunctionCallInfo fcinfo)
{
dSP;
SV *retval;
int i;
int count;
- SV *sv;
+ SV *sv;
ENTER;
SAVETMPS;
PUSHMARK(SP);
- XPUSHs(&PL_sv_undef); /* no trigger data */
+ XPUSHs(&PL_sv_undef); /* no trigger data */
for (i = 0; i < desc->nargs; i++)
{
fcinfo->arg[i]));
sv = newSVpv(tmp, 0);
#if PERL_BCDVERSION >= 0x5006000L
- if (GetDatabaseEncoding() == PG_UTF8) SvUTF8_on(sv);
+ if (GetDatabaseEncoding() == PG_UTF8)
+ SvUTF8_on(sv);
#endif
XPUSHs(sv_2mortal(sv));
pfree(tmp);
static SV *
-plperl_call_perl_trigger_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo,
- SV *td)
+plperl_call_perl_trigger_func(plperl_proc_desc * desc, FunctionCallInfo fcinfo,
+ SV * td)
{
dSP;
SV *retval;
SV *perlret;
Datum retval;
ReturnSetInfo *rsi;
- SV* array_ret = NULL;
+ SV *array_ret = NULL;
if (SPI_connect() != SPI_OK_CONNECT)
elog(ERROR, "could not connect to SPI manager");
plperl_current_tuple_store = 0;
plperl_current_tuple_desc = 0;
- rsi = (ReturnSetInfo *)fcinfo->resultinfo;
+ rsi = (ReturnSetInfo *) fcinfo->resultinfo;
if (prodesc->fn_retisset)
{
{
/*
* If the Perl function returned an arrayref, we pretend that it
- * called return_next() for each element of the array, to handle
- * old SRFs that didn't know about return_next(). Any other sort
- * of return value is an error.
+ * called return_next() for each element of the array, to handle old
+ * SRFs that didn't know about return_next(). Any other sort of return
+ * value is an error.
*/
if (SvTYPE(perlret) == SVt_RV &&
SvTYPE(SvRV(perlret)) == SVt_PVAV)
{
- int i = 0;
- SV **svp = 0;
- AV *rav = (AV *)SvRV(perlret);
- while ((svp = av_fetch(rav, i, FALSE)) != NULL)
+ int i = 0;
+ SV **svp = 0;
+ AV *rav = (AV *) SvRV(perlret);
+
+ while ((svp = av_fetch(rav, i, FALSE)) != NULL)
{
plperl_return_next(*svp);
i++;
}
rsi->returnMode = SFRM_Materialize;
- if (plperl_current_tuple_store)
+ if (plperl_current_tuple_store)
{
rsi->setResult = plperl_current_tuple_store;
rsi->setDesc = plperl_current_tuple_desc;
}
- retval = (Datum)0;
+ retval = (Datum) 0;
}
else if (SvTYPE(perlret) == SVt_NULL)
{
if (rsi && IsA(rsi, ReturnSetInfo))
rsi->isDone = ExprEndResult;
fcinfo->isnull = true;
- retval = (Datum)0;
+ retval = (Datum) 0;
}
else if (prodesc->fn_retistuple)
{
/* Return a perl hash converted to a Datum */
- TupleDesc td;
+ TupleDesc td;
AttInMetadata *attinmeta;
- HeapTuple tup;
+ HeapTuple tup;
if (!SvOK(perlret) || SvTYPE(perlret) != SVt_RV ||
SvTYPE(SvRV(perlret)) != SVt_PVHV)
}
attinmeta = TupleDescGetAttInMetadata(td);
- tup = plperl_build_tuple_result((HV *)SvRV(perlret), attinmeta);
+ tup = plperl_build_tuple_result((HV *) SvRV(perlret), attinmeta);
retval = HeapTupleGetDatum(tup);
}
else
{
- /* Return a perl string converted to a Datum */
- char *val;
-
- if (prodesc->fn_retisarray && SvROK(perlret) &&
+ /* Return a perl string converted to a Datum */
+ char *val;
+
+ if (prodesc->fn_retisarray && SvROK(perlret) &&
SvTYPE(SvRV(perlret)) == SVt_PVAV)
- {
- array_ret = plperl_convert_to_pg_array(perlret);
- SvREFCNT_dec(perlret);
- perlret = array_ret;
- }
+ {
+ array_ret = plperl_convert_to_pg_array(perlret);
+ SvREFCNT_dec(perlret);
+ perlret = array_ret;
+ }
val = SvPV(perlret, PL_na);
}
if (array_ret == NULL)
- SvREFCNT_dec(perlret);
+ SvREFCNT_dec(perlret);
return retval;
}
else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
retval = (Datum) trigdata->tg_trigtuple;
else
- retval = (Datum) 0; /* can this happen? */
+ retval = (Datum) 0; /* can this happen? */
}
else
{
{
ereport(WARNING,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("ignoring modified tuple in DELETE trigger")));
+ errmsg("ignoring modified tuple in DELETE trigger")));
trv = NULL;
}
}
int proname_len;
plperl_proc_desc *prodesc = NULL;
int i;
- SV **svp;
+ SV **svp;
/* We'll need the pg_proc tuple in any case... */
procTup = SearchSysCache(PROCOID,
* function's pg_proc entry without changing its OID.
************************************************************/
uptodate = (prodesc->fn_xmin == HeapTupleHeaderGetXmin(procTup->t_data) &&
- prodesc->fn_cmin == HeapTupleHeaderGetCmin(procTup->t_data));
+ prodesc->fn_cmin == HeapTupleHeaderGetCmin(procTup->t_data));
if (!uptodate)
{
if (!is_trigger)
{
typeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(procStruct->prorettype),
+ ObjectIdGetDatum(procStruct->prorettype),
0, 0, 0);
if (!HeapTupleIsValid(typeTup))
{
free(prodesc);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("plperl functions cannot return type %s",
- format_type_be(procStruct->prorettype))));
+ errmsg("plperl functions cannot return type %s",
+ format_type_be(procStruct->prorettype))));
}
}
prodesc->fn_retistuple = (typeStruct->typtype == 'c' ||
procStruct->prorettype == RECORDOID);
- prodesc->fn_retisarray =
- (typeStruct->typlen == -1 && typeStruct->typelem) ;
+ prodesc->fn_retisarray =
+ (typeStruct->typlen == -1 && typeStruct->typelem);
perm_fmgr_info(typeStruct->typinput, &(prodesc->result_in_func));
prodesc->result_typioparam = getTypeIOParam(typeTup);
for (i = 0; i < prodesc->nargs; i++)
{
typeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(procStruct->proargtypes.values[i]),
+ ObjectIdGetDatum(procStruct->proargtypes.values[i]),
0, 0, 0);
if (!HeapTupleIsValid(typeTup))
{
free(prodesc);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("plperl functions cannot take type %s",
- format_type_be(procStruct->proargtypes.values[i]))));
+ errmsg("plperl functions cannot take type %s",
+ format_type_be(procStruct->proargtypes.values[i]))));
}
if (typeStruct->typtype == 'c')
************************************************************/
prodesc->reference = plperl_create_sub(proc_source, prodesc->lanpltrusted);
pfree(proc_source);
- if (!prodesc->reference) /* can this happen? */
+ if (!prodesc->reference) /* can this happen? */
{
free(prodesc->proname);
free(prodesc);
Oid typoutput;
bool typisvarlena;
int namelen;
- SV *sv;
+ SV *sv;
if (tupdesc->attrs[i]->attisdropped)
continue;
namelen = strlen(attname);
attr = heap_getattr(tuple, i + 1, tupdesc, &isnull);
- if (isnull) {
+ if (isnull)
+ {
/* Store (attname => undef) and move on. */
hv_store(hv, attname, namelen, newSV(0), 0);
continue;
HV *ret_hv;
/*
- * Execute the query inside a sub-transaction, so we can cope with
- * errors sanely
+ * Execute the query inside a sub-transaction, so we can cope with errors
+ * sanely
*/
MemoryContext oldcontext = CurrentMemoryContext;
ResourceOwner oldowner = CurrentResourceOwner;
ReleaseCurrentSubTransaction();
MemoryContextSwitchTo(oldcontext);
CurrentResourceOwner = oldowner;
+
/*
- * AtEOSubXact_SPI() should not have popped any SPI context,
- * but just in case it did, make sure we remain connected.
+ * AtEOSubXact_SPI() should not have popped any SPI context, but just
+ * in case it did, make sure we remain connected.
*/
SPI_restore_connection();
}
CurrentResourceOwner = oldowner;
/*
- * If AtEOSubXact_SPI() popped any SPI context of the subxact,
- * it will have left us in a disconnected state. We need this
- * hack to return to connected state.
+ * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will
+ * have left us in a disconnected state. We need this hack to return
+ * to connected state.
*/
SPI_restore_connection();
void
-plperl_return_next(SV *sv)
+plperl_return_next(SV * sv)
{
plperl_proc_desc *prodesc = plperl_current_prodesc;
FunctionCallInfo fcinfo = plperl_current_caller_info;
- ReturnSetInfo *rsi = (ReturnSetInfo *)fcinfo->resultinfo;
+ ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
MemoryContext cxt;
- HeapTuple tuple;
- TupleDesc tupdesc;
+ HeapTuple tuple;
+ TupleDesc tupdesc;
if (!sv)
return;
cxt = MemoryContextSwitchTo(rsi->econtext->ecxt_per_query_memory);
if (!plperl_current_tuple_store)
- plperl_current_tuple_store =
+ plperl_current_tuple_store =
tuplestore_begin_heap(true, false, work_mem);
if (prodesc->fn_retistuple)
rettype = get_call_result_type(fcinfo, NULL, &tupdesc);
tupdesc = CreateTupleDescCopy(tupdesc);
attinmeta = TupleDescGetAttInMetadata(tupdesc);
- tuple = plperl_build_tuple_result((HV *)SvRV(sv), attinmeta);
+ tuple = plperl_build_tuple_result((HV *) SvRV(sv), attinmeta);
}
else
{
- Datum ret;
- bool isNull;
+ Datum ret;
+ bool isNull;
tupdesc = CreateTupleDescCopy(rsi->expectedDesc);
if (SvOK(sv) && SvTYPE(sv) != SVt_NULL)
{
- char *val = SvPV(sv, PL_na);
+ char *val = SvPV(sv, PL_na);
+
ret = FunctionCall3(&prodesc->result_in_func,
PointerGetDatum(val),
ObjectIdGetDatum(prodesc->result_typioparam),
Int32GetDatum(-1));
isNull = false;
}
- else {
- ret = (Datum)0;
+ else
+ {
+ ret = (Datum) 0;
isNull = true;
}
SV *
plperl_spi_query(char *query)
{
- SV *cursor;
+ SV *cursor;
MemoryContext oldcontext = CurrentMemoryContext;
ResourceOwner oldowner = CurrentResourceOwner;
PG_TRY();
{
- void *plan;
- Portal portal = NULL;
+ void *plan;
+ Portal portal = NULL;
plan = SPI_prepare(query, 0, NULL);
if (plan)
SV *
plperl_spi_fetchrow(char *cursor)
{
- SV *row = newSV(0);
- Portal p = SPI_cursor_find(cursor);
+ SV *row = newSV(0);
+ Portal p = SPI_cursor_find(cursor);
if (!p)
return row;
SPI_cursor_fetch(p, true, 1);
- if (SPI_processed == 0) {
+ if (SPI_processed == 0)
+ {
SPI_cursor_close(p);
return row;
}
#else
#if defined(USE_THREADS)
static SV *
-newRV_noinc(SV *sv)
+newRV_noinc(SV * sv)
{
SV *nsv = (SV *) newRV(sv);
SvREFCNT_dec(sv);
return nsv;
}
-
#else
#define newRV_noinc(sv) \
(PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv)
#if defined(NEED_newCONSTSUB)
static
#else
-extern void newCONSTSUB(HV *stash, char *name, SV *sv);
+extern void newCONSTSUB(HV * stash, char *name, SV * sv);
#endif
#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
#endif
newSVOP(OP_CONST, 0, newSVpv(name, 0)),
- newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == ""
- * -- GMB */
+ newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == "" --
+ * GMB */
newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
);
#define aMY_CXT my_cxtp
#define aMY_CXT_ aMY_CXT,
#define _aMY_CXT ,aMY_CXT
-
#else /* single interpreter */
#define START_MY_CXT static my_cxt_t my_cxt;
((SvFLAGS(sv) & (SVf_POK|SVf_UTF8)) == (SVf_POK) \
? ((lp = SvCUR(sv)), SvPVX(sv)) : my_sv_2pvbyte(aTHX_ sv, &lp))
static char *
-my_sv_2pvbyte(pTHX_ register SV *sv, STRLEN *lp)
+my_sv_2pvbyte(pTHX_ register SV * sv, STRLEN * lp)
{
sv_utf8_downgrade(sv, 0);
return SvPV(sv, *lp);
((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \
? SvPVX(sv) : sv_2pv_nolen(sv))
static char *
-sv_2pv_nolen(pTHX_ register SV *sv)
+sv_2pv_nolen(pTHX_ register SV * sv)
{
STRLEN n_a;
/* this is actually in plperl.c */
HV *plperl_spi_exec(char *, int);
-void plperl_return_next(SV *);
-SV *plperl_spi_query(char *);
-SV *plperl_spi_fetchrow(char *);
+void plperl_return_next(SV *);
+SV *plperl_spi_query(char *);
+SV *plperl_spi_fetchrow(char *);
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.93 2005/09/24 22:54:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.94 2005/10/15 02:49:49 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
{
PLpgSQL_func_hashkey key;
PLpgSQL_function *function;
-} plpgsql_HashEnt;
+} plpgsql_HashEnt;
#define FUNCS_PER_USER 128 /* initial table size */
{
const char *label;
int sqlerrstate;
-} ExceptionLabelMap;
+} ExceptionLabelMap;
static const ExceptionLabelMap exception_label_map[] = {
#include "plerrcodes.h"
*/
static PLpgSQL_function *do_compile(FunctionCallInfo fcinfo,
HeapTuple procTup,
- PLpgSQL_func_hashkey *hashkey,
+ PLpgSQL_func_hashkey * hashkey,
bool forValidator);
-static int fetchArgInfo(HeapTuple procTup,
- Oid **p_argtypes, char ***p_argnames,
- char **p_argmodes);
+static int fetchArgInfo(HeapTuple procTup,
+ Oid **p_argtypes, char ***p_argnames,
+ char **p_argmodes);
static PLpgSQL_row *build_row_from_class(Oid classOid);
-static PLpgSQL_row *build_row_from_vars(PLpgSQL_variable **vars, int numvars);
+static PLpgSQL_row *build_row_from_vars(PLpgSQL_variable ** vars, int numvars);
static PLpgSQL_type *build_datatype(HeapTuple typeTup, int32 typmod);
static void compute_function_hashkey(FunctionCallInfo fcinfo,
Form_pg_proc procStruct,
- PLpgSQL_func_hashkey *hashkey,
+ PLpgSQL_func_hashkey * hashkey,
bool forValidator);
static void plpgsql_resolve_polymorphic_argtypes(int numargs,
Oid *argtypes, char *argmodes,
Node *call_expr, bool forValidator,
const char *proname);
-static PLpgSQL_function *plpgsql_HashTableLookup(PLpgSQL_func_hashkey *func_key);
-static void plpgsql_HashTableInsert(PLpgSQL_function *function,
- PLpgSQL_func_hashkey *func_key);
-static void plpgsql_HashTableDelete(PLpgSQL_function *function);
-static void delete_function(PLpgSQL_function *func);
+static PLpgSQL_function *plpgsql_HashTableLookup(PLpgSQL_func_hashkey * func_key);
+static void plpgsql_HashTableInsert(PLpgSQL_function * function,
+ PLpgSQL_func_hashkey * func_key);
+static void plpgsql_HashTableDelete(PLpgSQL_function * function);
+static void delete_function(PLpgSQL_function * func);
/* ----------
* plpgsql_compile Make an execution tree for a PL/pgSQL function.
procStruct = (Form_pg_proc) GETSTRUCT(procTup);
/*
- * See if there's already a cache entry for the current FmgrInfo. If
- * not, try to find one in the hash table.
+ * See if there's already a cache entry for the current FmgrInfo. If not,
+ * try to find one in the hash table.
*/
function = (PLpgSQL_function *) fcinfo->flinfo->fn_extra;
{
/* We have a compiled function, but is it still valid? */
if (!(function->fn_xmin == HeapTupleHeaderGetXmin(procTup->t_data) &&
- function->fn_cmin == HeapTupleHeaderGetCmin(procTup->t_data)))
+ function->fn_cmin == HeapTupleHeaderGetCmin(procTup->t_data)))
{
/* Nope, drop the function and associated storage */
delete_function(function);
}
/*
- * If the function wasn't found or was out-of-date, we have to compile
- * it
+ * If the function wasn't found or was out-of-date, we have to compile it
*/
if (!function)
{
/*
- * Calculate hashkey if we didn't already; we'll need it to store
- * the completed function.
+ * Calculate hashkey if we didn't already; we'll need it to store the
+ * completed function.
*/
if (!hashkey_valid)
compute_function_hashkey(fcinfo, procStruct, &hashkey,
static PLpgSQL_function *
do_compile(FunctionCallInfo fcinfo,
HeapTuple procTup,
- PLpgSQL_func_hashkey *hashkey,
+ PLpgSQL_func_hashkey * hashkey,
bool forValidator)
{
Form_pg_proc procStruct = (Form_pg_proc) GETSTRUCT(procTup);
MemoryContext func_cxt;
/*
- * Setup the scanner input and error info. We assume that this
- * function cannot be invoked recursively, so there's no need to save
- * and restore the static variables used here.
+ * Setup the scanner input and error info. We assume that this function
+ * cannot be invoked recursively, so there's no need to save and restore
+ * the static variables used here.
*/
prosrcdatum = SysCacheGetAttr(PROCOID, procTup,
Anum_pg_proc_prosrc, &isnull);
datums_last = 0;
/*
- * Do extra syntax checks when validating the function
- * definition. We skip this when actually compiling functions for
- * execution, for performance reasons.
+ * Do extra syntax checks when validating the function definition. We skip
+ * this when actually compiling functions for execution, for performance
+ * reasons.
*/
plpgsql_check_syntax = forValidator;
/*
- * Create the new function node. We allocate the function and all
- * of its compile-time storage (e.g. parse tree) in its own memory
- * context. This allows us to reclaim the function's storage
- * cleanly.
+ * Create the new function node. We allocate the function and all of its
+ * compile-time storage (e.g. parse tree) in its own memory context. This
+ * allows us to reclaim the function's storage cleanly.
*/
func_cxt = AllocSetContextCreate(TopMemoryContext,
"PL/PgSQL function context",
function->fn_cmin = HeapTupleHeaderGetCmin(procTup->t_data);
function->fn_functype = functype;
function->fn_cxt = func_cxt;
- function->out_param_varno = -1; /* set up for no OUT param */
+ function->out_param_varno = -1; /* set up for no OUT param */
switch (functype)
{
case T_FUNCTION:
+
/*
- * Fetch info about the procedure's parameters. Allocations
- * aren't needed permanently, so make them in tmp cxt.
+ * Fetch info about the procedure's parameters. Allocations aren't
+ * needed permanently, so make them in tmp cxt.
*
- * We also need to resolve any polymorphic input or output
- * argument types. In validation mode we won't be able to,
- * so we arbitrarily assume we are dealing with integers.
+ * We also need to resolve any polymorphic input or output argument
+ * types. In validation mode we won't be able to, so we
+ * arbitrarily assume we are dealing with integers.
*/
MemoryContextSwitchTo(compile_tmp_cxt);
argdtype->ttype != PLPGSQL_TTYPE_ROW)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("plpgsql functions cannot take type %s",
- format_type_be(argtypeid))));
+ errmsg("plpgsql functions cannot take type %s",
+ format_type_be(argtypeid))));
/* Build variable and add to datum list */
argvariable = plpgsql_build_variable(buf, 0,
/*
* If there's just one OUT parameter, out_param_varno points
- * directly to it. If there's more than one, build a row
- * that holds all of them.
+ * directly to it. If there's more than one, build a row that
+ * holds all of them.
*/
if (num_out_args == 1)
function->out_param_varno = out_arg_variables[0]->dno;
}
/*
- * Check for a polymorphic returntype. If found, use the
- * actual returntype type from the caller's FuncExpr node, if
- * we have one. (In validation mode we arbitrarily assume we
- * are dealing with integers.)
+ * Check for a polymorphic returntype. If found, use the actual
+ * returntype type from the caller's FuncExpr node, if we have
+ * one. (In validation mode we arbitrarily assume we are dealing
+ * with integers.)
*
- * Note: errcode is FEATURE_NOT_SUPPORTED because it should
- * always work; if it doesn't we're in some context that fails
- * to make the info available.
+ * Note: errcode is FEATURE_NOT_SUPPORTED because it should always
+ * work; if it doesn't we're in some context that fails to make
+ * the info available.
*/
rettypeid = procStruct->prorettype;
if (rettypeid == ANYARRAYOID || rettypeid == ANYELEMENTOID)
if (!OidIsValid(rettypeid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("could not determine actual return type "
- "for polymorphic function \"%s\"",
- plpgsql_error_funcname)));
+ errmsg("could not determine actual return type "
+ "for polymorphic function \"%s\"",
+ plpgsql_error_funcname)));
}
}
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("plpgsql functions cannot return type %s",
- format_type_be(rettypeid))));
+ errmsg("plpgsql functions cannot return type %s",
+ format_type_be(rettypeid))));
}
if (typeStruct->typrelid != InvalidOid ||
num_out_args == 0)
{
(void) plpgsql_build_variable("$0", 0,
- build_datatype(typeTup, -1),
+ build_datatype(typeTup, -1),
true);
}
}
if (procStruct->pronargs != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("trigger functions cannot have declared arguments"),
+ errmsg("trigger functions cannot have declared arguments"),
errhint("You probably want to use TG_NARGS and TG_ARGV instead.")));
/* Add the record for referencing NEW */
/* Add the variable tg_name */
var = plpgsql_build_variable("tg_name", 0,
- plpgsql_build_datatype(NAMEOID, -1),
+ plpgsql_build_datatype(NAMEOID, -1),
true);
function->tg_name_varno = var->dno;
/* Add the variable tg_when */
var = plpgsql_build_variable("tg_when", 0,
- plpgsql_build_datatype(TEXTOID, -1),
+ plpgsql_build_datatype(TEXTOID, -1),
true);
function->tg_when_varno = var->dno;
/* Add the variable tg_level */
var = plpgsql_build_variable("tg_level", 0,
- plpgsql_build_datatype(TEXTOID, -1),
+ plpgsql_build_datatype(TEXTOID, -1),
true);
function->tg_level_varno = var->dno;
/* Add the variable tg_op */
var = plpgsql_build_variable("tg_op", 0,
- plpgsql_build_datatype(TEXTOID, -1),
+ plpgsql_build_datatype(TEXTOID, -1),
true);
function->tg_op_varno = var->dno;
/* Add the variable tg_relid */
var = plpgsql_build_variable("tg_relid", 0,
- plpgsql_build_datatype(OIDOID, -1),
+ plpgsql_build_datatype(OIDOID, -1),
true);
function->tg_relid_varno = var->dno;
/* Add the variable tg_relname */
var = plpgsql_build_variable("tg_relname", 0,
- plpgsql_build_datatype(NAMEOID, -1),
+ plpgsql_build_datatype(NAMEOID, -1),
true);
function->tg_relname_varno = var->dno;
/* Add the variable tg_nargs */
var = plpgsql_build_variable("tg_nargs", 0,
- plpgsql_build_datatype(INT4OID, -1),
+ plpgsql_build_datatype(INT4OID, -1),
true);
function->tg_nargs_varno = var->dno;
/*
* If it has OUT parameters or returns VOID or returns a set, we allow
- * control to fall off the end without an explicit RETURN statement.
- * The easiest way to implement this is to add a RETURN statement to the
- * end of the statement list during parsing. However, if the outer block
- * has an EXCEPTION clause, we need to make a new outer block, since the
- * added RETURN shouldn't act like it is inside the EXCEPTION clause.
+ * control to fall off the end without an explicit RETURN statement. The
+ * easiest way to implement this is to add a RETURN statement to the end
+ * of the statement list during parsing. However, if the outer block has
+ * an EXCEPTION clause, we need to make a new outer block, since the added
+ * RETURN shouldn't act like it is inside the EXCEPTION clause.
*/
if (num_out_args > 0 || function->fn_rettype == VOIDOID ||
function->fn_retset)
PLpgSQL_stmt_block *new;
new = palloc0(sizeof(PLpgSQL_stmt_block));
- new->cmd_type = PLPGSQL_STMT_BLOCK;
- new->body = list_make1(function->action);
+ new->cmd_type = PLPGSQL_STMT_BLOCK;
+ new->body = list_make1(function->action);
function->action = new;
}
if (arg)
{
/*
- * Try to convert syntax error position to reference text of
- * original CREATE FUNCTION command.
+ * Try to convert syntax error position to reference text of original
+ * CREATE FUNCTION command.
*/
if (function_parse_error_transpose((const char *) arg))
return;
/*
- * Done if a syntax error position was reported; otherwise we have
- * to fall back to a "near line N" report.
+ * Done if a syntax error position was reported; otherwise we have to
+ * fall back to a "near line N" report.
*/
}
* deconstruct_array() since the array data is just going to look like
* a C array of values.
*/
- arr = DatumGetArrayTypeP(proallargtypes); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(proallargtypes); /* ensure not toasted */
numargs = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numargs < 0 ||
deconstruct_array(DatumGetArrayTypeP(proargnames),
TEXTOID, -1, false, 'i',
&elems, &nelems);
- if (nelems != numargs) /* should not happen */
+ if (nelems != numargs) /* should not happen */
elog(ERROR, "proargnames must have the same number of elements as the function has arguments");
*p_argnames = (char **) palloc(sizeof(char *) * numargs);
for (i = 0; i < numargs; i++)
{
if (strcmp(cp[0], "tg_argv") == 0)
{
- bool save_spacescanned = plpgsql_SpaceScanned;
+ bool save_spacescanned = plpgsql_SpaceScanned;
PLpgSQL_trigarg *trigarg;
trigarg = palloc0(sizeof(PLpgSQL_trigarg));
}
/*
- * Nothing found - up to now it's a word without any special meaning
- * for us.
+ * Nothing found - up to now it's a word without any special meaning for
+ * us.
*/
pfree(cp[0]);
return T_WORD;
case PLPGSQL_NSTYPE_REC:
{
/*
- * First word is a record name, so second word must be a
- * field in this record.
+ * First word is a record name, so second word must be a field
+ * in this record.
*/
PLpgSQL_recfield *new;
case PLPGSQL_NSTYPE_ROW:
{
/*
- * First word is a row name, so second word must be a
- * field in this row.
+ * First word is a row name, so second word must be a field in
+ * this row.
*/
PLpgSQL_row *row;
int i;
case PLPGSQL_NSTYPE_REC:
{
/*
- * This word is a record name, so third word must be a
- * field in this record.
+ * This word is a record name, so third word must be a field
+ * in this record.
*/
PLpgSQL_recfield *new;
case PLPGSQL_NSTYPE_ROW:
{
/*
- * This word is a row name, so third word must be a field
- * in this row.
+ * This word is a row name, so third word must be a field in
+ * this row.
*/
PLpgSQL_row *row;
int i;
pfree(cp[1]);
/*
- * Do a lookup on the compiler's namestack. But ensure it moves up to
- * the toplevel.
+ * Do a lookup on the compiler's namestack. But ensure it moves up to the
+ * toplevel.
*/
old_nsstate = plpgsql_ns_setlocal(false);
nse = plpgsql_ns_lookup(cp[0], NULL);
}
/*
- * Word wasn't found on the namestack. Try to find a data type with
- * that name, but ignore pg_type entries that are in fact class types.
+ * Word wasn't found on the namestack. Try to find a data type with that
+ * name, but ignore pg_type entries that are in fact class types.
*/
typeOid = LookupTypeName(makeTypeName(cp[0]));
if (OidIsValid(typeOid))
}
/*
- * Nothing found - up to now it's a word without any special meaning
- * for us.
+ * Nothing found - up to now it's a word without any special meaning for
+ * us.
*/
pfree(cp[0]);
return T_ERROR;
nse = plpgsql_ns_lookup(cp[0], NULL);
/*
- * If this is a label lookup the second word in that label's
- * namestack level
+ * If this is a label lookup the second word in that label's namestack
+ * level
*/
if (nse != NULL)
{
elog(ERROR, "cache lookup failed for type %u", attrStruct->atttypid);
/*
- * Found that - build a compiler type struct in the caller's cxt
- * and return it
+ * Found that - build a compiler type struct in the caller's cxt and
+ * return it
*/
MemoryContextSwitchTo(oldCxt);
plpgsql_yylval.dtype = build_datatype(typetup, attrStruct->atttypmod);
int i;
RangeVar *relvar;
MemoryContext oldCxt;
- int result = T_ERROR;
+ int result = T_ERROR;
/* Avoid memory leaks in the long-term function context */
oldCxt = MemoryContextSwitchTo(compile_tmp_cxt);
cp[1][qualified_att_len - i - 1] = '\0';
relvar = makeRangeVarFromNameList(stringToQualifiedNameList(cp[0],
- "plpgsql_parse_tripwordtype"));
+ "plpgsql_parse_tripwordtype"));
classOid = RangeVarGetRelid(relvar, true);
if (!OidIsValid(classOid))
goto done;
elog(ERROR, "cache lookup failed for type %u", attrStruct->atttypid);
/*
- * Found that - build a compiler type struct in the caller's cxt
- * and return it
+ * Found that - build a compiler type struct in the caller's cxt and
+ * return it
*/
MemoryContextSwitchTo(oldCxt);
plpgsql_yylval.dtype = build_datatype(typetup, attrStruct->atttypmod);
*
* The returned struct may be a PLpgSQL_var, PLpgSQL_row, or
* PLpgSQL_rec depending on the given datatype, and is allocated via
- * palloc. The struct is automatically added to the current datum
+ * palloc. The struct is automatically added to the current datum
* array, and optionally to the current namespace.
*/
PLpgSQL_variable *
-plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type *dtype,
+plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type * dtype,
bool add2namespace)
{
PLpgSQL_variable *result;
case PLPGSQL_TTYPE_REC:
{
/*
- * "record" type -- build a variable-contents record
- * variable
+ * "record" type -- build a variable-contents record variable
*/
PLpgSQL_rec *rec;
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("variable \"%s\" has pseudo-type %s",
refname, format_type_be(dtype->typoid))));
- result = NULL; /* keep compiler quiet */
+ result = NULL; /* keep compiler quiet */
break;
default:
elog(ERROR, "unrecognized ttype: %d", dtype->ttype);
errmsg("relation \"%s\" is not a table", relname)));
/*
- * Create a row datum entry and all the required variables that it
- * will point to.
+ * Create a row datum entry and all the required variables that it will
+ * point to.
*/
row = palloc0(sizeof(PLpgSQL_row));
row->dtype = PLPGSQL_DTYPE_ROW;
/*
* Create the internal variable for the field
*
- * We know if the table definitions contain a default value or if
- * the field is declared in the table as NOT NULL. But it's
- * possible to create a table field as NOT NULL without a
- * default value and that would lead to problems later when
- * initializing the variables due to entering a block at
- * execution time. Thus we ignore this information for now.
+ * We know if the table definitions contain a default value or if the
+ * field is declared in the table as NOT NULL. But it's possible
+ * to create a table field as NOT NULL without a default value and
+ * that would lead to problems later when initializing the
+ * variables due to entering a block at execution time. Thus we
+ * ignore this information for now.
*/
var = plpgsql_build_variable(refname, 0,
- plpgsql_build_datatype(attrStruct->atttypid,
- attrStruct->atttypmod),
+ plpgsql_build_datatype(attrStruct->atttypid,
+ attrStruct->atttypmod),
false);
/* Add the variable to the row */
* Build a row-variable data structure given the component variables.
*/
static PLpgSQL_row *
-build_row_from_vars(PLpgSQL_variable **vars, int numvars)
+build_row_from_vars(PLpgSQL_variable ** vars, int numvars)
{
PLpgSQL_row *row;
int i;
for (i = 0; i < numvars; i++)
{
PLpgSQL_variable *var = vars[i];
- Oid typoid = RECORDOID;
- int32 typmod = -1;
+ Oid typoid = RECORDOID;
+ int32 typmod = -1;
switch (var->dtype)
{
row->fieldnames[i] = var->refname;
row->varnos[i] = var->dno;
- TupleDescInitEntry(row->rowtupdesc, i+1,
+ TupleDescInitEntry(row->rowtupdesc, i + 1,
var->refname,
typoid, typmod,
0);
PLpgSQL_condition *prev;
/*
- * XXX Eventually we will want to look for user-defined exception
- * names here.
+ * XXX Eventually we will want to look for user-defined exception names
+ * here.
*/
/*
* ----------
*/
void
-plpgsql_adddatum(PLpgSQL_datum *new)
+plpgsql_adddatum(PLpgSQL_datum * new)
{
if (plpgsql_nDatums == datums_alloc)
{
static void
compute_function_hashkey(FunctionCallInfo fcinfo,
Form_pg_proc procStruct,
- PLpgSQL_func_hashkey *hashkey,
+ PLpgSQL_func_hashkey * hashkey,
bool forValidator)
{
/* Make sure any unused bytes of the struct are zero */
hashkey->funcOid = fcinfo->flinfo->fn_oid;
/*
- * if trigger, get relation OID. In validation mode we do not know
- * what relation is intended to be used, so we leave trigrelOid zero;
- * the hash entry built in this case will never really be used.
+ * if trigger, get relation OID. In validation mode we do not know what
+ * relation is intended to be used, so we leave trigrelOid zero; the hash
+ * entry built in this case will never really be used.
*/
if (CALLED_AS_TRIGGER(fcinfo) && !forValidator)
{
}
static void
-delete_function(PLpgSQL_function *func)
+delete_function(PLpgSQL_function * func)
{
/* remove function from hash table */
plpgsql_HashTableDelete(func);
MemoryContextDelete(func->fn_cxt);
/*
- * Caller should be sure not to use passed-in pointer, as it now
- * points to pfree'd storage
+ * Caller should be sure not to use passed-in pointer, as it now points to
+ * pfree'd storage
*/
}
}
static PLpgSQL_function *
-plpgsql_HashTableLookup(PLpgSQL_func_hashkey *func_key)
+plpgsql_HashTableLookup(PLpgSQL_func_hashkey * func_key)
{
plpgsql_HashEnt *hentry;
}
static void
-plpgsql_HashTableInsert(PLpgSQL_function *function,
- PLpgSQL_func_hashkey *func_key)
+plpgsql_HashTableInsert(PLpgSQL_function * function,
+ PLpgSQL_func_hashkey * func_key)
{
plpgsql_HashEnt *hentry;
bool found;
}
static void
-plpgsql_HashTableDelete(PLpgSQL_function *function)
+plpgsql_HashTableDelete(PLpgSQL_function * function)
{
plpgsql_HashEnt *hentry;
hentry = (plpgsql_HashEnt *) hash_search(plpgsql_HashTable,
- (void *) function->fn_hashkey,
+ (void *) function->fn_hashkey,
HASH_REMOVE,
NULL);
if (hentry == NULL)
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.152 2005/09/13 16:16:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.153 2005/10/15 02:49:49 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
* Local function forward declarations
************************************************************/
static void plpgsql_exec_error_callback(void *arg);
-static PLpgSQL_datum *copy_plpgsql_datum(PLpgSQL_datum *datum);
-
-static int exec_stmt_block(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_block *block);
-static int exec_stmts(PLpgSQL_execstate *estate,
- List *stmts);
-static int exec_stmt(PLpgSQL_execstate *estate,
- PLpgSQL_stmt *stmt);
-static int exec_stmt_assign(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_assign *stmt);
-static int exec_stmt_perform(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_perform *stmt);
-static int exec_stmt_getdiag(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_getdiag *stmt);
-static int exec_stmt_if(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_if *stmt);
-static int exec_stmt_loop(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_loop *stmt);
-static int exec_stmt_while(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_while *stmt);
-static int exec_stmt_fori(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_fori *stmt);
-static int exec_stmt_fors(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_fors *stmt);
-static int exec_stmt_select(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_select *stmt);
-static int exec_stmt_open(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_open *stmt);
-static int exec_stmt_fetch(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_fetch *stmt);
-static int exec_stmt_close(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_close *stmt);
-static int exec_stmt_exit(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_exit *stmt);
-static int exec_stmt_return(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_return *stmt);
-static int exec_stmt_return_next(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_return_next *stmt);
-static int exec_stmt_raise(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_raise *stmt);
-static int exec_stmt_execsql(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_execsql *stmt);
-static int exec_stmt_dynexecute(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_dynexecute *stmt);
-static int exec_stmt_dynfors(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_dynfors *stmt);
-
-static void plpgsql_estate_setup(PLpgSQL_execstate *estate,
- PLpgSQL_function *func,
+static PLpgSQL_datum *copy_plpgsql_datum(PLpgSQL_datum * datum);
+
+static int exec_stmt_block(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_block * block);
+static int exec_stmts(PLpgSQL_execstate * estate,
+ List *stmts);
+static int exec_stmt(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt * stmt);
+static int exec_stmt_assign(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_assign * stmt);
+static int exec_stmt_perform(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_perform * stmt);
+static int exec_stmt_getdiag(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_getdiag * stmt);
+static int exec_stmt_if(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_if * stmt);
+static int exec_stmt_loop(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_loop * stmt);
+static int exec_stmt_while(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_while * stmt);
+static int exec_stmt_fori(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_fori * stmt);
+static int exec_stmt_fors(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_fors * stmt);
+static int exec_stmt_select(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_select * stmt);
+static int exec_stmt_open(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_open * stmt);
+static int exec_stmt_fetch(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_fetch * stmt);
+static int exec_stmt_close(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_close * stmt);
+static int exec_stmt_exit(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_exit * stmt);
+static int exec_stmt_return(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_return * stmt);
+static int exec_stmt_return_next(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_return_next * stmt);
+static int exec_stmt_raise(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_raise * stmt);
+static int exec_stmt_execsql(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_execsql * stmt);
+static int exec_stmt_dynexecute(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_dynexecute * stmt);
+static int exec_stmt_dynfors(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_dynfors * stmt);
+
+static void plpgsql_estate_setup(PLpgSQL_execstate * estate,
+ PLpgSQL_function * func,
ReturnSetInfo *rsi);
-static void exec_eval_cleanup(PLpgSQL_execstate *estate);
+static void exec_eval_cleanup(PLpgSQL_execstate * estate);
-static void exec_prepare_plan(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr);
+static void exec_prepare_plan(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr);
static bool exec_simple_check_node(Node *node);
-static void exec_simple_check_plan(PLpgSQL_expr *expr);
-static Datum exec_eval_simple_expr(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr,
+static void exec_simple_check_plan(PLpgSQL_expr * expr);
+static Datum exec_eval_simple_expr(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr,
bool *isNull,
Oid *rettype);
-static void exec_assign_expr(PLpgSQL_execstate *estate,
- PLpgSQL_datum *target,
- PLpgSQL_expr *expr);
-static void exec_assign_value(PLpgSQL_execstate *estate,
- PLpgSQL_datum *target,
+static void exec_assign_expr(PLpgSQL_execstate * estate,
+ PLpgSQL_datum * target,
+ PLpgSQL_expr * expr);
+static void exec_assign_value(PLpgSQL_execstate * estate,
+ PLpgSQL_datum * target,
Datum value, Oid valtype, bool *isNull);
-static void exec_eval_datum(PLpgSQL_execstate *estate,
- PLpgSQL_datum *datum,
+static void exec_eval_datum(PLpgSQL_execstate * estate,
+ PLpgSQL_datum * datum,
Oid expectedtypeid,
Oid *typeid,
Datum *value,
bool *isnull);
-static int exec_eval_integer(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr,
+static int exec_eval_integer(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr,
bool *isNull);
-static bool exec_eval_boolean(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr,
+static bool exec_eval_boolean(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr,
bool *isNull);
-static Datum exec_eval_expr(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr,
+static Datum exec_eval_expr(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr,
bool *isNull,
Oid *rettype);
-static int exec_run_select(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr, long maxtuples, Portal *portalP);
-static void exec_move_row(PLpgSQL_execstate *estate,
- PLpgSQL_rec *rec,
- PLpgSQL_row *row,
+static int exec_run_select(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr, long maxtuples, Portal *portalP);
+static void exec_move_row(PLpgSQL_execstate * estate,
+ PLpgSQL_rec * rec,
+ PLpgSQL_row * row,
HeapTuple tup, TupleDesc tupdesc);
-static HeapTuple make_tuple_from_row(PLpgSQL_execstate *estate,
- PLpgSQL_row *row,
+static HeapTuple make_tuple_from_row(PLpgSQL_execstate * estate,
+ PLpgSQL_row * row,
TupleDesc tupdesc);
static char *convert_value_to_string(Datum value, Oid valtype);
static Datum exec_cast_value(Datum value, Oid valtype,
static Datum exec_simple_cast_value(Datum value, Oid valtype,
Oid reqtype, int32 reqtypmod,
bool isnull);
-static void exec_init_tuple_store(PLpgSQL_execstate *estate);
+static void exec_init_tuple_store(PLpgSQL_execstate * estate);
static bool compatible_tupdesc(TupleDesc td1, TupleDesc td2);
-static void exec_set_found(PLpgSQL_execstate *estate, bool state);
-static void free_var(PLpgSQL_var *var);
+static void exec_set_found(PLpgSQL_execstate * estate, bool state);
+static void free_var(PLpgSQL_var * var);
/* ----------
* ----------
*/
Datum
-plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo)
+plpgsql_exec_function(PLpgSQL_function * func, FunctionCallInfo fcinfo)
{
PLpgSQL_execstate estate;
ErrorContextCallback plerrcontext;
estate.err_text = NULL;
/*
- * Provide a more helpful message if a CONTINUE has been used
- * outside a loop.
+ * Provide a more helpful message if a CONTINUE has been used outside
+ * a loop.
*/
if (rc == PLPGSQL_RC_CONTINUE)
ereport(ERROR,
errmsg("CONTINUE cannot be used outside a loop")));
else
ereport(ERROR,
- (errcode(ERRCODE_S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT),
- errmsg("control reached end of function without RETURN")));
+ (errcode(ERRCODE_S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT),
+ errmsg("control reached end of function without RETURN")));
}
/*
* ----------
*/
HeapTuple
-plpgsql_exec_trigger(PLpgSQL_function *func,
+plpgsql_exec_trigger(PLpgSQL_function * func,
TriggerData *trigdata)
{
PLpgSQL_execstate estate;
var = (PLpgSQL_var *) (estate.datums[func->tg_name_varno]);
var->value = DirectFunctionCall1(namein,
- CStringGetDatum(trigdata->tg_trigger->tgname));
+ CStringGetDatum(trigdata->tg_trigger->tgname));
var->isnull = false;
var->freeval = true;
var = (PLpgSQL_var *) (estate.datums[func->tg_relname_varno]);
var->value = DirectFunctionCall1(namein,
- CStringGetDatum(RelationGetRelationName(trigdata->tg_relation)));
+ CStringGetDatum(RelationGetRelationName(trigdata->tg_relation)));
var->isnull = false;
var->freeval = true;
var->freeval = false;
/*
- * Store the trigger argument values into the special execution
- * state variables
+ * Store the trigger argument values into the special execution state
+ * variables
*/
estate.err_text = gettext_noop("while storing call arguments into local variables");
estate.trig_nargs = trigdata->tg_trigger->tgnargs;
estate.trig_argv = palloc(sizeof(Datum) * estate.trig_nargs);
for (i = 0; i < trigdata->tg_trigger->tgnargs; i++)
estate.trig_argv[i] = DirectFunctionCall1(textin,
- CStringGetDatum(trigdata->tg_trigger->tgargs[i]));
+ CStringGetDatum(trigdata->tg_trigger->tgargs[i]));
}
/*
estate.err_text = NULL;
/*
- * Provide a more helpful message if a CONTINUE has been used
- * outside a loop.
+ * Provide a more helpful message if a CONTINUE has been used outside
+ * a loop.
*/
if (rc == PLPGSQL_RC_CONTINUE)
ereport(ERROR,
errmsg("CONTINUE cannot be used outside a loop")));
else
ereport(ERROR,
- (errcode(ERRCODE_S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT),
- errmsg("control reached end of trigger procedure without RETURN")));
+ (errcode(ERRCODE_S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT),
+ errmsg("control reached end of trigger procedure without RETURN")));
}
if (estate.retisset)
errmsg("trigger procedure cannot return a set")));
/*
- * Check that the returned tuple structure has the same attributes,
- * the relation that fired the trigger has. A per-statement trigger
- * always needs to return NULL, so we ignore any return value the
- * function itself produces (XXX: is this a good idea?)
+ * Check that the returned tuple structure has the same attributes, the
+ * relation that fired the trigger has. A per-statement trigger always
+ * needs to return NULL, so we ignore any return value the function itself
+ * produces (XXX: is this a good idea?)
*
* XXX This way it is possible, that the trigger returns a tuple where
- * attributes don't have the correct atttypmod's length. It's up to
- * the trigger's programmer to ensure that this doesn't happen. Jan
+ * attributes don't have the correct atttypmod's length. It's up to the
+ * trigger's programmer to ensure that this doesn't happen. Jan
*/
if (estate.retisnull || TRIGGER_FIRED_FOR_STATEMENT(trigdata->tg_event))
rettup = NULL;
else if (estate->err_text != NULL)
{
/*
- * We don't expend the cycles to run gettext() on err_text unless
- * we actually need it. Therefore, places that set up err_text
- * should use gettext_noop() to ensure the strings get recorded in
- * the message dictionary.
+ * We don't expend the cycles to run gettext() on err_text unless we
+ * actually need it. Therefore, places that set up err_text should
+ * use gettext_noop() to ensure the strings get recorded in the
+ * message dictionary.
*/
/*
* ----------
*/
static PLpgSQL_datum *
-copy_plpgsql_datum(PLpgSQL_datum *datum)
+copy_plpgsql_datum(PLpgSQL_datum * datum)
{
PLpgSQL_datum *result;
switch (datum->dtype)
{
case PLPGSQL_DTYPE_VAR:
- {
- PLpgSQL_var *new = palloc(sizeof(PLpgSQL_var));
+ {
+ PLpgSQL_var *new = palloc(sizeof(PLpgSQL_var));
- memcpy(new, datum, sizeof(PLpgSQL_var));
- /* Ensure the value is null (possibly not needed?) */
- new->value = 0;
- new->isnull = true;
- new->freeval = false;
+ memcpy(new, datum, sizeof(PLpgSQL_var));
+ /* Ensure the value is null (possibly not needed?) */
+ new->value = 0;
+ new->isnull = true;
+ new->freeval = false;
- result = (PLpgSQL_datum *) new;
- }
- break;
+ result = (PLpgSQL_datum *) new;
+ }
+ break;
case PLPGSQL_DTYPE_REC:
- {
- PLpgSQL_rec *new = palloc(sizeof(PLpgSQL_rec));
+ {
+ PLpgSQL_rec *new = palloc(sizeof(PLpgSQL_rec));
- memcpy(new, datum, sizeof(PLpgSQL_rec));
- /* Ensure the value is null (possibly not needed?) */
- new->tup = NULL;
- new->tupdesc = NULL;
- new->freetup = false;
- new->freetupdesc = false;
+ memcpy(new, datum, sizeof(PLpgSQL_rec));
+ /* Ensure the value is null (possibly not needed?) */
+ new->tup = NULL;
+ new->tupdesc = NULL;
+ new->freetup = false;
+ new->freetupdesc = false;
- result = (PLpgSQL_datum *) new;
- }
- break;
+ result = (PLpgSQL_datum *) new;
+ }
+ break;
case PLPGSQL_DTYPE_ROW:
case PLPGSQL_DTYPE_RECFIELD:
case PLPGSQL_DTYPE_ARRAYELEM:
case PLPGSQL_DTYPE_TRIGARG:
+
/*
- * These datum records are read-only at runtime, so no need
- * to copy them
+ * These datum records are read-only at runtime, so no need to
+ * copy them
*/
result = datum;
break;
static bool
-exception_matches_conditions(ErrorData *edata, PLpgSQL_condition *cond)
+exception_matches_conditions(ErrorData *edata, PLpgSQL_condition * cond)
{
for (; cond != NULL; cond = cond->next)
{
* ----------
*/
static int
-exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block)
+exec_stmt_block(PLpgSQL_execstate * estate, PLpgSQL_stmt_block * block)
{
volatile int rc = -1;
int i;
var->isnull = true;
if (var->notnull)
ereport(ERROR,
- (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("variable \"%s\" declared NOT NULL cannot default to NULL",
- var->refname)));
+ (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
+ errmsg("variable \"%s\" declared NOT NULL cannot default to NULL",
+ var->refname)));
}
else
{
if (block->exceptions)
{
/*
- * Execute the statements in the block's body inside a
- * sub-transaction
+ * Execute the statements in the block's body inside a sub-transaction
*/
MemoryContext oldcontext = CurrentMemoryContext;
ResourceOwner oldowner = CurrentResourceOwner;
CurrentResourceOwner = oldowner;
/*
- * AtEOSubXact_SPI() should not have popped any SPI context,
- * but just in case it did, make sure we remain connected.
+ * AtEOSubXact_SPI() should not have popped any SPI context, but
+ * just in case it did, make sure we remain connected.
*/
SPI_restore_connection();
}
PG_CATCH();
{
- ErrorData *edata;
- ListCell *e;
+ ErrorData *edata;
+ ListCell *e;
/* Save error info */
MemoryContextSwitchTo(oldcontext);
CurrentResourceOwner = oldowner;
/*
- * If AtEOSubXact_SPI() popped any SPI context of the subxact,
- * it will have left us in a disconnected state. We need this
- * hack to return to connected state.
+ * If AtEOSubXact_SPI() popped any SPI context of the subxact, it
+ * will have left us in a disconnected state. We need this hack
+ * to return to connected state.
*/
SPI_restore_connection();
/* Look for a matching exception handler */
- foreach (e, block->exceptions->exc_list)
+ foreach(e, block->exceptions->exc_list)
{
PLpgSQL_exception *exception = (PLpgSQL_exception *) lfirst(e);
if (exception_matches_conditions(edata, exception->conditions))
{
/*
- * Initialize the magic SQLSTATE and SQLERRM
- * variables for the exception block. We needn't
- * do this until we have found a matching
- * exception.
+ * Initialize the magic SQLSTATE and SQLERRM variables for
+ * the exception block. We needn't do this until we have
+ * found a matching exception.
*/
PLpgSQL_var *state_var;
PLpgSQL_var *errm_var;
state_var = (PLpgSQL_var *)
estate->datums[block->exceptions->sqlstate_varno];
state_var->value = DirectFunctionCall1(textin,
- CStringGetDatum(unpack_sql_state(edata->sqlerrcode)));
+ CStringGetDatum(unpack_sql_state(edata->sqlerrcode)));
state_var->freeval = true;
state_var->isnull = false;
errm_var = (PLpgSQL_var *)
estate->datums[block->exceptions->sqlerrm_varno];
errm_var->value = DirectFunctionCall1(textin,
- CStringGetDatum(edata->message));
+ CStringGetDatum(edata->message));
errm_var->freeval = true;
errm_var->isnull = false;
return PLPGSQL_RC_EXIT;
estate->exitlabel = NULL;
return PLPGSQL_RC_OK;
-
+
default:
elog(ERROR, "unrecognized rc: %d", rc);
}
* ----------
*/
static int
-exec_stmts(PLpgSQL_execstate *estate, List *stmts)
+exec_stmts(PLpgSQL_execstate * estate, List *stmts)
{
ListCell *s;
- foreach (s, stmts)
+ foreach(s, stmts)
{
PLpgSQL_stmt *stmt = (PLpgSQL_stmt *) lfirst(s);
- int rc = exec_stmt(estate, stmt);
+ int rc = exec_stmt(estate, stmt);
+
if (rc != PLPGSQL_RC_OK)
return rc;
}
* ----------
*/
static int
-exec_stmt(PLpgSQL_execstate *estate, PLpgSQL_stmt *stmt)
+exec_stmt(PLpgSQL_execstate * estate, PLpgSQL_stmt * stmt)
{
PLpgSQL_stmt *save_estmt;
int rc = -1;
* ----------
*/
static int
-exec_stmt_assign(PLpgSQL_execstate *estate, PLpgSQL_stmt_assign *stmt)
+exec_stmt_assign(PLpgSQL_execstate * estate, PLpgSQL_stmt_assign * stmt)
{
Assert(stmt->varno >= 0);
* ----------
*/
static int
-exec_stmt_perform(PLpgSQL_execstate *estate, PLpgSQL_stmt_perform *stmt)
+exec_stmt_perform(PLpgSQL_execstate * estate, PLpgSQL_stmt_perform * stmt)
{
PLpgSQL_expr *expr = stmt->expr;
* ----------
*/
static int
-exec_stmt_getdiag(PLpgSQL_execstate *estate, PLpgSQL_stmt_getdiag *stmt)
+exec_stmt_getdiag(PLpgSQL_execstate * estate, PLpgSQL_stmt_getdiag * stmt)
{
- ListCell *lc;
+ ListCell *lc;
- foreach (lc, stmt->diag_items)
+ foreach(lc, stmt->diag_items)
{
- PLpgSQL_diag_item *diag_item = (PLpgSQL_diag_item *) lfirst(lc);
- PLpgSQL_datum *var;
- bool isnull = false;
+ PLpgSQL_diag_item *diag_item = (PLpgSQL_diag_item *) lfirst(lc);
+ PLpgSQL_datum *var;
+ bool isnull = false;
if (diag_item->target <= 0)
continue;
* ----------
*/
static int
-exec_stmt_if(PLpgSQL_execstate *estate, PLpgSQL_stmt_if *stmt)
+exec_stmt_if(PLpgSQL_execstate * estate, PLpgSQL_stmt_if * stmt)
{
bool value;
bool isnull;
* ----------
*/
static int
-exec_stmt_loop(PLpgSQL_execstate *estate, PLpgSQL_stmt_loop *stmt)
+exec_stmt_loop(PLpgSQL_execstate * estate, PLpgSQL_stmt_loop * stmt)
{
for (;;)
{
- int rc = exec_stmts(estate, stmt->body);
+ int rc = exec_stmts(estate, stmt->body);
switch (rc)
{
return PLPGSQL_RC_EXIT;
estate->exitlabel = NULL;
return PLPGSQL_RC_OK;
-
+
case PLPGSQL_RC_CONTINUE:
if (estate->exitlabel == NULL)
/* anonymous continue, so re-run the loop */
* ----------
*/
static int
-exec_stmt_while(PLpgSQL_execstate *estate, PLpgSQL_stmt_while *stmt)
+exec_stmt_while(PLpgSQL_execstate * estate, PLpgSQL_stmt_while * stmt)
{
for (;;)
{
* ----------
*/
static int
-exec_stmt_fori(PLpgSQL_execstate *estate, PLpgSQL_stmt_fori *stmt)
+exec_stmt_fori(PLpgSQL_execstate * estate, PLpgSQL_stmt_fori * stmt)
{
PLpgSQL_var *var;
Datum value;
}
/*
- * otherwise, this is a labelled exit that does not match
- * the current statement's label, if any: return RC_EXIT
- * so that the EXIT continues to propagate up the stack.
+ * otherwise, this is a labelled exit that does not match the
+ * current statement's label, if any: return RC_EXIT so that the
+ * EXIT continues to propagate up the stack.
*/
break;
else
{
/*
- * otherwise, this is a named continue that does not
- * match the current statement's label, if any: return
- * RC_CONTINUE so that the CONTINUE will propagate up
- * the stack.
+ * otherwise, this is a named continue that does not match the
+ * current statement's label, if any: return RC_CONTINUE so
+ * that the CONTINUE will propagate up the stack.
*/
- break;
+ break;
}
}
/*
* Set the FOUND variable to indicate the result of executing the loop
- * (namely, whether we looped one or more times). This must be set
- * here so that it does not interfere with the value of the FOUND
- * variable inside the loop processing itself.
+ * (namely, whether we looped one or more times). This must be set here so
+ * that it does not interfere with the value of the FOUND variable inside
+ * the loop processing itself.
*/
exec_set_found(estate, found);
* ----------
*/
static int
-exec_stmt_fors(PLpgSQL_execstate *estate, PLpgSQL_stmt_fors *stmt)
+exec_stmt_fors(PLpgSQL_execstate * estate, PLpgSQL_stmt_fors * stmt)
{
PLpgSQL_rec *rec = NULL;
PLpgSQL_row *row = NULL;
n = SPI_processed;
/*
- * If the query didn't return any rows, set the target to NULL and
- * return with FOUND = false.
+ * If the query didn't return any rows, set the target to NULL and return
+ * with FOUND = false.
*/
if (n == 0)
exec_move_row(estate, rec, row, NULL, tuptab->tupdesc);
}
/*
- * otherwise, we processed a labelled exit that does
- * not match the current statement's label, if any:
- * return RC_EXIT so that the EXIT continues to
- * recurse upward.
+ * otherwise, we processed a labelled exit that does not
+ * match the current statement's label, if any: return
+ * RC_EXIT so that the EXIT continues to recurse upward.
*/
}
else if (rc == PLPGSQL_RC_CONTINUE)
}
/*
- * otherwise, we processed a named continue
- * that does not match the current statement's
- * label, if any: return RC_CONTINUE so that the
- * CONTINUE will propagate up the stack.
+ * otherwise, we processed a named continue that does not
+ * match the current statement's label, if any: return
+ * RC_CONTINUE so that the CONTINUE will propagate up the
+ * stack.
*/
}
/*
- * We're aborting the loop, so cleanup and set FOUND.
- * (This code should match the code after the loop.)
+ * We're aborting the loop, so cleanup and set FOUND. (This
+ * code should match the code after the loop.)
*/
SPI_freetuptable(tuptab);
SPI_cursor_close(portal);
/*
* Set the FOUND variable to indicate the result of executing the loop
- * (namely, whether we looped one or more times). This must be set
- * here so that it does not interfere with the value of the FOUND
- * variable inside the loop processing itself.
+ * (namely, whether we looped one or more times). This must be set here so
+ * that it does not interfere with the value of the FOUND variable inside
+ * the loop processing itself.
*/
exec_set_found(estate, found);
* ----------
*/
static int
-exec_stmt_select(PLpgSQL_execstate *estate, PLpgSQL_stmt_select *stmt)
+exec_stmt_select(PLpgSQL_execstate * estate, PLpgSQL_stmt_select * stmt)
{
PLpgSQL_rec *rec = NULL;
PLpgSQL_row *row = NULL;
n = estate->eval_processed;
/*
- * If the query didn't return any rows, set the target to NULL and
- * return.
+ * If the query didn't return any rows, set the target to NULL and return.
*/
if (n == 0)
{
* ----------
*/
static int
-exec_stmt_exit(PLpgSQL_execstate *estate, PLpgSQL_stmt_exit *stmt)
+exec_stmt_exit(PLpgSQL_execstate * estate, PLpgSQL_stmt_exit * stmt)
{
/*
* If the exit / continue has a condition, evaluate it
* ----------
*/
static int
-exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt)
+exec_stmt_return(PLpgSQL_execstate * estate, PLpgSQL_stmt_return * stmt)
{
/*
* If processing a set-returning PL/PgSQL function, the final RETURN
- * indicates that the function is finished producing tuples. The rest
- * of the work will be done at the top level.
+ * indicates that the function is finished producing tuples. The rest of
+ * the work will be done at the top level.
*/
if (estate->retisset)
return PLPGSQL_RC_RETURN;
switch (retvar->dtype)
{
case PLPGSQL_DTYPE_VAR:
- {
- PLpgSQL_var *var = (PLpgSQL_var *) retvar;
+ {
+ PLpgSQL_var *var = (PLpgSQL_var *) retvar;
- estate->retval = var->value;
- estate->retisnull = var->isnull;
- estate->rettype = var->datatype->typoid;
- }
- break;
+ estate->retval = var->value;
+ estate->retisnull = var->isnull;
+ estate->rettype = var->datatype->typoid;
+ }
+ break;
case PLPGSQL_DTYPE_REC:
- {
- PLpgSQL_rec *rec = (PLpgSQL_rec *) retvar;
-
- if (HeapTupleIsValid(rec->tup))
{
- estate->retval = (Datum) rec->tup;
- estate->rettupdesc = rec->tupdesc;
- estate->retisnull = false;
+ PLpgSQL_rec *rec = (PLpgSQL_rec *) retvar;
+
+ if (HeapTupleIsValid(rec->tup))
+ {
+ estate->retval = (Datum) rec->tup;
+ estate->rettupdesc = rec->tupdesc;
+ estate->retisnull = false;
+ }
}
- }
- break;
+ break;
case PLPGSQL_DTYPE_ROW:
- {
- PLpgSQL_row *row = (PLpgSQL_row *) retvar;
-
- Assert(row->rowtupdesc);
- estate->retval = (Datum) make_tuple_from_row(estate, row,
- row->rowtupdesc);
- if (estate->retval == (Datum) NULL) /* should not happen */
- elog(ERROR, "row not compatible with its own tupdesc");
- estate->rettupdesc = row->rowtupdesc;
- estate->retisnull = false;
- }
- break;
+ {
+ PLpgSQL_row *row = (PLpgSQL_row *) retvar;
+
+ Assert(row->rowtupdesc);
+ estate->retval = (Datum) make_tuple_from_row(estate, row,
+ row->rowtupdesc);
+ if (estate->retval == (Datum) NULL) /* should not happen */
+ elog(ERROR, "row not compatible with its own tupdesc");
+ estate->rettupdesc = row->rowtupdesc;
+ estate->retisnull = false;
+ }
+ break;
default:
elog(ERROR, "unrecognized dtype: %d", retvar->dtype);
/*
* Special hack for function returning VOID: instead of NULL, return a
* non-null VOID value. This is of dubious importance but is kept for
- * backwards compatibility. Note that the only other way to get here
- * is to have written "RETURN NULL" in a function returning tuple.
+ * backwards compatibility. Note that the only other way to get here is
+ * to have written "RETURN NULL" in a function returning tuple.
*/
if (estate->fn_rettype == VOIDOID)
{
* ----------
*/
static int
-exec_stmt_return_next(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_return_next *stmt)
+exec_stmt_return_next(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_return_next * stmt)
{
TupleDesc tupdesc;
int natts;
if (!estate->retisset)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot use RETURN NEXT in a non-SETOF function")));
+ errmsg("cannot use RETURN NEXT in a non-SETOF function")));
if (estate->tuple_store == NULL)
exec_init_tuple_store(estate);
switch (retvar->dtype)
{
case PLPGSQL_DTYPE_VAR:
- {
- PLpgSQL_var *var = (PLpgSQL_var *) retvar;
- Datum retval = var->value;
- bool isNull = var->isnull;
+ {
+ PLpgSQL_var *var = (PLpgSQL_var *) retvar;
+ Datum retval = var->value;
+ bool isNull = var->isnull;
- if (natts != 1)
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("wrong result type supplied in RETURN NEXT")));
+ if (natts != 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("wrong result type supplied in RETURN NEXT")));
- /* coerce type if needed */
- retval = exec_simple_cast_value(retval,
- var->datatype->typoid,
- tupdesc->attrs[0]->atttypid,
+ /* coerce type if needed */
+ retval = exec_simple_cast_value(retval,
+ var->datatype->typoid,
+ tupdesc->attrs[0]->atttypid,
tupdesc->attrs[0]->atttypmod,
- isNull);
+ isNull);
- tuple = heap_form_tuple(tupdesc, &retval, &isNull);
+ tuple = heap_form_tuple(tupdesc, &retval, &isNull);
- free_tuple = true;
- }
- break;
+ free_tuple = true;
+ }
+ break;
case PLPGSQL_DTYPE_REC:
- {
- PLpgSQL_rec *rec = (PLpgSQL_rec *) retvar;
+ {
+ PLpgSQL_rec *rec = (PLpgSQL_rec *) retvar;
- if (!HeapTupleIsValid(rec->tup))
- ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("record \"%s\" is not assigned yet",
- rec->refname),
- errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
- if (!compatible_tupdesc(tupdesc, rec->tupdesc))
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("wrong record type supplied in RETURN NEXT")));
- tuple = rec->tup;
- }
- break;
+ if (!HeapTupleIsValid(rec->tup))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("record \"%s\" is not assigned yet",
+ rec->refname),
+ errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
+ if (!compatible_tupdesc(tupdesc, rec->tupdesc))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("wrong record type supplied in RETURN NEXT")));
+ tuple = rec->tup;
+ }
+ break;
case PLPGSQL_DTYPE_ROW:
- {
- PLpgSQL_row *row = (PLpgSQL_row *) retvar;
+ {
+ PLpgSQL_row *row = (PLpgSQL_row *) retvar;
- tuple = make_tuple_from_row(estate, row, tupdesc);
- if (tuple == NULL)
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("wrong record type supplied in RETURN NEXT")));
- free_tuple = true;
- }
- break;
+ tuple = make_tuple_from_row(estate, row, tupdesc);
+ if (tuple == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("wrong record type supplied in RETURN NEXT")));
+ free_tuple = true;
+ }
+ break;
default:
elog(ERROR, "unrecognized dtype: %d", retvar->dtype);
if (natts != 1)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("wrong result type supplied in RETURN NEXT")));
+ errmsg("wrong result type supplied in RETURN NEXT")));
retval = exec_eval_expr(estate,
stmt->expr,
}
static void
-exec_init_tuple_store(PLpgSQL_execstate *estate)
+exec_init_tuple_store(PLpgSQL_execstate * estate)
{
ReturnSetInfo *rsi = estate->rsi;
MemoryContext oldcxt;
* ----------
*/
static int
-exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt)
+exec_stmt_raise(PLpgSQL_execstate * estate, PLpgSQL_stmt_raise * stmt)
{
char *cp;
PLpgSQL_dstring ds;
errmsg("too few parameters specified for RAISE")));
paramvalue = exec_eval_expr(estate,
- (PLpgSQL_expr *) lfirst(current_param),
+ (PLpgSQL_expr *) lfirst(current_param),
¶misnull,
¶mtypeid);
}
/*
- * If more parameters were specified than were required to process
- * the format string, throw an error
+ * If more parameters were specified than were required to process the
+ * format string, throw an error
*/
if (current_param != NULL)
ereport(ERROR,
estate->err_text = raise_skip_msg; /* suppress traceback of raise */
ereport(stmt->elog_level,
- ((stmt->elog_level >= ERROR) ? errcode(ERRCODE_RAISE_EXCEPTION) : 0,
- errmsg_internal("%s", plpgsql_dstring_get(&ds))));
+ ((stmt->elog_level >= ERROR) ? errcode(ERRCODE_RAISE_EXCEPTION) : 0,
+ errmsg_internal("%s", plpgsql_dstring_get(&ds))));
estate->err_text = NULL; /* un-suppress... */
* ----------
*/
static void
-plpgsql_estate_setup(PLpgSQL_execstate *estate,
- PLpgSQL_function *func,
+plpgsql_estate_setup(PLpgSQL_execstate * estate,
+ PLpgSQL_function * func,
ReturnSetInfo *rsi)
{
estate->retval = (Datum) 0;
estate->err_text = NULL;
/*
- * Create an EState for evaluation of simple expressions, if there's
- * not one already in the current transaction. The EState is made a
- * child of TopTransactionContext so it will have the right lifespan.
+ * Create an EState for evaluation of simple expressions, if there's not
+ * one already in the current transaction. The EState is made a child of
+ * TopTransactionContext so it will have the right lifespan.
*/
if (simple_eval_estate == NULL)
{
}
/*
- * Create an expression context for simple expressions.
- * This must be a child of simple_eval_estate.
+ * Create an expression context for simple expressions. This must be a
+ * child of simple_eval_estate.
*/
estate->eval_econtext = CreateExprContext(simple_eval_estate);
}
* ----------
*/
static void
-exec_eval_cleanup(PLpgSQL_execstate *estate)
+exec_eval_cleanup(PLpgSQL_execstate * estate)
{
/* Clear result of a full SPI_execute */
if (estate->eval_tuptable != NULL)
* ----------
*/
static void
-exec_prepare_plan(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr)
+exec_prepare_plan(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr)
{
int i;
_SPI_plan *spi_plan;
case SPI_ERROR_COPY:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot COPY to/from client in PL/pgSQL")));
+ errmsg("cannot COPY to/from client in PL/pgSQL")));
case SPI_ERROR_CURSOR:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot manipulate cursors directly in PL/pgSQL"),
- errhint("Use PL/pgSQL's cursor features instead.")));
+ errmsg("cannot manipulate cursors directly in PL/pgSQL"),
+ errhint("Use PL/pgSQL's cursor features instead.")));
case SPI_ERROR_TRANSACTION:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot begin/end transactions in PL/pgSQL"),
+ errmsg("cannot begin/end transactions in PL/pgSQL"),
errhint("Use a BEGIN block with an EXCEPTION clause instead.")));
default:
elog(ERROR, "SPI_prepare failed for \"%s\": %s",
* ----------
*/
static int
-exec_stmt_execsql(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_execsql *stmt)
+exec_stmt_execsql(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_execsql * stmt)
{
int i;
Datum *values;
case SPI_OK_UPDATE:
/*
- * If the INSERT, DELETE, or UPDATE query affected at least
- * one tuple, set the magic 'FOUND' variable to true. This
- * conforms with the behavior of PL/SQL.
+ * If the INSERT, DELETE, or UPDATE query affected at least one
+ * tuple, set the magic 'FOUND' variable to true. This conforms
+ * with the behavior of PL/SQL.
*/
exec_set_found(estate, (SPI_processed != 0));
break;
case SPI_OK_SELECT:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("SELECT query has no destination for result data"),
+ errmsg("SELECT query has no destination for result data"),
errhint("If you want to discard the results, use PERFORM instead.")));
default:
* ----------
*/
static int
-exec_stmt_dynexecute(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_dynexecute *stmt)
+exec_stmt_dynexecute(PLpgSQL_execstate * estate,
+ PLpgSQL_stmt_dynexecute * stmt)
{
Datum query;
bool isnull = false;
row = (PLpgSQL_row *) (estate->datums[stmt->row->rowno]);
/*
- * First we evaluate the string expression after the EXECUTE keyword.
- * It's result is the querystring we have to execute.
+ * First we evaluate the string expression after the EXECUTE keyword. It's
+ * result is the querystring we have to execute.
*/
query = exec_eval_expr(estate, stmt->query, &isnull, &restype);
if (isnull)
/*
* Call SPI_execute() without preparing a saved plan. The returncode can
- * be any standard OK. Note that while a SELECT is allowed, its
- * results will be discarded unless an INTO clause is specified.
+ * be any standard OK. Note that while a SELECT is allowed, its results
+ * will be discarded unless an INTO clause is specified.
*/
exec_res = SPI_execute(querystr, estate->readonly_func, 0);
case SPI_OK_SELINTO:
/*
- * We want to disallow SELECT INTO for now, because its
- * behavior is not consistent with SELECT INTO in a normal
- * plpgsql context. (We need to reimplement EXECUTE to parse
- * the string as a plpgsql command, not just feed it to
- * SPI_execute.) However, CREATE AS should be allowed ... and
- * since it produces the same parsetree as SELECT INTO,
- * there's no way to tell the difference except to look at the
- * source text. Wotta kluge!
+ * We want to disallow SELECT INTO for now, because its behavior
+ * is not consistent with SELECT INTO in a normal plpgsql context.
+ * (We need to reimplement EXECUTE to parse the string as a
+ * plpgsql command, not just feed it to SPI_execute.) However,
+ * CREATE AS should be allowed ... and since it produces the same
+ * parsetree as SELECT INTO, there's no way to tell the difference
+ * except to look at the source text. Wotta kluge!
*/
{
char *ptr;
case SPI_ERROR_CURSOR:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot manipulate cursors directly in PL/pgSQL"),
+ errmsg("cannot manipulate cursors directly in PL/pgSQL"),
errhint("Use PL/pgSQL's cursor features instead.")));
case SPI_ERROR_TRANSACTION:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot begin/end transactions in PL/pgSQL"),
- errhint("Use a BEGIN block with an EXCEPTION clause instead.")));
+ errhint("Use a BEGIN block with an EXCEPTION clause instead.")));
default:
elog(ERROR, "SPI_execute failed executing query \"%s\": %s",
* ----------
*/
static int
-exec_stmt_dynfors(PLpgSQL_execstate *estate, PLpgSQL_stmt_dynfors *stmt)
+exec_stmt_dynfors(PLpgSQL_execstate * estate, PLpgSQL_stmt_dynfors * stmt)
{
Datum query;
bool isnull;
elog(ERROR, "unsupported target");
/*
- * Evaluate the string expression after the EXECUTE keyword. It's
- * result is the querystring we have to execute.
+ * Evaluate the string expression after the EXECUTE keyword. It's result
+ * is the querystring we have to execute.
*/
query = exec_eval_expr(estate, stmt->query, &isnull, &restype);
if (isnull)
n = SPI_processed;
/*
- * If the query didn't return any rows, set the target to NULL and
- * return with FOUND = false.
+ * If the query didn't return any rows, set the target to NULL and return
+ * with FOUND = false.
*/
if (n == 0)
exec_move_row(estate, rec, row, NULL, tuptab->tupdesc);
for (i = 0; i < n; i++)
{
- int rc;
+ int rc;
/*
* Assign the tuple to the target
}
/*
- * otherwise, we processed a labelled exit that does
- * not match the current statement's label, if any:
- * return RC_EXIT so that the EXIT continues to
- * recurse upward.
+ * otherwise, we processed a labelled exit that does not
+ * match the current statement's label, if any: return
+ * RC_EXIT so that the EXIT continues to recurse upward.
*/
}
else if (rc == PLPGSQL_RC_CONTINUE)
}
/*
- * otherwise, we process a labelled continue that
- * does not match the current statement's label,
- * so propagate RC_CONTINUE upward in the stack.
+ * otherwise, we process a labelled continue that does not
+ * match the current statement's label, so propagate
+ * RC_CONTINUE upward in the stack.
*/
}
/*
- * We're aborting the loop, so cleanup and set FOUND.
- * (This code should match the code after the loop.)
+ * We're aborting the loop, so cleanup and set FOUND. (This
+ * code should match the code after the loop.)
*/
SPI_freetuptable(tuptab);
SPI_cursor_close(portal);
/*
* Set the FOUND variable to indicate the result of executing the loop
- * (namely, whether we looped one or more times). This must be set
- * here so that it does not interfere with the value of the FOUND
- * variable inside the loop processing itself.
+ * (namely, whether we looped one or more times). This must be set here so
+ * that it does not interfere with the value of the FOUND variable inside
+ * the loop processing itself.
*/
exec_set_found(estate, found);
* ----------
*/
static int
-exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt)
+exec_stmt_open(PLpgSQL_execstate * estate, PLpgSQL_stmt_open * stmt)
{
PLpgSQL_var *curvar = NULL;
char *curname = NULL;
if (curvar->cursor_explicit_argrow < 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("arguments given for cursor without arguments")));
+ errmsg("arguments given for cursor without arguments")));
memset(&set_args, 0, sizeof(set_args));
set_args.cmd_type = PLPGSQL_STMT_SELECT;
* ----------
*/
static int
-exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt)
+exec_stmt_fetch(PLpgSQL_execstate * estate, PLpgSQL_stmt_fetch * stmt)
{
PLpgSQL_var *curvar = NULL;
PLpgSQL_rec *rec = NULL;
if (curvar->isnull)
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("cursor variable \"%s\" is NULL", curvar->refname)));
+ errmsg("cursor variable \"%s\" is NULL", curvar->refname)));
curname = DatumGetCString(DirectFunctionCall1(textout, curvar->value));
portal = SPI_cursor_find(curname);
* ----------
*/
static int
-exec_stmt_close(PLpgSQL_execstate *estate, PLpgSQL_stmt_close *stmt)
+exec_stmt_close(PLpgSQL_execstate * estate, PLpgSQL_stmt_close * stmt)
{
PLpgSQL_var *curvar = NULL;
Portal portal;
if (curvar->isnull)
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("cursor variable \"%s\" is NULL", curvar->refname)));
+ errmsg("cursor variable \"%s\" is NULL", curvar->refname)));
curname = DatumGetCString(DirectFunctionCall1(textout, curvar->value));
portal = SPI_cursor_find(curname);
* ----------
*/
static void
-exec_assign_expr(PLpgSQL_execstate *estate, PLpgSQL_datum *target,
- PLpgSQL_expr *expr)
+exec_assign_expr(PLpgSQL_execstate * estate, PLpgSQL_datum * target,
+ PLpgSQL_expr * expr)
{
Datum value;
Oid valtype;
* ----------
*/
static void
-exec_assign_value(PLpgSQL_execstate *estate,
- PLpgSQL_datum *target,
+exec_assign_value(PLpgSQL_execstate * estate,
+ PLpgSQL_datum * target,
Datum value, Oid valtype, bool *isNull)
{
switch (target->dtype)
/*
* If type is by-reference, make sure we have a freshly
- * palloc'd copy; the originally passed value may not live
- * as long as the variable! But we don't need to re-copy
- * if exec_cast_value performed a conversion; its output
- * must already be palloc'd.
+ * palloc'd copy; the originally passed value may not live as
+ * long as the variable! But we don't need to re-copy if
+ * exec_cast_value performed a conversion; its output must
+ * already be palloc'd.
*/
if (!var->datatype->typbyval && !*isNull)
{
}
/*
- * Now free the old value. (We can't do this any earlier
- * because of the possibility that we are assigning the
- * var's old value to it, eg "foo := foo". We could optimize
- * out the assignment altogether in such cases, but it's too
+ * Now free the old value. (We can't do this any earlier
+ * because of the possibility that we are assigning the var's
+ * old value to it, eg "foo := foo". We could optimize out
+ * the assignment altogether in such cases, but it's too
* infrequent to be worth testing for.)
*/
free_var(var);
rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]);
/*
- * Check that there is already a tuple in the record. We
- * need that because records don't have any predefined
- * field structure.
+ * Check that there is already a tuple in the record. We need
+ * that because records don't have any predefined field
+ * structure.
*/
if (!HeapTupleIsValid(rec->tup))
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("record \"%s\" is not assigned yet",
- rec->refname),
- errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("record \"%s\" is not assigned yet",
+ rec->refname),
+ errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
/*
* Get the number of the records field to change and the
natts = rec->tupdesc->natts;
/*
- * Set up values/datums arrays for heap_formtuple. For
- * all the attributes except the one we want to replace,
- * use the value that's in the old tuple.
+ * Set up values/datums arrays for heap_formtuple. For all
+ * the attributes except the one we want to replace, use the
+ * value that's in the old tuple.
*/
values = palloc(sizeof(Datum) * natts);
nulls = palloc(natts);
}
/*
- * Now insert the new value, being careful to cast it to
- * the right type.
+ * Now insert the new value, being careful to cast it to the
+ * right type.
*/
atttype = SPI_gettypeid(rec->tupdesc, fno + 1);
atttypmod = rec->tupdesc->attrs[fno]->atttypmod;
nulls[fno] = ' ';
/*
- * Avoid leaking the result of exec_simple_cast_value, if
- * it performed a conversion to a pass-by-ref type.
+ * Avoid leaking the result of exec_simple_cast_value, if it
+ * performed a conversion to a pass-by-ref type.
*/
if (!attisnull && values[fno] != value && !get_typbyval(atttype))
mustfree = DatumGetPointer(values[fno]);
/*
* Target is an element of an array
*
- * To handle constructs like x[1][2] := something, we have to
- * be prepared to deal with a chain of arrayelem datums.
- * Chase back to find the base array datum, and save the
- * subscript expressions as we go. (We are scanning right
- * to left here, but want to evaluate the subscripts
- * left-to-right to minimize surprises.)
+ * To handle constructs like x[1][2] := something, we have to be
+ * prepared to deal with a chain of arrayelem datums. Chase
+ * back to find the base array datum, and save the subscript
+ * expressions as we go. (We are scanning right to left here,
+ * but want to evaluate the subscripts left-to-right to
+ * minimize surprises.)
*/
nsubscripts = 0;
do
/* Fetch current value of array datum */
exec_eval_datum(estate, target, InvalidOid,
- &arraytypeid, &oldarraydatum, &oldarrayisnull);
+ &arraytypeid, &oldarraydatum, &oldarrayisnull);
arrayelemtypeid = get_element_type(arraytypeid);
if (!OidIsValid(arrayelemtypeid))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("subscripted object is not an array")));
+ errmsg("subscripted object is not an array")));
get_typlenbyvalalign(arrayelemtypeid,
&elemtyplen,
arraytyplen = get_typlen(arraytypeid);
/*
- * Evaluate the subscripts, switch into left-to-right
- * order
+ * Evaluate the subscripts, switch into left-to-right order
*/
havenullsubscript = false;
for (i = 0; i < nsubscripts; i++)
return;
/*
- * If the original array is null, cons up an empty array
- * so that the assignment can proceed; we'll end with a
+ * If the original array is null, cons up an empty array so
+ * that the assignment can proceed; we'll end with a
* one-element array containing just the assigned-to
- * subscript. This only works for varlena arrays, though;
- * for fixed-length array types we skip the assignment.
- * Again, this corresponds to the current behavior of
+ * subscript. This only works for varlena arrays, though; for
+ * fixed-length array types we skip the assignment. Again,
+ * this corresponds to the current behavior of
* ExecEvalArrayRef().
*/
if (oldarrayisnull)
arraytypeid, isNull);
/*
- * Avoid leaking the result of exec_simple_cast_value, if
- * it performed a conversion to a pass-by-ref type.
+ * Avoid leaking the result of exec_simple_cast_value, if it
+ * performed a conversion to a pass-by-ref type.
*/
if (!*isNull && coerced_value != value && !elemtypbyval)
pfree(DatumGetPointer(coerced_value));
* At present this doesn't handle PLpgSQL_expr or PLpgSQL_arrayelem datums.
*
* NOTE: caller must not modify the returned value, since it points right
- * at the stored value in the case of pass-by-reference datatypes. In some
+ * at the stored value in the case of pass-by-reference datatypes. In some
* cases we have to palloc a return value, and in such cases we put it into
* the estate's short-term memory context.
*/
static void
-exec_eval_datum(PLpgSQL_execstate *estate,
- PLpgSQL_datum *datum,
+exec_eval_datum(PLpgSQL_execstate * estate,
+ PLpgSQL_datum * datum,
Oid expectedtypeid,
Oid *typeid,
Datum *value,
if (!HeapTupleIsValid(rec->tup))
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("record \"%s\" is not assigned yet",
- rec->refname),
- errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("record \"%s\" is not assigned yet",
+ rec->refname),
+ errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
Assert(rec->tupdesc != NULL);
/* Make sure we have a valid type/typmod setting */
BlessTupleDesc(rec->tupdesc);
/*
- * In a trigger, the NEW and OLD parameters are likely to
- * be on-disk tuples that don't have the desired Datum
- * fields. Copy the tuple body and insert the right
- * values.
+ * In a trigger, the NEW and OLD parameters are likely to be
+ * on-disk tuples that don't have the desired Datum fields.
+ * Copy the tuple body and insert the right values.
*/
oldcontext = MemoryContextSwitchTo(estate->eval_econtext->ecxt_per_tuple_memory);
heap_copytuple_with_tuple(rec->tup, &worktup);
rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]);
if (!HeapTupleIsValid(rec->tup))
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("record \"%s\" is not assigned yet",
- rec->refname),
- errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("record \"%s\" is not assigned yet",
+ rec->refname),
+ errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
fno = SPI_fnumber(rec->tupdesc, recfield->fieldname);
if (fno == SPI_ERROR_NOATTRIBUTE)
ereport(ERROR,
* ----------
*/
static int
-exec_eval_integer(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr,
+exec_eval_integer(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr,
bool *isNull)
{
Datum exprdatum;
* ----------
*/
static bool
-exec_eval_boolean(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr,
+exec_eval_boolean(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr,
bool *isNull)
{
Datum exprdatum;
* ----------
*/
static Datum
-exec_eval_expr(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr,
+exec_eval_expr(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr,
bool *isNull,
Oid *rettype)
{
if (rc != SPI_OK_SELECT)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("query \"%s\" did not return data", expr->query)));
+ errmsg("query \"%s\" did not return data", expr->query)));
/*
* If there are no rows selected, the result is NULL.
* ----------
*/
static int
-exec_run_select(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr, long maxtuples, Portal *portalP)
+exec_run_select(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr, long maxtuples, Portal *portalP)
{
int i;
Datum *values;
* ----------
*/
static Datum
-exec_eval_simple_expr(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr,
+exec_eval_simple_expr(PLpgSQL_execstate * estate,
+ PLpgSQL_expr * expr,
bool *isNull,
Oid *rettype)
{
*rettype = expr->expr_simple_type;
/*
- * Prepare the expression for execution, if it's not been done already
- * in the current transaction.
+ * Prepare the expression for execution, if it's not been done already in
+ * the current transaction.
*/
if (expr->expr_simple_state == NULL)
{
/*
* Param list can live in econtext's temporary memory context.
*
- * XXX think about avoiding repeated palloc's for param lists? Beware
- * however that this routine is re-entrant: exec_eval_datum() can call
- * it back for subscript evaluation, and so there can be a need to
- * have more than one active param list.
+ * XXX think about avoiding repeated palloc's for param lists? Beware however
+ * that this routine is re-entrant: exec_eval_datum() can call it back for
+ * subscript evaluation, and so there can be a need to have more than one
+ * active param list.
*/
paramLI = (ParamListInfo)
MemoryContextAlloc(econtext->ecxt_per_tuple_memory,
- (expr->nparams + 1) * sizeof(ParamListInfoData));
+ (expr->nparams + 1) * sizeof(ParamListInfoData));
/*
* Put the parameter values into the parameter list entries.
econtext->ecxt_param_list_info = paramLI;
/*
- * We have to do some of the things SPI_execute_plan would do,
- * in particular advance the snapshot if we are in a non-read-only
- * function. Without this, stable functions within the expression
- * would fail to see updates made so far by our own function.
+ * We have to do some of the things SPI_execute_plan would do, in
+ * particular advance the snapshot if we are in a non-read-only function.
+ * Without this, stable functions within the expression would fail to see
+ * updates made so far by our own function.
*/
SPI_push();
saveActiveSnapshot = ActiveSnapshot;
* ----------
*/
static void
-exec_move_row(PLpgSQL_execstate *estate,
- PLpgSQL_rec *rec,
- PLpgSQL_row *row,
+exec_move_row(PLpgSQL_execstate * estate,
+ PLpgSQL_rec * rec,
+ PLpgSQL_row * row,
HeapTuple tup, TupleDesc tupdesc)
{
/*
* Row is a bit more complicated in that we assign the individual
* attributes of the tuple to the variables the row points to.
*
- * NOTE: this code used to demand row->nfields == tup->t_data->t_natts,
- * but that's wrong. The tuple might have more fields than we
- * expected if it's from an inheritance-child table of the current
- * table, or it might have fewer if the table has had columns added by
- * ALTER TABLE. Ignore extra columns and assume NULL for missing
- * columns, the same as heap_getattr would do. We also have to skip
- * over dropped columns in either the source or destination.
+ * NOTE: this code used to demand row->nfields == tup->t_data->t_natts, but
+ * that's wrong. The tuple might have more fields than we expected if
+ * it's from an inheritance-child table of the current table, or it might
+ * have fewer if the table has had columns added by ALTER TABLE. Ignore
+ * extra columns and assume NULL for missing columns, the same as
+ * heap_getattr would do. We also have to skip over dropped columns in
+ * either the source or destination.
*
- * If we have no tuple data at all, we'll assign NULL to all columns of
- * the row variable.
+ * If we have no tuple data at all, we'll assign NULL to all columns of the
+ * row variable.
*/
if (row != NULL)
{
* ----------
*/
static HeapTuple
-make_tuple_from_row(PLpgSQL_execstate *estate,
- PLpgSQL_row *row,
+make_tuple_from_row(PLpgSQL_execstate * estate,
+ PLpgSQL_row * row,
TupleDesc tupdesc)
{
int natts = tupdesc->natts;
if (!isnull)
{
/*
- * If the type of the queries return value isn't that of the
- * variable, convert it.
+ * If the type of the queries return value isn't that of the variable,
+ * convert it.
*/
if (valtype != reqtype || reqtypmod != -1)
{
* ----------
*/
static void
-exec_simple_check_plan(PLpgSQL_expr *expr)
+exec_simple_check_plan(PLpgSQL_expr * expr)
{
_SPI_plan *spi_plan = (_SPI_plan *) expr->plan;
Plan *plan;
expr->expr_simple_expr = NULL;
/*
- * 1. We can only evaluate queries that resulted in one single
- * execution plan
+ * 1. We can only evaluate queries that resulted in one single execution
+ * plan
*/
if (list_length(spi_plan->ptlist) != 1)
return;
* ----------
*/
static void
-exec_set_found(PLpgSQL_execstate *estate, bool state)
+exec_set_found(PLpgSQL_execstate * estate, bool state)
{
PLpgSQL_var *var;
active_simple_exprs = NULL;
/*
- * If we are doing a clean transaction shutdown, free the
- * EState (so that any remaining resources will be released
- * correctly). In an abort, we expect the regular abort
- * recovery procedures to release everything of interest.
+ * If we are doing a clean transaction shutdown, free the EState (so that
+ * any remaining resources will be released correctly). In an abort, we
+ * expect the regular abort recovery procedures to release everything of
+ * interest.
*/
if (event == XACT_EVENT_COMMIT && simple_eval_estate)
FreeExecutorState(simple_eval_estate);
}
static void
-free_var(PLpgSQL_var *var)
+free_var(PLpgSQL_var * var)
{
if (var->freeval)
{
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.45 2005/06/22 01:35:02 neilc Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.46 2005/10/15 02:49:50 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
* ----------
*/
void
-plpgsql_dstring_init(PLpgSQL_dstring *ds)
+plpgsql_dstring_init(PLpgSQL_dstring * ds)
{
ds->value = palloc(ds->alloc = 512);
ds->used = 1;
* ----------
*/
void
-plpgsql_dstring_free(PLpgSQL_dstring *ds)
+plpgsql_dstring_free(PLpgSQL_dstring * ds)
{
pfree(ds->value);
}
static void
-plpgsql_dstring_expand(PLpgSQL_dstring *ds, int needed)
+plpgsql_dstring_expand(PLpgSQL_dstring * ds, int needed)
{
/* Don't allow truncating the string */
Assert(needed > ds->alloc);
* ----------
*/
void
-plpgsql_dstring_append(PLpgSQL_dstring *ds, const char *str)
+plpgsql_dstring_append(PLpgSQL_dstring * ds, const char *str)
{
int len = strlen(str);
int needed = ds->used + len;
* ----------
*/
void
-plpgsql_dstring_append_char(PLpgSQL_dstring *ds, char c)
+plpgsql_dstring_append_char(PLpgSQL_dstring * ds, char c)
{
if (ds->used == ds->alloc)
plpgsql_dstring_expand(ds, ds->used + 1);
* ----------
*/
char *
-plpgsql_dstring_get(PLpgSQL_dstring *ds)
+plpgsql_dstring_get(PLpgSQL_dstring * ds)
{
return ds->value;
}
{
ns->items_alloc *= 2;
ns->items = repalloc(ns->items,
- sizeof(PLpgSQL_nsitem *) * ns->items_alloc);
+ sizeof(PLpgSQL_nsitem *) * ns->items_alloc);
}
}
int i;
/*
- * Lookup name in the namestack; do the lookup in the current
- * namespace only.
+ * Lookup name in the namestack; do the lookup in the current namespace
+ * only.
*/
for (ns = ns_current; ns != NULL; ns = ns->upper)
{
else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("qualified identifier cannot be used here: %s",
- sstart)));
+ errmsg("qualified identifier cannot be used here: %s",
+ sstart)));
/* If not done, skip whitespace, dot, whitespace */
if (*s)
* Statement type as a string, for use in error messages etc.
*/
const char *
-plpgsql_stmt_typename(PLpgSQL_stmt *stmt)
+plpgsql_stmt_typename(PLpgSQL_stmt * stmt)
{
switch (stmt->cmd_type)
{
static int dump_indent;
static void dump_ind();
-static void dump_stmt(PLpgSQL_stmt *stmt);
-static void dump_block(PLpgSQL_stmt_block *block);
-static void dump_assign(PLpgSQL_stmt_assign *stmt);
-static void dump_if(PLpgSQL_stmt_if *stmt);
-static void dump_loop(PLpgSQL_stmt_loop *stmt);
-static void dump_while(PLpgSQL_stmt_while *stmt);
-static void dump_fori(PLpgSQL_stmt_fori *stmt);
-static void dump_fors(PLpgSQL_stmt_fors *stmt);
-static void dump_select(PLpgSQL_stmt_select *stmt);
-static void dump_exit(PLpgSQL_stmt_exit *stmt);
-static void dump_return(PLpgSQL_stmt_return *stmt);
-static void dump_return_next(PLpgSQL_stmt_return_next *stmt);
-static void dump_raise(PLpgSQL_stmt_raise *stmt);
-static void dump_execsql(PLpgSQL_stmt_execsql *stmt);
-static void dump_dynexecute(PLpgSQL_stmt_dynexecute *stmt);
-static void dump_dynfors(PLpgSQL_stmt_dynfors *stmt);
-static void dump_getdiag(PLpgSQL_stmt_getdiag *stmt);
-static void dump_open(PLpgSQL_stmt_open *stmt);
-static void dump_fetch(PLpgSQL_stmt_fetch *stmt);
-static void dump_close(PLpgSQL_stmt_close *stmt);
-static void dump_perform(PLpgSQL_stmt_perform *stmt);
-static void dump_expr(PLpgSQL_expr *expr);
+static void dump_stmt(PLpgSQL_stmt * stmt);
+static void dump_block(PLpgSQL_stmt_block * block);
+static void dump_assign(PLpgSQL_stmt_assign * stmt);
+static void dump_if(PLpgSQL_stmt_if * stmt);
+static void dump_loop(PLpgSQL_stmt_loop * stmt);
+static void dump_while(PLpgSQL_stmt_while * stmt);
+static void dump_fori(PLpgSQL_stmt_fori * stmt);
+static void dump_fors(PLpgSQL_stmt_fors * stmt);
+static void dump_select(PLpgSQL_stmt_select * stmt);
+static void dump_exit(PLpgSQL_stmt_exit * stmt);
+static void dump_return(PLpgSQL_stmt_return * stmt);
+static void dump_return_next(PLpgSQL_stmt_return_next * stmt);
+static void dump_raise(PLpgSQL_stmt_raise * stmt);
+static void dump_execsql(PLpgSQL_stmt_execsql * stmt);
+static void dump_dynexecute(PLpgSQL_stmt_dynexecute * stmt);
+static void dump_dynfors(PLpgSQL_stmt_dynfors * stmt);
+static void dump_getdiag(PLpgSQL_stmt_getdiag * stmt);
+static void dump_open(PLpgSQL_stmt_open * stmt);
+static void dump_fetch(PLpgSQL_stmt_fetch * stmt);
+static void dump_close(PLpgSQL_stmt_close * stmt);
+static void dump_perform(PLpgSQL_stmt_perform * stmt);
+static void dump_expr(PLpgSQL_expr * expr);
static void
}
static void
-dump_stmt(PLpgSQL_stmt *stmt)
+dump_stmt(PLpgSQL_stmt * stmt)
{
printf("%3d:", stmt->lineno);
switch (stmt->cmd_type)
static void
dump_stmts(List *stmts)
{
- ListCell *s;
+ ListCell *s;
dump_indent += 2;
- foreach (s, stmts)
+ foreach(s, stmts)
dump_stmt((PLpgSQL_stmt *) lfirst(s));
dump_indent -= 2;
}
static void
-dump_block(PLpgSQL_stmt_block *block)
+dump_block(PLpgSQL_stmt_block * block)
{
char *name;
if (block->exceptions)
{
- ListCell *e;
+ ListCell *e;
- foreach (e, block->exceptions->exc_list)
+ foreach(e, block->exceptions->exc_list)
{
PLpgSQL_exception *exc = (PLpgSQL_exception *) lfirst(e);
PLpgSQL_condition *cond;
}
static void
-dump_assign(PLpgSQL_stmt_assign *stmt)
+dump_assign(PLpgSQL_stmt_assign * stmt)
{
dump_ind();
printf("ASSIGN var %d := ", stmt->varno);
}
static void
-dump_if(PLpgSQL_stmt_if *stmt)
+dump_if(PLpgSQL_stmt_if * stmt)
{
dump_ind();
printf("IF ");
}
static void
-dump_loop(PLpgSQL_stmt_loop *stmt)
+dump_loop(PLpgSQL_stmt_loop * stmt)
{
dump_ind();
printf("LOOP\n");
}
static void
-dump_while(PLpgSQL_stmt_while *stmt)
+dump_while(PLpgSQL_stmt_while * stmt)
{
dump_ind();
printf("WHILE ");
}
static void
-dump_fori(PLpgSQL_stmt_fori *stmt)
+dump_fori(PLpgSQL_stmt_fori * stmt)
{
dump_ind();
printf("FORI %s %s\n", stmt->var->refname, (stmt->reverse) ? "REVERSE" : "NORMAL");
}
static void
-dump_fors(PLpgSQL_stmt_fors *stmt)
+dump_fors(PLpgSQL_stmt_fors * stmt)
{
dump_ind();
printf("FORS %s ", (stmt->rec != NULL) ? stmt->rec->refname : stmt->row->refname);
}
static void
-dump_select(PLpgSQL_stmt_select *stmt)
+dump_select(PLpgSQL_stmt_select * stmt)
{
dump_ind();
printf("SELECT ");
}
static void
-dump_open(PLpgSQL_stmt_open *stmt)
+dump_open(PLpgSQL_stmt_open * stmt)
{
dump_ind();
printf("OPEN curvar=%d\n", stmt->curvar);
}
static void
-dump_fetch(PLpgSQL_stmt_fetch *stmt)
+dump_fetch(PLpgSQL_stmt_fetch * stmt)
{
dump_ind();
printf("FETCH curvar=%d\n", stmt->curvar);
}
static void
-dump_close(PLpgSQL_stmt_close *stmt)
+dump_close(PLpgSQL_stmt_close * stmt)
{
dump_ind();
printf("CLOSE curvar=%d\n", stmt->curvar);
}
static void
-dump_perform(PLpgSQL_stmt_perform *stmt)
+dump_perform(PLpgSQL_stmt_perform * stmt)
{
dump_ind();
printf("PERFORM expr = ");
}
static void
-dump_exit(PLpgSQL_stmt_exit *stmt)
+dump_exit(PLpgSQL_stmt_exit * stmt)
{
dump_ind();
printf("%s label='%s'",
}
static void
-dump_return(PLpgSQL_stmt_return *stmt)
+dump_return(PLpgSQL_stmt_return * stmt)
{
dump_ind();
printf("RETURN ");
}
static void
-dump_return_next(PLpgSQL_stmt_return_next *stmt)
+dump_return_next(PLpgSQL_stmt_return_next * stmt)
{
dump_ind();
printf("RETURN NEXT ");
}
static void
-dump_raise(PLpgSQL_stmt_raise *stmt)
+dump_raise(PLpgSQL_stmt_raise * stmt)
{
- ListCell *lc;
- int i = 0;
+ ListCell *lc;
+ int i = 0;
dump_ind();
printf("RAISE '%s'\n", stmt->message);
dump_indent += 2;
- foreach (lc, stmt->params)
+ foreach(lc, stmt->params)
{
dump_ind();
printf(" parameter %d: ", i++);
}
static void
-dump_execsql(PLpgSQL_stmt_execsql *stmt)
+dump_execsql(PLpgSQL_stmt_execsql * stmt)
{
dump_ind();
printf("EXECSQL ");
}
static void
-dump_dynexecute(PLpgSQL_stmt_dynexecute *stmt)
+dump_dynexecute(PLpgSQL_stmt_dynexecute * stmt)
{
dump_ind();
printf("EXECUTE ");
}
static void
-dump_dynfors(PLpgSQL_stmt_dynfors *stmt)
+dump_dynfors(PLpgSQL_stmt_dynfors * stmt)
{
dump_ind();
printf("FORS %s EXECUTE ", (stmt->rec != NULL) ? stmt->rec->refname : stmt->row->refname);
}
static void
-dump_getdiag(PLpgSQL_stmt_getdiag *stmt)
+dump_getdiag(PLpgSQL_stmt_getdiag * stmt)
{
- ListCell *lc;
+ ListCell *lc;
dump_ind();
printf("GET DIAGNOSTICS ");
- foreach (lc, stmt->diag_items)
+ foreach(lc, stmt->diag_items)
{
PLpgSQL_diag_item *diag_item = (PLpgSQL_diag_item *) lfirst(lc);
}
static void
-dump_expr(PLpgSQL_expr *expr)
+dump_expr(PLpgSQL_expr * expr)
{
int i;
}
void
-plpgsql_dumptree(PLpgSQL_function *func)
+plpgsql_dumptree(PLpgSQL_function * func)
{
int i;
PLpgSQL_datum *d;
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_handler.c,v 1.25 2005/03/29 00:17:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_handler.c,v 1.26 2005/10/15 02:49:50 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
extern DLLIMPORT bool check_function_bodies;
-static bool plpgsql_firstcall = true;
+static bool plpgsql_firstcall = true;
static void plpgsql_init_all(void);
*/
if (CALLED_AS_TRIGGER(fcinfo))
retval = PointerGetDatum(plpgsql_exec_trigger(func,
- (TriggerData *) fcinfo->context));
+ (TriggerData *) fcinfo->context));
else
retval = plpgsql_exec_function(func, fcinfo);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("plpgsql functions cannot take type %s",
- format_type_be(proc->proargtypes.values[i]))));
+ format_type_be(proc->proargtypes.values[i]))));
}
}
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.64 2005/06/22 01:35:02 neilc Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.65 2005/10/15 02:49:50 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
int alloc;
int used; /* Including NUL terminator */
char *value;
-} PLpgSQL_dstring;
+} PLpgSQL_dstring;
typedef struct
Oid typioparam;
FmgrInfo typinput; /* lookup info for typinput function */
int32 atttypmod; /* typmod (taken from someplace else) */
-} PLpgSQL_type;
+} PLpgSQL_type;
/*
{ /* Generic datum array item */
int dtype;
int dno;
-} PLpgSQL_datum;
+} PLpgSQL_datum;
/*
* The variants PLpgSQL_var, PLpgSQL_row, and PLpgSQL_rec share these
int dno;
char *refname;
int lineno;
-} PLpgSQL_variable;
+} PLpgSQL_variable;
typedef struct PLpgSQL_expr
{ /* SQL Query to plan and execute */
/* params to pass to expr */
int nparams;
int params[1]; /* VARIABLE SIZE ARRAY ... must be last */
-} PLpgSQL_expr;
+} PLpgSQL_expr;
typedef struct
Datum value;
bool isnull;
bool freeval;
-} PLpgSQL_var;
+} PLpgSQL_var;
typedef struct
int nfields;
char **fieldnames;
int *varnos;
-} PLpgSQL_row;
+} PLpgSQL_row;
typedef struct
TupleDesc tupdesc;
bool freetup;
bool freetupdesc;
-} PLpgSQL_rec;
+} PLpgSQL_rec;
typedef struct
int rfno;
char *fieldname;
int recparentno; /* dno of parent record */
-} PLpgSQL_recfield;
+} PLpgSQL_recfield;
typedef struct
int dno;
PLpgSQL_expr *subscript;
int arrayparentno; /* dno of parent array variable */
-} PLpgSQL_arrayelem;
+} PLpgSQL_arrayelem;
typedef struct
int dtype;
int dno;
PLpgSQL_expr *argnum;
-} PLpgSQL_trigarg;
+} PLpgSQL_trigarg;
typedef struct
int itemtype;
int itemno;
char name[1];
-} PLpgSQL_nsitem;
+} PLpgSQL_nsitem;
/* XXX: consider adapting this to use List */
int items_used;
PLpgSQL_nsitem **items;
struct PLpgSQL_ns *upper;
-} PLpgSQL_ns;
+} PLpgSQL_ns;
typedef struct
{ /* Generic execution node */
int cmd_type;
int lineno;
-} PLpgSQL_stmt;
+} PLpgSQL_stmt;
typedef struct PLpgSQL_condition
int sqlerrstate; /* SQLSTATE code */
char *condname; /* condition name (for debugging) */
struct PLpgSQL_condition *next;
-} PLpgSQL_condition;
+} PLpgSQL_condition;
typedef struct
{
int sqlstate_varno;
int sqlerrm_varno;
List *exc_list; /* List of WHEN clauses */
-} PLpgSQL_exception_block;
+} PLpgSQL_exception_block;
typedef struct
{ /* One EXCEPTION ... WHEN clause */
int lineno;
PLpgSQL_condition *conditions;
List *action; /* List of statements */
-} PLpgSQL_exception;
+} PLpgSQL_exception;
typedef struct
int n_initvars;
int *initvarnos;
PLpgSQL_exception_block *exceptions;
-} PLpgSQL_stmt_block;
+} PLpgSQL_stmt_block;
typedef struct
int lineno;
int varno;
PLpgSQL_expr *expr;
-} PLpgSQL_stmt_assign;
+} PLpgSQL_stmt_assign;
typedef struct
{ /* PERFORM statement */
int cmd_type;
int lineno;
PLpgSQL_expr *expr;
-} PLpgSQL_stmt_perform;
+} PLpgSQL_stmt_perform;
typedef struct
{ /* Get Diagnostics item */
int kind; /* id for diagnostic value desired */
int target; /* where to assign it */
-} PLpgSQL_diag_item;
+} PLpgSQL_diag_item;
typedef struct
{ /* Get Diagnostics statement */
int cmd_type;
int lineno;
List *diag_items; /* List of PLpgSQL_diag_item */
-} PLpgSQL_stmt_getdiag;
+} PLpgSQL_stmt_getdiag;
typedef struct
PLpgSQL_expr *cond;
List *true_body; /* List of statements */
List *false_body; /* List of statements */
-} PLpgSQL_stmt_if;
+} PLpgSQL_stmt_if;
typedef struct
int lineno;
char *label;
List *body; /* List of statements */
-} PLpgSQL_stmt_loop;
+} PLpgSQL_stmt_loop;
typedef struct
char *label;
PLpgSQL_expr *cond;
List *body; /* List of statements */
-} PLpgSQL_stmt_while;
+} PLpgSQL_stmt_while;
typedef struct
PLpgSQL_expr *upper;
int reverse;
List *body; /* List of statements */
-} PLpgSQL_stmt_fori;
+} PLpgSQL_stmt_fori;
typedef struct
PLpgSQL_row *row;
PLpgSQL_expr *query;
List *body; /* List of statements */
-} PLpgSQL_stmt_fors;
+} PLpgSQL_stmt_fors;
typedef struct
PLpgSQL_row *row;
PLpgSQL_expr *query;
List *body; /* List of statements */
-} PLpgSQL_stmt_dynfors;
+} PLpgSQL_stmt_dynfors;
typedef struct
PLpgSQL_rec *rec;
PLpgSQL_row *row;
PLpgSQL_expr *query;
-} PLpgSQL_stmt_select;
+} PLpgSQL_stmt_select;
typedef struct
PLpgSQL_expr *argquery;
PLpgSQL_expr *query;
PLpgSQL_expr *dynquery;
-} PLpgSQL_stmt_open;
+} PLpgSQL_stmt_open;
typedef struct
PLpgSQL_rec *rec;
PLpgSQL_row *row;
int curvar;
-} PLpgSQL_stmt_fetch;
+} PLpgSQL_stmt_fetch;
typedef struct
int cmd_type;
int lineno;
int curvar;
-} PLpgSQL_stmt_close;
+} PLpgSQL_stmt_close;
typedef struct
bool is_exit; /* Is this an exit or a continue? */
char *label;
PLpgSQL_expr *cond;
-} PLpgSQL_stmt_exit;
+} PLpgSQL_stmt_exit;
typedef struct
int lineno;
PLpgSQL_expr *expr;
int retvarno;
-} PLpgSQL_stmt_return;
+} PLpgSQL_stmt_return;
typedef struct
{ /* RETURN NEXT statement */
int lineno;
PLpgSQL_expr *expr;
int retvarno;
-} PLpgSQL_stmt_return_next;
+} PLpgSQL_stmt_return_next;
typedef struct
{ /* RAISE statement */
int elog_level;
char *message;
List *params; /* list of expressions */
-} PLpgSQL_stmt_raise;
+} PLpgSQL_stmt_raise;
typedef struct
int cmd_type;
int lineno;
PLpgSQL_expr *sqlstmt;
-} PLpgSQL_stmt_execsql;
+} PLpgSQL_stmt_execsql;
typedef struct
{ /* Dynamic SQL string to execute */
int cmd_type;
int lineno;
- PLpgSQL_rec *rec; /* INTO record or row variable */
+ PLpgSQL_rec *rec; /* INTO record or row variable */
PLpgSQL_row *row;
PLpgSQL_expr *query;
-} PLpgSQL_stmt_dynexecute;
+} PLpgSQL_stmt_dynexecute;
typedef struct PLpgSQL_func_hashkey
Oid funcOid;
/*
- * For a trigger function, the OID of the relation triggered on is
- * part of the hashkey --- we want to compile the trigger separately
- * for each relation it is used with, in case the rowtype is
- * different. Zero if not called as a trigger.
+ * For a trigger function, the OID of the relation triggered on is part of
+ * the hashkey --- we want to compile the trigger separately for each
+ * relation it is used with, in case the rowtype is different. Zero if
+ * not called as a trigger.
*/
Oid trigrelOid;
/*
- * We include actual argument types in the hash key to support
- * polymorphic PLpgSQL functions. Be careful that extra positions are
- * zeroed!
+ * We include actual argument types in the hash key to support polymorphic
+ * PLpgSQL functions. Be careful that extra positions are zeroed!
*/
Oid argtypes[FUNC_MAX_ARGS];
-} PLpgSQL_func_hashkey;
+} PLpgSQL_func_hashkey;
typedef struct PLpgSQL_function
int ndatums;
PLpgSQL_datum **datums;
PLpgSQL_stmt_block *action;
-} PLpgSQL_function;
+} PLpgSQL_function;
typedef struct
bool readonly_func;
TupleDesc rettupdesc;
- char *exitlabel; /* the "target" label of the current
- * EXIT or CONTINUE stmt, if any */
+ char *exitlabel; /* the "target" label of the current EXIT or
+ * CONTINUE stmt, if any */
Tuplestorestate *tuple_store; /* SRFs accumulate results here */
MemoryContext tuple_store_cxt;
PLpgSQL_function *err_func; /* current func */
PLpgSQL_stmt *err_stmt; /* current stmt */
const char *err_text; /* additional state info */
-} PLpgSQL_execstate;
+} PLpgSQL_execstate;
/**********************************************************************
* Global variable declarations
**********************************************************************/
-extern bool plpgsql_DumpExecTree;
-extern bool plpgsql_SpaceScanned;
+extern bool plpgsql_DumpExecTree;
+extern bool plpgsql_SpaceScanned;
extern int plpgsql_nDatums;
extern PLpgSQL_datum **plpgsql_Datums;
#define plpgsql_yytext plpgsql_base_yytext
extern PLpgSQL_function *plpgsql_curr_compile;
-extern bool plpgsql_check_syntax;
+extern bool plpgsql_check_syntax;
extern MemoryContext compile_tmp_cxt;
/**********************************************************************
extern PLpgSQL_type *plpgsql_parse_datatype(const char *string);
extern PLpgSQL_type *plpgsql_build_datatype(Oid typeOid, int32 typmod);
extern PLpgSQL_variable *plpgsql_build_variable(const char *refname, int lineno,
- PLpgSQL_type *dtype,
+ PLpgSQL_type * dtype,
bool add2namespace);
extern PLpgSQL_condition *plpgsql_parse_err_condition(char *condname);
-extern void plpgsql_adddatum(PLpgSQL_datum *new);
+extern void plpgsql_adddatum(PLpgSQL_datum * new);
extern int plpgsql_add_initdatums(int **varnos);
extern void plpgsql_HashTableInit(void);
extern void plpgsql_compile_error_callback(void *arg);
* Functions in pl_exec.c
* ----------
*/
-extern Datum plpgsql_exec_function(PLpgSQL_function *func,
+extern Datum plpgsql_exec_function(PLpgSQL_function * func,
FunctionCallInfo fcinfo);
-extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function *func,
+extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function * func,
TriggerData *trigdata);
extern void plpgsql_xact_cb(XactEvent event, void *arg);
* Functions for the dynamic string handling in pl_funcs.c
* ----------
*/
-extern void plpgsql_dstring_init(PLpgSQL_dstring *ds);
-extern void plpgsql_dstring_free(PLpgSQL_dstring *ds);
-extern void plpgsql_dstring_append(PLpgSQL_dstring *ds, const char *str);
-extern void plpgsql_dstring_append_char(PLpgSQL_dstring *ds, char c);
-extern char *plpgsql_dstring_get(PLpgSQL_dstring *ds);
+extern void plpgsql_dstring_init(PLpgSQL_dstring * ds);
+extern void plpgsql_dstring_free(PLpgSQL_dstring * ds);
+extern void plpgsql_dstring_append(PLpgSQL_dstring * ds, const char *str);
+extern void plpgsql_dstring_append_char(PLpgSQL_dstring * ds, char c);
+extern char *plpgsql_dstring_get(PLpgSQL_dstring * ds);
/* ----------
* Functions for the namestack handling in pl_funcs.c
* ----------
*/
extern void plpgsql_convert_ident(const char *s, char **output, int numidents);
-extern const char *plpgsql_stmt_typename(PLpgSQL_stmt *stmt);
-extern void plpgsql_dumptree(PLpgSQL_function *func);
+extern const char *plpgsql_stmt_typename(PLpgSQL_stmt * stmt);
+extern void plpgsql_dumptree(PLpgSQL_function * func);
/* ----------
* Externs in gram.y and scan.l
* MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.65 2005/07/10 04:56:55 momjian Exp $
+ * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.66 2005/10/15 02:49:50 momjian Exp $
*
*********************************************************************
*/
int is_rowtype;
/*
- * is_rowtype can be: -1 not known yet (initial state) 0 scalar
- * datatype 1 rowtype 2 rowtype, but I/O functions not set up yet
+ * is_rowtype can be: -1 not known yet (initial state) 0 scalar datatype
+ * 1 rowtype 2 rowtype, but I/O functions not set up yet
*/
} PLyTypeInfo;
TransactionId fn_xmin;
CommandId fn_cmin;
bool fn_readonly;
- PLyTypeInfo result; /* also used to store info for trigger
- * tuple type */
+ PLyTypeInfo result; /* also used to store info for trigger tuple
+ * type */
PLyTypeInfo args[FUNC_MAX_ARGS];
int nargs;
PyObject *code; /* compiled procedure code */
HeapTuple trv;
proc = PLy_procedure_get(fcinfo,
- RelationGetRelid(tdata->tg_relation));
+ RelationGetRelid(tdata->tg_relation));
PLy_curr_procedure = proc;
trv = PLy_trigger_handler(fcinfo, proc);
retval = PointerGetDatum(trv);
{
/*
* hmmm, perhaps they only read the pltcl page, not a
- * surprising thing since i've written no documentation,
- * so accept a belated OK
+ * surprising thing since i've written no documentation, so
+ * accept a belated OK
*/
elog(ERROR, "expected return to be \"SKIP\" or \"MODIFY\"");
}
modvalues[i] = FunctionCall3(&proc->result.out.r.atts[atti].typfunc,
CStringGetDatum(src),
- ObjectIdGetDatum(proc->result.out.r.atts[atti].typioparam),
- Int32GetDatum(tupdesc->attrs[atti]->atttypmod));
+ ObjectIdGetDatum(proc->result.out.r.atts[atti].typioparam),
+ Int32GetDatum(tupdesc->attrs[atti]->atttypmod));
modnulls[i] = ' ';
Py_DECREF(plstr);
Py_DECREF(pltname);
stroid = DatumGetCString(DirectFunctionCall1(oidout,
- ObjectIdGetDatum(tdata->tg_relation->rd_id)));
+ ObjectIdGetDatum(tdata->tg_relation->rd_id)));
pltrelid = PyString_FromString(stroid);
PyDict_SetItemString(pltdata, "relid", pltrelid);
Py_DECREF(pltrelid);
Assert(!PLy_error_in_progress);
/*
- * Disconnect from SPI manager and then create the return values
- * datum (if the input function does a palloc for it this must not
- * be allocated in the SPI memory context because SPI_finish would
- * free it).
+ * Disconnect from SPI manager and then create the return values datum
+ * (if the input function does a palloc for it this must not be
+ * allocated in the SPI memory context because SPI_finish would free
+ * it).
*/
if (SPI_finish() != SPI_OK_FINISH)
elog(ERROR, "SPI_finish failed");
plrv_sc = PyString_AsString(plrv_so);
rv = FunctionCall3(&proc->result.out.d.typfunc,
PointerGetDatum(plrv_sc),
- ObjectIdGetDatum(proc->result.out.d.typioparam),
+ ObjectIdGetDatum(proc->result.out.d.typioparam),
Int32GetDatum(-1));
}
proc->globals, proc->globals);
/*
- * If there was an error in a PG callback, propagate that no matter
- * what Python claims about its success.
+ * If there was an error in a PG callback, propagate that no matter what
+ * Python claims about its success.
*/
if (PLy_error_in_progress)
{
dt = FunctionCall3(&(proc->args[i].in.d.typfunc),
fcinfo->arg[i],
- ObjectIdGetDatum(proc->args[i].in.d.typioparam),
+ ObjectIdGetDatum(proc->args[i].in.d.typioparam),
Int32GetDatum(-1));
ct = DatumGetCString(dt);
arg = (proc->args[i].in.d.func) (ct);
PG_TRY();
{
/*
- * get information required for output conversion of the return
- * value, but only if this isn't a trigger.
+ * get information required for output conversion of the return value,
+ * but only if this isn't a trigger.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
{
Form_pg_type rvTypeStruct;
rvTypeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(procStruct->prorettype),
+ ObjectIdGetDatum(procStruct->prorettype),
0, 0, 0);
if (!HeapTupleIsValid(rvTypeTup))
elog(ERROR, "cache lookup failed for type %u",
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("plpython functions cannot return type %s",
- format_type_be(procStruct->prorettype))));
+ errmsg("plpython functions cannot return type %s",
+ format_type_be(procStruct->prorettype))));
}
if (rvTypeStruct->typtype == 'c')
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("plpython functions cannot return tuples yet")));
+ errmsg("plpython functions cannot return tuples yet")));
else
PLy_output_datum_func(&proc->result, rvTypeTup);
}
/*
- * now get information required for input conversion of the
- * procedures arguments.
+ * now get information required for input conversion of the procedures
+ * arguments.
*/
proc->nargs = fcinfo->nargs;
for (i = 0; i < fcinfo->nargs; i++)
Form_pg_type argTypeStruct;
argTypeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(procStruct->proargtypes.values[i]),
+ ObjectIdGetDatum(procStruct->proargtypes.values[i]),
0, 0, 0);
if (!HeapTupleIsValid(argTypeTup))
elog(ERROR, "cache lookup failed for type %u",
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("plpython functions cannot take type %s",
- format_type_be(procStruct->proargtypes.values[i]))));
+ format_type_be(procStruct->proargtypes.values[i]))));
if (argTypeStruct->typtype != 'c')
PLy_input_datum_func(&(proc->args[i]),
procStruct->proargtypes.values[i],
argTypeTup);
else
- proc->args[i].is_rowtype = 2; /* still need to set I/O
- * funcs */
+ proc->args[i].is_rowtype = 2; /* still need to set I/O funcs */
ReleaseSysCache(argTypeTup);
}
proc->globals = PyDict_Copy(PLy_interp_globals);
/*
- * SD is private preserved data between calls GD is global data shared
- * by all functions
+ * SD is private preserved data between calls GD is global data shared by
+ * all functions
*/
proc->statics = PyDict_New();
PyDict_SetItemString(proc->globals, "SD", proc->statics);
continue;
typeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(desc->attrs[i]->atttypid),
+ ObjectIdGetDatum(desc->attrs[i]->atttypid),
0, 0, 0);
if (!HeapTupleIsValid(typeTup))
elog(ERROR, "cache lookup failed for type %u",
continue;
typeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(desc->attrs[i]->atttypid),
+ ObjectIdGetDatum(desc->attrs[i]->atttypid),
0, 0, 0);
if (!HeapTupleIsValid(typeTup))
elog(ERROR, "cache lookup failed for type %u",
{
vdat = FunctionCall3(&info->in.r.atts[i].typfunc,
vattr,
- ObjectIdGetDatum(info->in.r.atts[i].typioparam),
- Int32GetDatum(desc->attrs[i]->atttypmod));
+ ObjectIdGetDatum(info->in.r.atts[i].typioparam),
+ Int32GetDatum(desc->attrs[i]->atttypmod));
vsrc = DatumGetCString(vdat);
/*
if ((list) && (!PySequence_Check(list)))
{
PyErr_SetString(PLy_exc_spi_error,
- "Second argument in plpy.prepare() must be a sequence");
+ "Second argument in plpy.prepare() must be a sequence");
return NULL;
}
/*
* the other loop might throw an exception, if PLyTypeInfo
- * member isn't properly initialized the Py_DECREF(plan)
- * will go boom
+ * member isn't properly initialized the Py_DECREF(plan) will
+ * go boom
*/
for (i = 0; i < nargs; i++)
{
sptr = PyString_AsString(optr);
/*
- * XXX should extend this to allow qualified type
- * names
+ * XXX should extend this to allow qualified type names
*/
typeTup = typenameType(makeTypeName(sptr));
Py_DECREF(optr);
char *sv;
PyObject *so = PyObject_Str(list);
+
if (!so)
PLy_elog(ERROR, "function \"%s\" could not execute plan",
PLy_procedure_name(PLy_curr_procedure));
plan->values[i] =
FunctionCall3(&(plan->args[i].out.d.typfunc),
CStringGetDatum(sv),
- ObjectIdGetDatum(plan->args[i].out.d.typioparam),
+ ObjectIdGetDatum(plan->args[i].out.d.typioparam),
Int32GetDatum(-1));
Py_DECREF(so);
for (i = 0; i < rows; i++)
{
PyObject *row = PLyDict_FromTuple(&args, tuptable->vals[i],
- tuptable->tupdesc);
+ tuptable->tupdesc);
PyList_SetItem(result->rows, i, row);
}
FlushErrorState();
if (!PyErr_Occurred())
PyErr_SetString(PLy_exc_error,
- "Unknown error in PLy_spi_execute_fetch_result");
+ "Unknown error in PLy_spi_execute_fetch_result");
Py_DECREF(result);
PLy_typeinfo_dealloc(&args);
return NULL;
Py_XDECREF(so);
/*
- * return a legal object so the interpreter will continue on its merry
- * way
+ * return a legal object so the interpreter will continue on its merry way
*/
Py_INCREF(Py_None);
return Py_None;
vstr = "Unknown";
/*
- * I'm not sure what to do if eob is NULL here -- we can't call
- * PLy_elog because that function calls us, so we could end up
- * with infinite recursion. I'm not even sure if eob could be
- * NULL here -- would an Assert() be more appropriate?
+ * I'm not sure what to do if eob is NULL here -- we can't call PLy_elog
+ * because that function calls us, so we could end up with infinite
+ * recursion. I'm not even sure if eob could be NULL here -- would an
+ * Assert() be more appropriate?
*/
estr = eob ? PyString_AsString(eob) : "Unknown Exception";
xstr = PLy_printf("%s: %s", estr, vstr);
* ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.97 2005/05/06 17:24:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.98 2005/10/15 02:49:50 momjian Exp $
*
**********************************************************************/
pfree(_pltcl_utf_dst); } while (0)
#define UTF_U2E(x) (_pltcl_utf_dst=utf_u2e(_pltcl_utf_src=(x)))
#define UTF_E2U(x) (_pltcl_utf_dst=utf_e2u(_pltcl_utf_src=(x)))
-
#else /* !PLTCL_UTF */
#define UTF_BEGIN
#define UTF_END
#define UTF_U2E(x) (x)
#define UTF_E2U(x) (x)
-
#endif /* PLTCL_UTF */
int nargs;
FmgrInfo arg_out_func[FUNC_MAX_ARGS];
bool arg_is_rowtype[FUNC_MAX_ARGS];
-} pltcl_proc_desc;
+} pltcl_proc_desc;
/**********************************************************************
Oid *argtypes;
FmgrInfo *arginfuncs;
Oid *argtypioparams;
-} pltcl_query_desc;
+} pltcl_query_desc;
/**********************************************************************
* Forward declarations
**********************************************************************/
static void pltcl_init_all(void);
-static void pltcl_init_interp(Tcl_Interp *interp);
+static void pltcl_init_interp(Tcl_Interp * interp);
-static void pltcl_init_load_unknown(Tcl_Interp *interp);
+static void pltcl_init_load_unknown(Tcl_Interp * interp);
Datum pltcl_call_handler(PG_FUNCTION_ARGS);
Datum pltclu_call_handler(PG_FUNCTION_ARGS);
static pltcl_proc_desc *compile_pltcl_function(Oid fn_oid, Oid tgreloid);
-static int pltcl_elog(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_elog(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_quote(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_quote(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_argisnull(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_argisnull(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_returnnull(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_returnnull(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_SPI_execute(ClientData cdata, Tcl_Interp *interp,
- int argc, CONST84 char *argv[]);
-static int pltcl_process_SPI_result(Tcl_Interp *interp,
- CONST84 char *arrayname,
- CONST84 char *loop_body,
- int spi_rc,
- SPITupleTable *tuptable,
- int ntuples);
-static int pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_SPI_execute(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp *interp,
- int argc, CONST84 char *argv[]);
-static int pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_process_SPI_result(Tcl_Interp * interp,
+ CONST84 char *arrayname,
+ CONST84 char *loop_body,
+ int spi_rc,
+ SPITupleTable *tuptable,
+ int ntuples);
+static int pltcl_SPI_prepare(ClientData cdata, Tcl_Interp * interp,
+ int argc, CONST84 char *argv[]);
+static int pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp * interp,
+ int argc, CONST84 char *argv[]);
+static int pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static void pltcl_set_tuple_values(Tcl_Interp *interp, CONST84 char *arrayname,
+static void pltcl_set_tuple_values(Tcl_Interp * interp, CONST84 char *arrayname,
int tupno, HeapTuple tuple, TupleDesc tupdesc);
static void pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc,
- Tcl_DString *retval);
+ Tcl_DString * retval);
/*
* pltcl_init_interp() - initialize a Tcl interpreter
**********************************************************************/
static void
-pltcl_init_interp(Tcl_Interp *interp)
+pltcl_init_interp(Tcl_Interp * interp)
{
/************************************************************
* Install the commands for SPI support in the interpreter
* table pltcl_modules (if it exists)
**********************************************************************/
static void
-pltcl_init_load_unknown(Tcl_Interp *interp)
+pltcl_init_load_unknown(Tcl_Interp * interp)
{
int spi_rc;
int tcl_rc;
PG_TRY();
{
/*
- * Determine if called as function or trigger and
- * call appropriate subhandler
+ * Determine if called as function or trigger and call appropriate
+ * subhandler
*/
if (CALLED_AS_TRIGGER(fcinfo))
{
UTF_BEGIN;
retval = FunctionCall3(&prodesc->result_in_func,
PointerGetDatum(UTF_U2E(interp->result)),
- ObjectIdGetDatum(prodesc->result_typioparam),
+ ObjectIdGetDatum(prodesc->result_typioparam),
Int32GetDatum(-1));
UTF_END;
}
/* Find or compile the function */
prodesc = compile_pltcl_function(fcinfo->flinfo->fn_oid,
- RelationGetRelid(trigdata->tg_relation));
+ RelationGetRelid(trigdata->tg_relation));
pltcl_current_prodesc = prodesc;
/* The oid of the trigger relation for argument TG_relid */
stroid = DatumGetCString(DirectFunctionCall1(oidout,
- ObjectIdGetDatum(trigdata->tg_relation->rd_id)));
+ ObjectIdGetDatum(trigdata->tg_relation->rd_id)));
Tcl_DStringAppendElement(&tcl_cmd, stroid);
pfree(stroid);
Tcl_DStringAppendElement(&tcl_trigtup, "");
else
Tcl_DStringAppendElement(&tcl_trigtup,
- NameStr(tupdesc->attrs[i]->attname));
+ NameStr(tupdesc->attrs[i]->attname));
}
Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_trigtup));
Tcl_DStringFree(&tcl_trigtup);
tupdesc, &tcl_trigtup);
/*
- * Now the command part of the event for TG_op and data for
- * NEW and OLD
+ * Now the command part of the event for TG_op and data for NEW
+ * and OLD
*/
if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
{
* for the input function
************************************************************/
typeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(tupdesc->attrs[attnum - 1]->atttypid),
+ ObjectIdGetDatum(tupdesc->attrs[attnum - 1]->atttypid),
0, 0, 0);
if (!HeapTupleIsValid(typeTup))
elog(ERROR, "cache lookup failed for type %u",
FunctionCall3(&finfo,
CStringGetDatum(UTF_U2E(ret_value)),
ObjectIdGetDatum(typioparam),
- Int32GetDatum(tupdesc->attrs[attnum - 1]->atttypmod));
+ Int32GetDatum(tupdesc->attrs[attnum - 1]->atttypmod));
UTF_END;
}
prodesc = (pltcl_proc_desc *) Tcl_GetHashValue(hashent);
uptodate = (prodesc->fn_xmin == HeapTupleHeaderGetXmin(procTup->t_data) &&
- prodesc->fn_cmin == HeapTupleHeaderGetCmin(procTup->t_data));
+ prodesc->fn_cmin == HeapTupleHeaderGetCmin(procTup->t_data));
if (!uptodate)
{
if (!is_trigger)
{
typeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(procStruct->prorettype),
+ ObjectIdGetDatum(procStruct->prorettype),
0, 0, 0);
if (!HeapTupleIsValid(typeTup))
{
free(prodesc);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("pltcl functions cannot return type %s",
- format_type_be(procStruct->prorettype))));
+ errmsg("pltcl functions cannot return type %s",
+ format_type_be(procStruct->prorettype))));
}
}
free(prodesc);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("pltcl functions cannot return tuples yet")));
+ errmsg("pltcl functions cannot return tuples yet")));
}
perm_fmgr_info(typeStruct->typinput, &(prodesc->result_in_func));
for (i = 0; i < prodesc->nargs; i++)
{
typeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(procStruct->proargtypes.values[i]),
+ ObjectIdGetDatum(procStruct->proargtypes.values[i]),
0, 0, 0);
if (!HeapTupleIsValid(typeTup))
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("pltcl functions cannot take type %s",
- format_type_be(procStruct->proargtypes.values[i]))));
+ format_type_be(procStruct->proargtypes.values[i]))));
}
if (typeStruct->typtype == 'c')
* pltcl_elog() - elog() support for PLTcl
**********************************************************************/
static int
-pltcl_elog(ClientData cdata, Tcl_Interp *interp,
+pltcl_elog(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
volatile int level;
* be used in SPI_execute query strings
**********************************************************************/
static int
-pltcl_quote(ClientData cdata, Tcl_Interp *interp,
+pltcl_quote(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
char *tmp;
* pltcl_argisnull() - determine if a specific argument is NULL
**********************************************************************/
static int
-pltcl_argisnull(ClientData cdata, Tcl_Interp *interp,
+pltcl_argisnull(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
int argno;
* pltcl_returnnull() - Cause a NULL return from a function
**********************************************************************/
static int
-pltcl_returnnull(ClientData cdata, Tcl_Interp *interp,
+pltcl_returnnull(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
FunctionCallInfo fcinfo = pltcl_current_fcinfo;
CurrentResourceOwner = oldowner;
/*
- * AtEOSubXact_SPI() should not have popped any SPI context,
- * but just in case it did, make sure we remain connected.
+ * AtEOSubXact_SPI() should not have popped any SPI context, but just in
+ * case it did, make sure we remain connected.
*/
SPI_restore_connection();
}
static void
-pltcl_subtrans_abort(Tcl_Interp *interp,
+pltcl_subtrans_abort(Tcl_Interp * interp,
MemoryContext oldcontext, ResourceOwner oldowner)
{
ErrorData *edata;
CurrentResourceOwner = oldowner;
/*
- * If AtEOSubXact_SPI() popped any SPI context of the subxact,
- * it will have left us in a disconnected state. We need this
- * hack to return to connected state.
+ * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will
+ * have left us in a disconnected state. We need this hack to return to
+ * connected state.
*/
SPI_restore_connection();
* for the Tcl interpreter
**********************************************************************/
static int
-pltcl_SPI_execute(ClientData cdata, Tcl_Interp *interp,
+pltcl_SPI_execute(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
int my_rc;
* Shared code between pltcl_SPI_execute and pltcl_SPI_execute_plan
*/
static int
-pltcl_process_SPI_result(Tcl_Interp *interp,
+pltcl_process_SPI_result(Tcl_Interp * interp,
CONST84 char *arrayname,
CONST84 char *loop_body,
int spi_rc,
break;
case SPI_OK_SELECT:
+
/*
* Process the tuples we got
*/
if (loop_body == NULL)
{
/*
- * If there is no loop body given, just set the variables
- * from the first tuple (if any)
+ * If there is no loop body given, just set the variables from
+ * the first tuple (if any)
*/
if (ntuples > 0)
pltcl_set_tuple_values(interp, arrayname, 0,
else
{
/*
- * There is a loop body - process all tuples and evaluate
- * the body on each
+ * There is a loop body - process all tuples and evaluate the
+ * body on each
*/
for (i = 0; i < ntuples; i++)
{
* and not save the plan currently.
**********************************************************************/
static int
-pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp,
+pltcl_SPI_prepare(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
int nargs;
* pltcl_SPI_execute_plan() - Execute a prepared plan
**********************************************************************/
static int
-pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp *interp,
+pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
int my_rc;
if (strlen(nulls) != qdesc->nargs)
{
Tcl_SetResult(interp,
- "length of nulls string doesn't match # of arguments",
+ "length of nulls string doesn't match # of arguments",
TCL_VOLATILE);
return TCL_ERROR;
}
if (callnargs != qdesc->nargs)
{
Tcl_SetResult(interp,
- "argument list length doesn't match # of arguments for query",
+ "argument list length doesn't match # of arguments for query",
TCL_VOLATILE);
ckfree((char *) callargs);
return TCL_ERROR;
* be used after insert queries
**********************************************************************/
static int
-pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp *interp,
+pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
char buf[64];
* of a given tuple
**********************************************************************/
static void
-pltcl_set_tuple_values(Tcl_Interp *interp, CONST84 char *arrayname,
+pltcl_set_tuple_values(Tcl_Interp * interp, CONST84 char *arrayname,
int tupno, HeapTuple tuple, TupleDesc tupdesc)
{
int i;
* for the output function
************************************************************/
typeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(tupdesc->attrs[i]->atttypid),
+ ObjectIdGetDatum(tupdesc->attrs[i]->atttypid),
0, 0, 0);
if (!HeapTupleIsValid(typeTup))
elog(ERROR, "cache lookup failed for type %u",
**********************************************************************/
static void
pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc,
- Tcl_DString *retval)
+ Tcl_DString * retval)
{
int i;
char *outputstr;
* for the output function
************************************************************/
typeTup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(tupdesc->attrs[i]->atttypid),
+ ObjectIdGetDatum(tupdesc->attrs[i]->atttypid),
0, 0, 0);
if (!HeapTupleIsValid(typeTup))
elog(ERROR, "cache lookup failed for type %u",
* as a service.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/copydir.c,v 1.14 2005/09/03 15:55:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/copydir.c,v 1.15 2005/10/15 02:49:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
while ((xlde = ReadDir(xldir, fromdir)) != NULL)
{
- struct stat fst;
+ struct stat fst;
- if (strcmp(xlde->d_name, ".") == 0 ||
+ if (strcmp(xlde->d_name, ".") == 0 ||
strcmp(xlde->d_name, "..") == 0)
- continue;
+ continue;
snprintf(fromfile, MAXPGPATH, "%s/%s", fromdir, xlde->d_name);
snprintf(tofile, MAXPGPATH, "%s/%s", todir, xlde->d_name);
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)crypt.c 8.1.1.1 (Berkeley) 8/18/93";
-
#else
__RCSID("$NetBSD: crypt.c,v 1.18 2001/03/01 14:37:35 wiz Exp $");
#endif
/* ===== (mostly) Standard DES Tables ==================== */
-static const unsigned char IP[] = { /* initial permutation */
+static const unsigned char IP[] = { /* initial permutation */
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
0, 0, 46, 42, 50, 36, 29, 32,
};
-static const unsigned char S[8][64] = { /* 48->32 bit substitution tables */
+static const unsigned char S[8][64] = { /* 48->32 bit substitution tables */
/* S[1] */
{14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
22, 11, 4, 25,
};
-static const unsigned char CIFP[] = { /* compressed/interleaved permutation */
+static const unsigned char CIFP[] = { /* compressed/interleaved permutation */
1, 2, 3, 4, 17, 18, 19, 20,
5, 6, 7, 8, 21, 22, 23, 24,
9, 10, 11, 12, 25, 26, 27, 28,
45, 46, 47, 48, 61, 62, 63, 64,
};
-static const unsigned char itoa64[] = /* 0..63 => ascii-64 */
+static const unsigned char itoa64[] = /* 0..63 => ascii-64 */
"./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
key++;
keyblock.b[i] = t;
}
- if (des_setkey((char *) keyblock.b)) /* also initializes
- * "a64toi" */
+ if (des_setkey((char *) keyblock.b)) /* also initializes "a64toi" */
return (NULL);
encp = &cryptresult[0];
int tableno;
static unsigned char perm[64],
tmp32[32]; /* "static" for speed */
+
/* static volatile long init_start = 0; not used */
/*
* Win32 (NT, Win2k, XP). replace() doesn't work on Win95/98/Me.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/dirmod.c,v 1.40 2005/09/18 09:48:24 petere Exp $
+ * $PostgreSQL: pgsql/src/port/dirmod.c,v 1.41 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define palloc(sz) pgport_palloc(sz)
#define pstrdup(str) pgport_pstrdup(str)
#endif
-
-#else /* FRONTEND */
+#else /* FRONTEND */
/*
* In frontend, fake palloc behavior with these
}
return res;
}
-
-#endif /* FRONTEND */
+#endif /* FRONTEND */
#if defined(WIN32) || defined(__CYGWIN__)
int loops = 0;
/*
- * We need these loops because even though PostgreSQL uses flags
- * that allow rename while the file is open, other applications
- * might have these files open without those flags.
+ * We need these loops because even though PostgreSQL uses flags that
+ * allow rename while the file is open, other applications might have
+ * these files open without those flags.
*/
#if defined(WIN32) && !defined(__CYGWIN__)
while (!MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING))
int loops = 0;
/*
- * We need these loops because even though PostgreSQL uses flags
- * that allow unlink while the file is open, other applications
- * might have these files open without those flags.
+ * We need these loops because even though PostgreSQL uses flags that
+ * allow unlink while the file is open, other applications might have
+ * these files open without those flags.
*/
while (unlink(path))
{
}
-#ifdef WIN32 /* Cygwin has its own symlinks */
+#ifdef WIN32 /* Cygwin has its own symlinks */
/*
* pgsymlink support:
CreateDirectory(newpath, 0);
dirhandle = CreateFile(newpath, GENERIC_READ | GENERIC_WRITE,
0, 0, OPEN_EXISTING,
- FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, 0);
+ FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, 0);
if (dirhandle == INVALID_HANDLE_VALUE)
return -1;
reparseBuf->PathBuffer, MAX_PATH);
/*
- * FSCTL_SET_REPARSE_POINT is coded differently depending on SDK
- * version; we use our own definition
+ * FSCTL_SET_REPARSE_POINT is coded differently depending on SDK version;
+ * we use our own definition
*/
if (!DeviceIoControl(dirhandle,
- CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 41, METHOD_BUFFERED, FILE_ANY_ACCESS),
+ CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 41, METHOD_BUFFERED, FILE_ANY_ACCESS),
reparseBuf,
- reparseBuf->ReparseDataLength + REPARSE_JUNCTION_DATA_BUFFER_HEADER_SIZE,
+ reparseBuf->ReparseDataLength + REPARSE_JUNCTION_DATA_BUFFER_HEADER_SIZE,
0, 0, &len, 0))
{
LPSTR msg;
return 0;
}
-
-#endif /* WIN32 */
-
-#endif /* defined(WIN32) || defined(__CYGWIN__) */
+#endif /* WIN32 */
+#endif /* defined(WIN32) || defined(__CYGWIN__) */
/* We undefined this above, so we redefine it */
/*
* fnames
*
- * return a list of the names of objects in the argument directory
+ * return a list of the names of objects in the argument directory
*/
static char **
fnames(char *path)
{
if (strcmp(file->d_name, ".") != 0 && strcmp(file->d_name, "..") != 0)
{
- if (numnames+1 >= fnsize)
+ if (numnames + 1 >= fnsize)
{
fnsize *= 2;
filenames = (char **) repalloc(filenames,
#ifdef WIN32
/*
- * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- * not in released version
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but not in
+ * released version
*/
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
struct stat statbuf;
/*
- * we copy all the names out of the directory before we start
- * modifying it.
+ * we copy all the names out of the directory before we start modifying
+ * it.
*/
filenames = fnames(path);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/exec.c,v 1.38 2005/02/22 04:43:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/exec.c,v 1.39 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
struct passwd *pwp;
int i;
int in_grp = 0;
-
#else
char path_exe[MAXPGPATH + sizeof(".exe") - 1];
#endif
/*
* Ensure that the file exists and is a regular file.
*
- * XXX if you have a broken system where stat() looks at the symlink
- * instead of the underlying file, you lose.
+ * XXX if you have a broken system where stat() looks at the symlink instead
+ * of the underlying file, you lose.
*/
if (stat(path, &buf) < 0)
return -1;
#endif
/*
- * Since no explicit path was supplied, the user must have
- * been relying on PATH. We'll search the same PATH.
+ * Since no explicit path was supplied, the user must have been relying on
+ * PATH. We'll search the same PATH.
*/
if ((path = getenv("PATH")) && *path)
{
switch (validate_exec(retpath))
{
- case 0: /* found ok */
+ case 0: /* found ok */
return resolve_symlinks(retpath);
case -1: /* wasn't even a candidate, keep looking */
break;
char *fname;
/*
- * To resolve a symlink properly, we have to chdir into its directory
- * and then chdir to where the symlink points; otherwise we may fail to
+ * To resolve a symlink properly, we have to chdir into its directory and
+ * then chdir to where the symlink points; otherwise we may fail to
* resolve relative links correctly (consider cases involving mount
* points, for example). After following the final symlink, we use
* getcwd() to figure out where the heck we're at.
*
- * One might think we could skip all this if path doesn't point to a
- * symlink to start with, but that's wrong. We also want to get rid
- * of any directory symlinks that are present in the given path.
- * We expect getcwd() to give us an accurate, symlink-free path.
+ * One might think we could skip all this if path doesn't point to a symlink
+ * to start with, but that's wrong. We also want to get rid of any
+ * directory symlinks that are present in the given path. We expect
+ * getcwd() to give us an accurate, symlink-free path.
*/
if (!getcwd(orig_wd, MAXPGPATH))
{
for (;;)
{
- char *lsep;
- int rllen;
+ char *lsep;
+ int rllen;
lsep = last_dir_separator(path);
if (lsep)
log_error(_("could not change directory to \"%s\""), orig_wd);
return -1;
}
-
-#endif /* HAVE_READLINK */
+#endif /* HAVE_READLINK */
return 0;
}
return NULL;
return line;
-
-#else /* WIN32 */
+#else /* WIN32 */
SECURITY_ATTRIBUTES sattr;
HANDLE childstdoutrd,
&pi))
{
/* Successfully started the process */
- char *lineptr;
+ char *lineptr;
ZeroMemory(line, maxsize);
/* Try to read at least one line from the pipe */
/* This may require more than one wait/read attempt */
- for (lineptr = line; lineptr < line+maxsize-1; )
+ for (lineptr = line; lineptr < line + maxsize - 1;)
{
DWORD bytesread = 0;
/* Let's see if we can read */
if (WaitForSingleObject(childstdoutrddup, 10000) != WAIT_OBJECT_0)
- break; /* Timeout, but perhaps we got a line already */
+ break; /* Timeout, but perhaps we got a line already */
- if (!ReadFile(childstdoutrddup, lineptr, maxsize-(lineptr-line),
+ if (!ReadFile(childstdoutrddup, lineptr, maxsize - (lineptr - line),
&bytesread, NULL))
- break; /* Error, but perhaps we got a line already */
+ break; /* Error, but perhaps we got a line already */
lineptr += strlen(lineptr);
if (!bytesread)
- break; /* EOF */
+ break; /* EOF */
if (strchr(line, '\n'))
- break; /* One or more lines read */
+ break; /* One or more lines read */
}
if (lineptr != line)
int len;
/* If we got more than one line, cut off after the first \n */
- lineptr = strchr(line,'\n');
+ lineptr = strchr(line, '\n');
if (lineptr)
- *(lineptr+1) = '\0';
+ *(lineptr + 1) = '\0';
len = strlen(line);
/*
* If EOL is \r\n, convert to just \n. Because stdout is a
* text-mode stream, the \n output by the child process is
- * received as \r\n, so we convert it to \n. The server
- * main.c sets setvbuf(stdout, NULL, _IONBF, 0) which has the
- * effect of disabling \n to \r\n expansion for stdout.
+ * received as \r\n, so we convert it to \n. The server main.c
+ * sets setvbuf(stdout, NULL, _IONBF, 0) which has the effect of
+ * disabling \n to \r\n expansion for stdout.
*/
if (len >= 2 && line[len - 2] == '\r' && line[len - 1] == '\n')
{
}
/*
- * We emulate fgets() behaviour. So if there is no newline at
- * the end, we add one...
+ * We emulate fgets() behaviour. So if there is no newline at the
+ * end, we add one...
*/
if (len == 0 || line[len - 1] != '\n')
strcat(line, "\n");
CloseHandle(childstdoutrddup);
return retval;
-#endif /* WIN32 */
+#endif /* WIN32 */
}
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/getaddrinfo.c,v 1.20 2005/10/13 23:22:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/getaddrinfo.c,v 1.21 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Here we need to declare what the function pointers look like
*/
typedef int (__stdcall * getaddrinfo_ptr_t) (const char *nodename,
- const char *servname,
- const struct addrinfo * hints,
- struct addrinfo ** res);
+ const char *servname,
+ const struct addrinfo * hints,
+ struct addrinfo ** res);
typedef void (__stdcall * freeaddrinfo_ptr_t) (struct addrinfo * ai);
typedef int (__stdcall * getnameinfo_ptr_t) (const struct sockaddr * sa,
- int salen,
- char *host, int hostlen,
- char *serv, int servlen,
- int flags);
+ int salen,
+ char *host, int hostlen,
+ char *serv, int servlen,
+ int flags);
/* static pointers to the native routines, so we only do the lookup once. */
static getaddrinfo_ptr_t getaddrinfo_ptr = NULL;
return (getaddrinfo_ptr != NULL);
/*
- * For Windows XP and Windows 2003 (and longhorn/vista), the IPv6
- * routines are present in the WinSock 2 library (ws2_32.dll).
- * Try that first
+ * For Windows XP and Windows 2003 (and longhorn/vista), the IPv6 routines
+ * are present in the WinSock 2 library (ws2_32.dll). Try that first
*/
hLibrary = LoadLibraryA("ws2_32");
getaddrinfo_ptr = (getaddrinfo_ptr_t) GetProcAddress(hLibrary,
"getaddrinfo");
freeaddrinfo_ptr = (freeaddrinfo_ptr_t) GetProcAddress(hLibrary,
- "freeaddrinfo");
+ "freeaddrinfo");
getnameinfo_ptr = (getnameinfo_ptr_t) GetProcAddress(hLibrary,
"getnameinfo");
alreadyLookedForIpv6routines = true;
return (getaddrinfo_ptr != NULL);
}
-
#endif
struct addrinfo hints;
#ifdef WIN32
+
/*
* If Windows has native IPv6 support, use the native Windows routine.
* Otherwise, fall through and use our own code.
if (res)
{
#ifdef WIN32
+
/*
* If Windows has native IPv6 support, use the native Windows routine.
* Otherwise, fall through and use our own code.
}
return hstrerror(hcode);
-
-#else /* !HAVE_HSTRERROR */
+#else /* !HAVE_HSTRERROR */
switch (errcode)
{
return "Unknown host";
case EAI_AGAIN:
return "Host name lookup failure";
- /* Errors below are probably WIN32 only */
+ /* Errors below are probably WIN32 only */
#ifdef EAI_BADFLAGS
case EAI_BADFLAGS:
return "Invalid argument";
char *service, int servicelen, int flags)
{
#ifdef WIN32
+
/*
* If Windows has native IPv6 support, use the native Windows routine.
* Otherwise, fall through and use our own code.
!(oli = strchr(ostr, optopt)))
{
/*
- * if the user didn't specify '-' as an option, assume it means
- * -1.
+ * if the user didn't specify '-' as an option, assume it means -1.
*/
if (optopt == (int) '-')
return -1;
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/port/getopt_long.c,v 1.4 2005/07/28 04:03:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/getopt_long.c,v 1.5 2005/10/15 02:49:51 momjian Exp $
*/
#include "c.h"
return BADARG;
if (opterr)
fprintf(stderr,
- "%s: option requires an argument -- %s\n",
+ "%s: option requires an argument -- %s\n",
argv[0], place);
place = EMSG;
optind++;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/getrusage.c,v 1.10 2005/07/28 04:03:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/getrusage.c,v 1.11 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
#ifdef WIN32
- FILETIME starttime;
- FILETIME exittime;
- FILETIME kerneltime;
- FILETIME usertime;
+ FILETIME starttime;
+ FILETIME exittime;
+ FILETIME kerneltime;
+ FILETIME usertime;
ULARGE_INTEGER li;
- if (rusage == (struct rusage *)NULL)
+ if (rusage == (struct rusage *) NULL)
{
errno = EFAULT;
return -1;
/* Convert FILETIMEs (0.1 us) to struct timeval */
memcpy(&li, &kerneltime, sizeof(FILETIME));
- li.QuadPart /= 10L; /* Convert to microseconds */
- rusage->ru_stime.tv_sec = li.QuadPart / 1000000L;
+ li.QuadPart /= 10L; /* Convert to microseconds */
+ rusage->ru_stime.tv_sec = li.QuadPart / 1000000L;
rusage->ru_stime.tv_usec = li.QuadPart % 1000000L;
memcpy(&li, &usertime, sizeof(FILETIME));
- li.QuadPart /= 10L; /* Convert to microseconds */
- rusage->ru_utime.tv_sec = li.QuadPart / 1000000L;
+ li.QuadPart /= 10L; /* Convert to microseconds */
+ rusage->ru_utime.tv_sec = li.QuadPart / 1000000L;
rusage->ru_utime.tv_usec = li.QuadPart % 1000000L;
-
-#else /* all but WIN32 */
+#else /* all but WIN32 */
struct tms tms;
int tick_rate = CLK_TCK; /* ticks per second */
rusage->ru_utime.tv_usec = TICK_TO_USEC(u, tick_rate);
rusage->ru_stime.tv_sec = TICK_TO_SEC(s, tick_rate);
rusage->ru_stime.tv_usec = TICK_TO_USEC(u, tick_rate);
-
-#endif /* WIN32 */
+#endif /* WIN32 */
return 0;
}
-/* $PostgreSQL: pgsql/src/port/inet_aton.c,v 1.7 2004/09/27 23:24:45 momjian Exp $
+/* $PostgreSQL: pgsql/src/port/inet_aton.c,v 1.8 2005/10/15 02:49:51 momjian Exp $
*
* This inet_aton() function was taken from the GNU C library and
* incorporated into Postgres for those systems which do not have this
for (;;)
{
/*
- * Collect number up to ``.''. Values are specified as for C:
- * 0x=hex, 0=octal, other=decimal.
+ * Collect number up to ``.''. Values are specified as for C: 0x=hex,
+ * 0=octal, other=decimal.
*/
val = 0;
base = 10;
if (*cp == '.')
{
/*
- * Internet format: a.b.c.d a.b.c (with c treated as
- * 16-bits) a.b (with b treated as 24 bits)
+ * Internet format: a.b.c.d a.b.c (with c treated as 16-bits)
+ * a.b (with b treated as 24 bits)
*/
if (pp >= parts + 3 || val > 0xff)
return 0;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/isinf.c,v 1.6 2004/12/31 22:03:53 pgsql Exp $
+ * $PostgreSQL: pgsql/src/port/isinf.c,v 1.7 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include
#include
-#if HAVE_FPCLASS /* this is _not_ HAVE_FP_CLASS, and not
- * typo */
+#if HAVE_FPCLASS /* this is _not_ HAVE_FP_CLASS, and not typo */
#if HAVE_IEEEFP_H
#include
}
return 0;
}
-
#else
#if defined(HAVE_FP_CLASS) || defined(HAVE_FP_CLASS_D)
{
#if HAVE_FP_CLASS
int fpclass = fp_class(x);
-
#else
int fpclass = fp_class_d(x);
#endif
return -1;
return 0;
}
-
#elif defined(HAVE_CLASS)
int
isinf(double x)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/memcmp.c,v 1.8 2005/07/28 04:03:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/memcmp.c,v 1.9 2005/10/15 02:49:51 momjian Exp $
*
* This file was taken from NetBSD and is used by SunOS because memcmp
* on that platform does not properly compare negative bytes. The
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/noblock.c,v 1.7 2005/07/28 04:03:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/noblock.c,v 1.8 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
pg_set_block(int sock)
{
#if !defined(WIN32) && !defined(__BEOS__)
- int flags;
+ int flags;
+
flags = fcntl(sock, F_GETFL);
if (flags < 0 || fcntl(sock, F_SETFL, (long) (flags & ~O_NONBLOCK)))
return false;
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/port/open.c,v 1.10 2005/07/28 04:03:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/open.c,v 1.11 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include
-int win32_open(const char *fileName, int fileFlags, ...);
+int win32_open(const char *fileName, int fileFlags,...);
static int
openFlagsToCreateFileFlags(int openFlags)
assert((fileFlags & ((O_RDONLY | O_WRONLY | O_RDWR) | O_APPEND |
(O_RANDOM | O_SEQUENTIAL | O_TEMPORARY) |
_O_SHORT_LIVED | O_DSYNC |
- (O_CREAT | O_TRUNC | O_EXCL) | (O_TEXT | O_BINARY))) == fileFlags);
+ (O_CREAT | O_TRUNC | O_EXCL) | (O_TEXT | O_BINARY))) == fileFlags);
sa.nLength = sizeof(sa);
sa.bInheritHandle = TRUE;
if ((h = CreateFile(fileName,
/* cannot use O_RDONLY, as it == 0 */
- (fileFlags & O_RDWR) ? (GENERIC_WRITE | GENERIC_READ) :
- ((fileFlags & O_WRONLY) ? GENERIC_WRITE : GENERIC_READ),
- /* These flags allow concurrent rename/unlink */
- (FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE),
+ (fileFlags & O_RDWR) ? (GENERIC_WRITE | GENERIC_READ) :
+ ((fileFlags & O_WRONLY) ? GENERIC_WRITE : GENERIC_READ),
+ /* These flags allow concurrent rename/unlink */
+ (FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE),
&sa,
openFlagsToCreateFileFlags(fileFlags),
FILE_ATTRIBUTE_NORMAL |
- ((fileFlags & O_RANDOM) ? FILE_FLAG_RANDOM_ACCESS : 0) |
- ((fileFlags & O_SEQUENTIAL) ? FILE_FLAG_SEQUENTIAL_SCAN : 0) |
- ((fileFlags & _O_SHORT_LIVED) ? FILE_ATTRIBUTE_TEMPORARY : 0) |
- ((fileFlags & O_TEMPORARY) ? FILE_FLAG_DELETE_ON_CLOSE : 0)|
- ((fileFlags & O_DSYNC) ? FILE_FLAG_WRITE_THROUGH : 0),
+ ((fileFlags & O_RANDOM) ? FILE_FLAG_RANDOM_ACCESS : 0) |
+ ((fileFlags & O_SEQUENTIAL) ? FILE_FLAG_SEQUENTIAL_SCAN : 0) |
+ ((fileFlags & _O_SHORT_LIVED) ? FILE_ATTRIBUTE_TEMPORARY : 0) |
+ ((fileFlags & O_TEMPORARY) ? FILE_FLAG_DELETE_ON_CLOSE : 0) |
+ ((fileFlags & O_DSYNC) ? FILE_FLAG_WRITE_THROUGH : 0),
NULL)) == INVALID_HANDLE_VALUE)
{
switch (GetLastError())
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/path.c,v 1.60 2005/10/13 15:37:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/path.c,v 1.61 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#endif
static void make_relative_path(char *ret_path, const char *target_path,
- const char *bin_path, const char *my_exec_path);
+ const char *bin_path, const char *my_exec_path);
static void trim_directory(char *path);
static void trim_trailing_separator(char *path);
/*
* skip_drive
*
- * On Windows, a path may begin with "C:" or "//network/". Advance over
+ * On Windows, a path may begin with "C:" or "//network/". Advance over
* this and point to the effective start of the path.
*/
#ifdef WIN32
}
return (char *) path;
}
-
#else
#define skip_drive(path) (path)
-
#endif
/*
{
if (ret_path != head)
StrNCpy(ret_path, head, MAXPGPATH);
+
/*
- * Remove any leading "." and ".." in the tail component,
- * adjusting head as needed.
+ * Remove any leading "." and ".." in the tail component, adjusting head
+ * as needed.
*/
for (;;)
{
void
canonicalize_path(char *path)
{
- char *p, *to_p;
+ char *p,
+ *to_p;
char *spath;
bool was_sep = false;
int pending_strips;
#ifdef WIN32
+
/*
- * The Windows command processor will accept suitably quoted paths
- * with forward slashes, but barfs badly with mixed forward and back
- * slashes.
+ * The Windows command processor will accept suitably quoted paths with
+ * forward slashes, but barfs badly with mixed forward and back slashes.
*/
for (p = path; *p; p++)
{
}
/*
- * In Win32, if you do: prog.exe "a b" "\c\d\" the system will pass
- * \c\d" as argv[2], so trim off trailing quote.
+ * In Win32, if you do: prog.exe "a b" "\c\d\" the system will pass \c\d"
+ * as argv[2], so trim off trailing quote.
*/
if (p > path && *(p - 1) == '"')
*(p - 1) = '/';
#endif
/*
- * Removing the trailing slash on a path means we never get ugly
- * double trailing slashes. Also, Win32 can't stat() a directory
- * with a trailing slash. Don't remove a leading slash, though.
+ * Removing the trailing slash on a path means we never get ugly double
+ * trailing slashes. Also, Win32 can't stat() a directory with a trailing
+ * slash. Don't remove a leading slash, though.
*/
trim_trailing_separator(path);
/*
- * Remove duplicate adjacent separators
+ * Remove duplicate adjacent separators
*/
p = path;
#ifdef WIN32
/*
* Remove any trailing uses of "." and process ".." ourselves
*
- * Note that "/../.." should reduce to just "/", while "../.." has to
- * be kept as-is. In the latter case we put back mistakenly trimmed
- * ".." components below. Also note that we want a Windows drive spec
- * to be visible to trim_directory(), but it's not part of the logic
- * that's looking at the name components; hence distinction between
- * path and spath.
+ * Note that "/../.." should reduce to just "/", while "../.." has to be kept
+ * as-is. In the latter case we put back mistakenly trimmed ".."
+ * components below. Also note that we want a Windows drive spec to be
+ * visible to trim_directory(), but it's not part of the logic that's
+ * looking at the name components; hence distinction between path and
+ * spath.
*/
spath = skip_drive(path);
pending_strips = 0;
if (pending_strips > 0)
{
/*
- * We could only get here if path is now totally empty (other than
- * a possible drive specifier on Windows).
- * We have to put back one or more ".."'s that we took off.
+ * We could only get here if path is now totally empty (other than a
+ * possible drive specifier on Windows). We have to put back one or
+ * more ".."'s that we took off.
*/
while (--pending_strips > 0)
strcat(path, "../");
bool
path_contains_parent_reference(const char *path)
{
- int path_len;
+ int path_len;
path = skip_drive(path); /* C: shouldn't affect our conclusion */
path_len = strlen(path);
/*
- * ".." could be the whole path; otherwise, if it's present it must
- * be at the beginning, in the middle, or at the end.
+ * ".." could be the whole path; otherwise, if it's present it must be at
+ * the beginning, in the middle, or at the end.
*/
if (strcmp(path, "..") == 0 ||
strncmp(path, "../", 3) == 0 ||
bool
path_is_prefix_of_path(const char *path1, const char *path2)
{
- int path1_len = strlen(path1);
+ int path1_len = strlen(path1);
if (strncmp(path1, path2, path1_len) == 0 &&
(IS_DIR_SEP(path2[path1_len]) || path2[path1_len] == '\0'))
}
/*
- * Extracts the actual name of the program as called -
+ * Extracts the actual name of the program as called -
* stripped of .exe suffix if any
*/
const char *
#if defined(__CYGWIN__) || defined(WIN32)
/* strip .exe suffix, regardless of case */
if (strlen(nodir_name) > sizeof(EXE) - 1 &&
- pg_strcasecmp(nodir_name + strlen(nodir_name)-(sizeof(EXE)-1), EXE) == 0)
+ pg_strcasecmp(nodir_name + strlen(nodir_name) - (sizeof(EXE) - 1), EXE) == 0)
{
- char *progname;
+ char *progname;
progname = strdup(nodir_name); /* leaks memory, but called only once */
if (progname == NULL)
{
fprintf(stderr, "%s: out of memory\n", nodir_name);
- exit(1); /* This could exit the postmaster */
+ exit(1); /* This could exit the postmaster */
}
progname[strlen(progname) - (sizeof(EXE) - 1)] = '\0';
- nodir_name = progname;
+ nodir_name = progname;
}
#endif
* bin_path, then we build the result as my_exec_path (less the executable
* name and last directory) joined to the non-matching part of target_path.
* Otherwise, we return target_path as-is.
- *
+ *
* For example:
* target_path = '/usr/local/share/postgresql'
- * bin_path = '/usr/local/bin'
+ * bin_path = '/usr/local/bin'
* my_exec_path = '/opt/pgsql/bin/postmaster'
* Given these inputs we would return '/opt/pgsql/share/postgresql'
*/
return false;
StrNCpy(ret_path, pwd->pw_dir, MAXPGPATH);
return true;
-
#else
char tmppath[MAX_PATH];
* Add do ... while() macro fix
* Remove __inline, _DIAGASSERTs, __P
*
- * $PostgreSQL: pgsql/src/port/qsort.c,v 1.7 2005/07/28 04:03:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/qsort.c,v 1.8 2005/10/15 02:49:51 momjian Exp $
*/
/* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* causing nasty effects.
**************************************************************/
-/*static char _id[] = "$PostgreSQL: pgsql/src/port/snprintf.c,v 1.28 2005/07/28 04:03:14 tgl Exp $";*/
+/*static char _id[] = "$PostgreSQL: pgsql/src/port/snprintf.c,v 1.29 2005/10/15 02:49:51 momjian Exp $";*/
static void dopr(char *buffer, const char *format, va_list args, char *end);
va_start(args, fmt);
len = pg_vsnprintf(buffer, (size_t) 4096, fmt, args);
va_end(args);
-
+
for (p = buffer; *p; p++)
putchar(*p);
return len;
}
-static int adjust_sign(int is_negative, int forcesign, int *signvalue);
+static int adjust_sign(int is_negative, int forcesign, int *signvalue);
static void adjust_padlen(int minlen, int vallen, int leftjust, int *padlen);
static void leading_pad(int zpad, int *signvalue, int *padlen, char *end,
- char **output);
+ char **output);
static void trailing_pad(int *padlen, char *end, char **output);
static void fmtstr(char *value, int leftjust, int minlen, int maxwidth,
static void fmtint(int64 value, int base, int dosign, int forcesign,
int leftjust, int minlen, int zpad, char *end, char **output);
static void fmtfloat(double value, char type, int forcesign,
- int leftjust, int minlen, int zpad, int precision, int pointflag, char *end,
- char **output);
+ int leftjust, int minlen, int zpad, int precision, int pointflag, char *end,
+ char **output);
static void dostr(char *str, int cut, char *end, char **output);
static void dopr_outch(int c, char *end, char **output);
} *fmtpar, **fmtparptr;
/*
- * Create enough structures to hold all arguments. This overcounts,
- * eg not all '*' characters are necessarily arguments, but it's not
- * worth being exact.
+ * Create enough structures to hold all arguments. This overcounts, eg
+ * not all '*' characters are necessarily arguments, but it's not worth
+ * being exact.
*/
for (p = format; *p != '\0'; p++)
if (*p == '%' || *p == '*')
break;
case FMTLEN:
{
- int minlen = va_arg(args, int);
- int leftjust = 0;
+ int minlen = va_arg(args, int);
+ int leftjust = 0;
if (minlen < 0)
{
break;
case FMTFLOAT:
fmtfloat(fmtparptr[i]->fvalue, fmtparptr[i]->type,
- fmtparptr[i]->forcesign, fmtparptr[i]->leftjust,
- fmtparptr[i]->minlen, fmtparptr[i]->zpad,
- fmtparptr[i]->precision, fmtparptr[i]->pointflag,
- end, &output);
+ fmtparptr[i]->forcesign, fmtparptr[i]->leftjust,
+ fmtparptr[i]->minlen, fmtparptr[i]->zpad,
+ fmtparptr[i]->precision, fmtparptr[i]->pointflag,
+ end, &output);
break;
case FMTCHAR:
dopr_outch(fmtparptr[i]->charvalue, end, &output);
/* Handle +/- and %X (uppercase hex) */
if (dosign && adjust_sign((value < 0), forcesign, &signvalue))
- value = -value;
+ value = -value;
if (base < 0)
{
caps = 1;
adjust_padlen(minlen, vallen, leftjust, &padlen);
leading_pad(zpad, &signvalue, &padlen, end, output);
-
+
while (vallen > 0)
dopr_outch(convert[--vallen], end, output);
sprintf(fmt, "%%%c", type);
if (adjust_sign((value < 0), forcesign, &signvalue))
- value = -value;
+ value = -value;
vallen = sprintf(convert, fmt, value);
{
dopr_outch(*signvalue, end, output);
if (*padlen > 0)
- --*padlen;
+ --* padlen;
if (padlen < 0)
++padlen;
}
++*padlen;
}
}
-
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/sprompt.c,v 1.11 2005/02/22 04:43:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/sprompt.c,v 1.12 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifdef HAVE_TERMIOS_H
struct termios t_orig,
t;
-
#else
#ifdef WIN32
HANDLE t = NULL;
prompt_state = true; /* disable SIGINT */
/*
- * Do not try to collapse these into one "w+" mode file. Doesn't work
- * on some platforms (eg, HPUX 10.20).
+ * Do not try to collapse these into one "w+" mode file. Doesn't work on
+ * some platforms (eg, HPUX 10.20).
*/
termin = fopen("/dev/tty", "r");
termout = fopen("/dev/tty", "w");
/*
* Compute the cutoff value between legal numbers and illegal numbers.
- * That is the largest legal value, divided by the base. An input
- * number that is greater than this value, if followed by a legal
- * input character, is too big. One that is equal to this value may
- * be valid or not; the limit between valid and invalid numbers is
- * then based on the last digit. For instance, if the range for longs
- * is [-2147483648..2147483647] and the input base is 10, cutoff will
- * be set to 214748364 and cutlim to either 7 (neg==0) or 8 (neg==1),
- * meaning that if we have accumulated a value > 214748364, or equal
- * but the next digit is > 7 (or 8), the number is too big, and we
- * will return a range error.
+ * That is the largest legal value, divided by the base. An input number
+ * that is greater than this value, if followed by a legal input
+ * character, is too big. One that is equal to this value may be valid or
+ * not; the limit between valid and invalid numbers is then based on the
+ * last digit. For instance, if the range for longs is
+ * [-2147483648..2147483647] and the input base is 10, cutoff will be set
+ * to 214748364 and cutlim to either 7 (neg==0) or 8 (neg==1), meaning
+ * that if we have accumulated a value > 214748364, or equal but the next
+ * digit is > 7 (or 8), the number is too big, and we will return a range
+ * error.
*
- * Set any if any `digits' consumed; make it negative to indicate
- * overflow.
+ * Set any if any `digits' consumed; make it negative to indicate overflow.
*/
cutoff = neg ? -(unsigned long) LONG_MIN : LONG_MAX;
cutlim = cutoff % (unsigned long) base;
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/port/thread.c,v 1.30 2005/07/28 04:03:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/thread.c,v 1.31 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#if defined(FRONTEND) && defined(ENABLE_THREAD_SAFETY) && defined(HAVE_GETHOSTBYNAME_R)
/*
- * broken (well early POSIX draft) gethostbyname_r() which returns
- * 'struct hostent *'
+ * broken (well early POSIX draft) gethostbyname_r() which returns 'struct
+ * hostent *'
*/
*result = gethostbyname_r(name, resultbuf, buffer, buflen, herrno);
return (*result == NULL) ? -1 : 0;
-
#else
/* no gethostbyname_r(), just use gethostbyname() */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/unsetenv.c,v 1.4 2004/12/31 22:03:53 pgsql Exp $
+ * $PostgreSQL: pgsql/src/port/unsetenv.c,v 1.5 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return; /* no work */
/*
- * The technique embodied here works if libc follows the Single Unix
- * Spec and actually uses the storage passed to putenv() to hold the
- * environ entry. When we clobber the entry in the second step we are
- * ensuring that we zap the actual environ member. However, there are
- * some libc implementations (notably recent BSDs) that do not obey
- * SUS but copy the presented string. This method fails on such
- * platforms. Hopefully all such platforms have unsetenv() and thus
- * won't be using this hack.
+ * The technique embodied here works if libc follows the Single Unix Spec
+ * and actually uses the storage passed to putenv() to hold the environ
+ * entry. When we clobber the entry in the second step we are ensuring
+ * that we zap the actual environ member. However, there are some libc
+ * implementations (notably recent BSDs) that do not obey SUS but copy the
+ * presented string. This method fails on such platforms. Hopefully all
+ * such platforms have unsetenv() and thus won't be using this hack.
*
- * Note that repeatedly setting and unsetting a var using this code will
- * leak memory.
+ * Note that repeatedly setting and unsetting a var using this code will leak
+ * memory.
*/
envstr = (char *) malloc(strlen(name) + 2);
strcpy(envstr, "=");
/*
- * This last putenv cleans up if we have multiple zero-length names as
- * a result of unsetting multiple things.
+ * This last putenv cleans up if we have multiple zero-length names as a
+ * result of unsetting multiple things.
*/
putenv(envstr);
}
/*
* If the user supplies a parameter on the command line, use it as the
- * conninfo string; otherwise default to setting dbname=postgres and
- * using environment variables or defaults for all other connection
- * parameters.
+ * conninfo string; otherwise default to setting dbname=postgres and using
+ * environment variables or defaults for all other connection parameters.
*/
if (argc > 1)
conninfo = argv[1];
}
/*
- * Our test case here involves using a cursor, for which we must be
- * inside a transaction block. We could do the whole thing with a
- * single PQexec() of "select * from pg_database", but that's too
- * trivial to make a good example.
+ * Our test case here involves using a cursor, for which we must be inside
+ * a transaction block. We could do the whole thing with a single
+ * PQexec() of "select * from pg_database", but that's too trivial to make
+ * a good example.
*/
/* Start a transaction block */
}
/*
- * Should PQclear PGresult whenever it is no longer needed to avoid
- * memory leaks
+ * Should PQclear PGresult whenever it is no longer needed to avoid memory
+ * leaks
*/
PQclear(res);
/*
* If the user supplies a parameter on the command line, use it as the
- * conninfo string; otherwise default to setting dbname=postgres and
- * using environment variables or defaults for all other connection
- * parameters.
+ * conninfo string; otherwise default to setting dbname=postgres and using
+ * environment variables or defaults for all other connection parameters.
*/
if (argc > 1)
conninfo = argv[1];
}
/*
- * Issue LISTEN command to enable notifications from the rule's
- * NOTIFY.
+ * Issue LISTEN command to enable notifications from the rule's NOTIFY.
*/
res = PQexec(conn, "LISTEN TBL2");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
}
/*
- * should PQclear PGresult whenever it is no longer needed to avoid
- * memory leaks
+ * should PQclear PGresult whenever it is no longer needed to avoid memory
+ * leaks
*/
PQclear(res);
while (nnotifies < 4)
{
/*
- * Sleep until something happens on the connection. We use
- * select(2) to wait for input, but you could also use poll() or
- * similar facilities.
+ * Sleep until something happens on the connection. We use select(2)
+ * to wait for input, but you could also use poll() or similar
+ * facilities.
*/
int sock;
fd_set input_mask;
/*
* If the user supplies a parameter on the command line, use it as the
- * conninfo string; otherwise default to setting dbname=postgres and
- * using environment variables or defaults for all other connection
- * parameters.
+ * conninfo string; otherwise default to setting dbname=postgres and using
+ * environment variables or defaults for all other connection parameters.
*/
if (argc > 1)
conninfo = argv[1];
}
/*
- * The point of this program is to illustrate use of PQexecParams()
- * with out-of-line parameters, as well as binary transmission of
- * results. By using out-of-line parameters we can avoid a lot of
- * tedious mucking about with quoting and escaping. Notice how we
- * don't have to do anything special with the quote mark in the
- * parameter value.
+ * The point of this program is to illustrate use of PQexecParams() with
+ * out-of-line parameters, as well as binary transmission of results. By
+ * using out-of-line parameters we can avoid a lot of tedious mucking
+ * about with quoting and escaping. Notice how we don't have to do
+ * anything special with the quote mark in the parameter value.
*/
/* Here is our out-of-line parameter value */
bptr = PQgetvalue(res, i, b_fnum);
/*
- * The binary representation of INT4 is in network byte order,
- * which we'd better coerce to the local byte order.
+ * The binary representation of INT4 is in network byte order, which
+ * we'd better coerce to the local byte order.
*/
ival = ntohl(*((uint32_t *) iptr));
/*
- * The binary representation of TEXT is, well, text, and since
- * libpq was nice enough to append a zero byte to it, it'll work
- * just fine as a C string.
+ * The binary representation of TEXT is, well, text, and since libpq
+ * was nice enough to append a zero byte to it, it'll work just fine
+ * as a C string.
*
- * The binary representation of BYTEA is a bunch of bytes, which
- * could include embedded nulls so we have to pay attention to
- * field length.
+ * The binary representation of BYTEA is a bunch of bytes, which could
+ * include embedded nulls so we have to pay attention to field length.
*/
blen = PQgetlength(res, i, b_fnum);
/*
* begin, by setting the parameters for a backend connection if the
* parameters are null, then the system will try to use reasonable
- * defaults by looking up environment variables or, failing that,
- * using hardwired constants
+ * defaults by looking up environment variables or, failing that, using
+ * hardwired constants
*/
pghost = NULL; /* host name of the backend server */
pgport = NULL; /* port of the backend server */
}
/*
- * make sure to PQclear() a PGresult whenever it is no longer
- * needed to avoid memory leaks
+ * make sure to PQclear() a PGresult whenever it is no longer needed to
+ * avoid memory leaks
*/
PQclear(res1);
/*
- * fetch instances from the pg_database, the system catalog of
- * databases
+ * fetch instances from the pg_database, the system catalog of databases
*/
res1 = PQexec(conn1, "DECLARE myportal CURSOR FOR select * from pg_database");
if (PQresultStatus(res1) != PGRES_COMMAND_OK)
/*
- * $PostgreSQL: pgsql/src/test/regress/regress.c,v 1.63 2005/07/23 14:18:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/test/regress/regress.c,v 1.64 2005/10/15 02:49:51 momjian Exp $
*/
#include "postgres.h"
default:
/*
- * the distance from a point to a path is the smallest
- * distance from the point to any of its constituent segments.
+ * the distance from a point to a path is the smallest distance
+ * from the point to any of its constituent segments.
*/
Assert(path->npts > 1);
for (i = 0; i < path->npts - 1; ++i)
{
regress_lseg_construct(&lseg, &path->p[i], &path->p[i + 1]);
tmp = DatumGetFloat8(DirectFunctionCall2(dist_ps,
- PointPGetDatum(pt),
- LsegPGetDatum(&lseg)));
+ PointPGetDatum(pt),
+ LsegPGetDatum(&lseg)));
if (i == 0 || tmp < result)
result = tmp;
}
tmp = DatumGetFloat8(DirectFunctionCall2(lseg_distance,
LsegPGetDatum(&seg1),
- LsegPGetDatum(&seg2)));
+ LsegPGetDatum(&seg2)));
if (!have_min || tmp < min)
{
min = tmp;
if (SPI_processed > 0)
{
selected = DatumGetInt32(DirectFunctionCall1(int4in,
- CStringGetDatum(SPI_getvalue(
- SPI_tuptable->vals[0],
- SPI_tuptable->tupdesc,
- 1
- ))));
+ CStringGetDatum(SPI_getvalue(
+ SPI_tuptable->vals[0],
+ SPI_tuptable->tupdesc,
+ 1
+ ))));
}
elog(DEBUG4, "funny_dup17 (fired %s) on level %3d: %d/%d tuples inserted/selected",
{
text *seqname = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum("ttdummy_seq")));
+ CStringGetDatum("ttdummy_seq")));
newoff = DirectFunctionCall1(nextval,
PointerGetDatum(seqname));
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/ialloc.c,v 1.6 2005/06/20 08:00:51 neilc Exp $
+ * $PostgreSQL: pgsql/src/timezone/ialloc.c,v 1.7 2005/10/15 02:49:51 momjian Exp $
*/
#include "postgres.h"
char *
icatalloc(char *old, const char *new)
{
- char *result;
- int oldsize,
- newsize;
+ char *result;
+ int oldsize,
+ newsize;
newsize = (new == NULL) ? 0 : strlen(new);
if (old == NULL)
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/localtime.c,v 1.11 2005/06/20 08:00:51 neilc Exp $
+ * $PostgreSQL: pgsql/src/timezone/localtime.c,v 1.12 2005/10/15 02:49:51 momjian Exp $
*/
/*
#define JULIAN_DAY 0 /* Jn - Julian day */
#define DAY_OF_YEAR 1 /* n - day of year */
-#define MONTH_NTH_DAY_OF_WEEK 2 /* Mm.n.d - month, week, day of
- * week */
+#define MONTH_NTH_DAY_OF_WEEK 2 /* Mm.n.d - month, week, day of week */
/*
* Prototypes for static functions.
static const char *getnum(const char *strp, int *nump, int min, int max);
static const char *getsecs(const char *strp, long *secsp);
static const char *getoffset(const char *strp, long *offsetp);
-static const char *getrule(const char *strp, struct rule *rulep);
-static void gmtload(struct state *sp);
-static void gmtsub(const pg_time_t *timep, long offset, struct pg_tm *tmp);
-static void localsub(const pg_time_t *timep, long offset, struct pg_tm *tmp, const pg_tz *tz);
+static const char *getrule(const char *strp, struct rule * rulep);
+static void gmtload(struct state * sp);
+static void gmtsub(const pg_time_t *timep, long offset, struct pg_tm * tmp);
+static void localsub(const pg_time_t *timep, long offset, struct pg_tm * tmp, const pg_tz *tz);
static void timesub(const pg_time_t *timep, long offset,
- const struct state *sp, struct pg_tm *tmp);
+ const struct state * sp, struct pg_tm * tmp);
static pg_time_t transtime(pg_time_t janfirst, int year,
- const struct rule *rulep, long offset);
-int tzparse(const char *name, struct state *sp, int lastditch);
+ const struct rule * rulep, long offset);
+int tzparse(const char *name, struct state * sp, int lastditch);
/* GMT timezone */
static struct state gmtmem;
static long
detzcode(const char *codep)
{
- long result;
- int i;
+ long result;
+ int i;
result = (codep[0] & 0x80) ? ~0L : 0L;
for (i = 0; i < 4; ++i)
}
int
-tzload(const char *name, struct state *sp)
+tzload(const char *name, struct state * sp)
{
const char *p;
- int i;
- int fid;
+ int i;
+ int fid;
if (name == NULL && (name = TZDEFAULT) == NULL)
return -1;
{
- int doaccess;
+ int doaccess;
char fullname[MAXPGPATH];
if (name[0] == ':')
static const char *
getzname(const char *strp)
{
- char c;
+ char c;
while ((c = *strp) != '\0' && !is_digit(c) && c != ',' && c != '-' &&
c != '+')
static const char *
getnum(const char *strp, int *nump, int min, int max)
{
- char c;
- int num;
+ char c;
+ int num;
if (strp == NULL || !is_digit(c = *strp))
return NULL;
/*
* `HOURSPERDAY * DAYSPERWEEK - 1' allows quasi-Posix rules like
- * "M10.4.6/26", which does not conform to Posix, but which specifies
- * the equivalent of ``02:00 on the first Sunday on or after 23 Oct''.
+ * "M10.4.6/26", which does not conform to Posix, but which specifies the
+ * equivalent of ``02:00 on the first Sunday on or after 23 Oct''.
*/
strp = getnum(strp, &num, 0, HOURSPERDAY * DAYSPERWEEK - 1);
if (strp == NULL)
static const char *
getoffset(const char *strp, long *offsetp)
{
- int neg = 0;
+ int neg = 0;
if (*strp == '-')
{
* Otherwise, return a pointer to the first character not part of the rule.
*/
static const char *
-getrule(const char *strp, struct rule *rulep)
+getrule(const char *strp, struct rule * rulep)
{
if (*strp == 'J')
{
*/
static pg_time_t
transtime(const pg_time_t janfirst, int year,
- const struct rule *rulep, long offset)
+ const struct rule * rulep, long offset)
{
- int leapyear;
- pg_time_t value = 0;
+ int leapyear;
+ pg_time_t value = 0;
int i,
d,
m1,
/*
* Jn - Julian day, 1 == January 1, 60 == March 1 even in leap
- * years. In non-leap years, or if the day number is 59 or
- * less, just add SECSPERDAY times the day number-1 to the
- * time of January 1, midnight, to get the day.
+ * years. In non-leap years, or if the day number is 59 or less,
+ * just add SECSPERDAY times the day number-1 to the time of
+ * January 1, midnight, to get the day.
*/
value = janfirst + (rulep->r_day - 1) * SECSPERDAY;
if (leapyear && rulep->r_day >= 60)
case DAY_OF_YEAR:
/*
- * n - day of year. Just add SECSPERDAY times the day number
- * to the time of January 1, midnight, to get the day.
+ * n - day of year. Just add SECSPERDAY times the day number to
+ * the time of January 1, midnight, to get the day.
*/
value = janfirst + rulep->r_day * SECSPERDAY;
break;
dow += DAYSPERWEEK;
/*
- * "dow" is the day-of-week of the first day of the month. Get
- * the day-of-month (zero-origin) of the first "dow" day of
- * the month.
+ * "dow" is the day-of-week of the first day of the month. Get the
+ * day-of-month (zero-origin) of the first "dow" day of the month.
*/
d = rulep->r_day - dow;
if (d < 0)
/*
* "value" is the Epoch-relative time of 00:00:00 UTC on the day in
- * question. To get the Epoch-relative time of the specified local
- * time on that day, add the transition time and the current offset
- * from UTC.
+ * question. To get the Epoch-relative time of the specified local time
+ * on that day, add the transition time and the current offset from UTC.
*/
return value + rulep->r_time + offset;
}
*/
int
-tzparse(const char *name, struct state *sp, int lastditch)
+tzparse(const char *name, struct state * sp, int lastditch)
{
const char *stdname;
const char *dstname = NULL;
size_t dstlen;
long stdoffset;
long dstoffset;
- pg_time_t *atp;
+ pg_time_t *atp;
unsigned char *typep;
- char *cp;
- int load_result;
+ char *cp;
+ int load_result;
stdname = name;
if (lastditch)
{
struct rule start;
struct rule end;
- int year;
- pg_time_t janfirst;
+ int year;
+ pg_time_t janfirst;
pg_time_t starttime;
pg_time_t endtime;
}
else
{
- long theirstdoffset;
- long theirdstoffset;
- long theiroffset;
- int isdst;
- int i;
- int j;
+ long theirstdoffset;
+ long theirdstoffset;
+ long theiroffset;
+ int isdst;
+ int i;
+ int j;
if (*name != '\0')
return -1;
theiroffset = theirstdoffset;
/*
- * Now juggle transition times and types tracking offsets as
- * you do.
+ * Now juggle transition times and types tracking offsets as you
+ * do.
*/
for (i = 0; i < sp->timecnt; ++i)
{
else
{
/*
- * If summer time is in effect, and the transition
- * time was not specified as standard time, add the
- * summer time offset to the transition time;
- * otherwise, add the standard time offset to the
- * transition time.
+ * If summer time is in effect, and the transition time
+ * was not specified as standard time, add the summer time
+ * offset to the transition time; otherwise, add the
+ * standard time offset to the transition time.
*/
/*
- * Transitions from DST to DDST will effectively
- * disappear since POSIX provides for only one DST
- * offset.
+ * Transitions from DST to DDST will effectively disappear
+ * since POSIX provides for only one DST offset.
*/
if (isdst && !sp->ttis[j].tt_ttisstd)
{
}
/*
- * Finally, fill in ttis. ttisstd and ttisgmt need not be
- * handled.
+ * Finally, fill in ttis. ttisstd and ttisgmt need not be handled.
*/
sp->ttis[0].tt_gmtoff = -stdoffset;
sp->ttis[0].tt_isdst = FALSE;
}
static void
-gmtload(struct state *sp)
+gmtload(struct state * sp)
{
if (tzload(gmt, sp) != 0)
(void) tzparse(gmt, sp, TRUE);
* The unused offset argument is for the benefit of mktime variants.
*/
static void
-localsub(const pg_time_t *timep, long offset, struct pg_tm *tmp, const pg_tz *tz)
+localsub(const pg_time_t *timep, long offset, struct pg_tm * tmp, const pg_tz *tz)
{
- const struct state *sp;
+ const struct state *sp;
const struct ttinfo *ttisp;
- int i;
+ int i;
const pg_time_t t = *timep;
sp = &tz->state;
* gmtsub is to gmtime as localsub is to localtime.
*/
static void
-gmtsub(const pg_time_t *timep, long offset, struct pg_tm *tmp)
+gmtsub(const pg_time_t *timep, long offset, struct pg_tm * tmp)
{
if (!gmt_is_set)
{
/*
* Could get fancy here and deliver something such as "UTC+xxxx" or
- * "UTC-xxxx" if offset is non-zero, but this is no time for a
- * treasure hunt.
+ * "UTC-xxxx" if offset is non-zero, but this is no time for a treasure
+ * hunt.
*/
if (offset != 0)
tmp->tm_zone = wildabbr;
static void
timesub(const pg_time_t *timep, long offset,
- const struct state *sp, struct pg_tm *tmp)
+ const struct state * sp, struct pg_tm * tmp)
{
const struct lsinfo *lp;
/* expand days to 64 bits to support full Julian-day range */
- int64 days;
- int idays;
- long rem;
- int y;
- int yleap;
- const int *ip;
- long corr;
- int hit;
- int i;
+ int64 days;
+ int idays;
+ long rem;
+ int y;
+ int yleap;
+ const int *ip;
+ long corr;
+ int hit;
+ int i;
corr = 0;
hit = 0;
tmp->tm_min = (int) (rem / SECSPERMIN);
/*
- * A positive leap second requires a special representation. This
- * uses "... ??:59:60" et seq.
+ * A positive leap second requires a special representation. This uses
+ * "... ??:59:60" et seq.
*/
tmp->tm_sec = (int) (rem % SECSPERMIN) + hit;
tmp->tm_wday = (int) ((EPOCH_WDAY + days) % DAYSPERWEEK);
/*
* Note: the point of adding 4800 is to ensure we make the same
- * assumptions as Postgres' Julian-date routines about the placement
- * of leap years in centuries BC, at least back to 4713BC which is as
- * far as we'll go. This is effectively extending Gregorian
- * timekeeping into pre-Gregorian centuries, which is a tad bogus but
- * it conforms to the SQL spec...
+ * assumptions as Postgres' Julian-date routines about the placement of
+ * leap years in centuries BC, at least back to 4713BC which is as far as
+ * we'll go. This is effectively extending Gregorian timekeeping into
+ * pre-Gregorian centuries, which is a tad bogus but it conforms to the
+ * SQL spec...
*/
#define LEAPS_THRU_END_OF(y) (((y) + 4800) / 4 - ((y) + 4800) / 100 + ((y) + 4800) / 400)
while (days < 0 || days >= (int64) year_lengths[yleap = isleap(y)])
{
- int newy;
+ int newy;
newy = y + days / DAYSPERNYEAR;
if (days < 0)
pg_time_t *boundary,
long int *after_gmtoff,
int *after_isdst,
- const pg_tz *tz)
+ const pg_tz *tz)
{
const struct state *sp;
const struct ttinfo *ttisp;
- int i;
- int j;
+ int i;
+ int j;
const pg_time_t t = *timep;
sp = &tz->state;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.37 2005/09/09 02:31:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.38 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "utils/hsearch.h"
/* Current global timezone */
-pg_tz *global_timezone = NULL;
+pg_tz *global_timezone = NULL;
static char tzdir[MAXPGPATH];
* Get GMT offset from a system struct tm
*/
static int
-get_timezone_offset(struct tm *tm)
+get_timezone_offset(struct tm * tm)
{
#if defined(HAVE_STRUCT_TM_TM_ZONE)
return tm->tm_gmtoff;
* Does a system tm value match one we computed ourselves?
*/
static bool
-compare_tm(struct tm *s, struct pg_tm *p)
+compare_tm(struct tm * s, struct pg_tm * p)
{
if (s->tm_sec != p->tm_sec ||
s->tm_min != p->tm_min ||
* test time.
*/
static int
-score_timezone(const char *tzname, struct tztry *tt)
+score_timezone(const char *tzname, struct tztry * tt)
{
int i;
pg_time_t pgtt;
struct tm *systm;
struct pg_tm *pgtm;
char cbuf[TZ_STRLEN_MAX + 1];
- pg_tz tz;
+ pg_tz tz;
- /* Load timezone directly. Don't use pg_tzset, because we don't want
- * all timezones loaded in the cache at startup. */
- if (tzload(tzname, &tz.state) != 0) {
- if (tzname[0] == ':' || tzparse(tzname, &tz.state, FALSE) != 0) {
- return -1; /* can't handle the TZ name at all */
+ /*
+ * Load timezone directly. Don't use pg_tzset, because we don't want all
+ * timezones loaded in the cache at startup.
+ */
+ if (tzload(tzname, &tz.state) != 0)
+ {
+ if (tzname[0] == ':' || tzparse(tzname, &tz.state, FALSE) != 0)
+ {
+ return -1; /* can't handle the TZ name at all */
}
}
/*
* Set up the list of dates to be probed to see how well our timezone
- * matches the system zone. We first probe January and July of 2004;
- * this serves to quickly eliminate the vast majority of the TZ
- * database entries. If those dates match, we probe every week from
- * 2004 backwards to late 1904. (Weekly resolution is good enough to
- * identify DST transition rules, since everybody switches on
- * Sundays.) The further back the zone matches, the better we score
- * it. This may seem like a rather random way of doing things, but
- * experience has shown that system-supplied timezone definitions are
- * likely to have DST behavior that is right for the recent past and
- * not so accurate further back. Scoring in this way allows us to
- * recognize zones that have some commonality with the zic database,
- * without insisting on exact match. (Note: we probe Thursdays, not
- * Sundays, to avoid triggering DST-transition bugs in localtime
- * itself.)
+ * matches the system zone. We first probe January and July of 2004; this
+ * serves to quickly eliminate the vast majority of the TZ database
+ * entries. If those dates match, we probe every week from 2004 backwards
+ * to late 1904. (Weekly resolution is good enough to identify DST
+ * transition rules, since everybody switches on Sundays.) The further
+ * back the zone matches, the better we score it. This may seem like a
+ * rather random way of doing things, but experience has shown that
+ * system-supplied timezone definitions are likely to have DST behavior
+ * that is right for the recent past and not so accurate further back.
+ * Scoring in this way allows us to recognize zones that have some
+ * commonality with the zic database, without insisting on exact match.
+ * (Note: we probe Thursdays, not Sundays, to avoid triggering
+ * DST-transition bugs in localtime itself.)
*/
tt.n_test_times = 0;
tt.test_times[tt.n_test_times++] = build_time_t(2004, 1, 15);
return resultbuf;
/*
- * Couldn't find a match in the database, so next we try constructed
- * zone names (like "PST8PDT").
+ * Couldn't find a match in the database, so next we try constructed zone
+ * names (like "PST8PDT").
*
- * First we need to determine the names of the local standard and
- * daylight zones. The idea here is to scan forward from today until
- * we have seen both zones, if both are in use.
+ * First we need to determine the names of the local standard and daylight
+ * zones. The idea here is to scan forward from today until we have seen
+ * both zones, if both are in use.
*/
memset(std_zone_name, 0, sizeof(std_zone_name));
memset(dst_zone_name, 0, sizeof(dst_zone_name));
tnow = time(NULL);
/*
- * Round back to a GMT midnight so results don't depend on local time
- * of day
+ * Round back to a GMT midnight so results don't depend on local time of
+ * day
*/
tnow -= (tnow % T_DAY);
/*
- * We have to look a little further ahead than one year, in case today
- * is just past a DST boundary that falls earlier in the year than the
- * next similar boundary. Arbitrarily scan up to 14 months.
+ * We have to look a little further ahead than one year, in case today is
+ * just past a DST boundary that falls earlier in the year than the next
+ * similar boundary. Arbitrarily scan up to 14 months.
*/
for (t = tnow; t <= tnow + T_MONTH * 14; t += T_MONTH)
{
{
ereport(LOG,
(errmsg("unable to determine system timezone, defaulting to \"%s\"", "GMT"),
- errhint("You can specify the correct timezone in postgresql.conf.")));
+ errhint("You can specify the correct timezone in postgresql.conf.")));
return NULL; /* go to GMT */
}
return resultbuf;
/*
- * Did not find the timezone. Fallback to use a GMT zone. Note that
- * the zic timezone database names the GMT-offset zones in POSIX
- * style: plus is west of Greenwich. It's unfortunate that this is
- * opposite of SQL conventions. Should we therefore change the names?
- * Probably not...
+ * Did not find the timezone. Fallback to use a GMT zone. Note that the
+ * zic timezone database names the GMT-offset zones in POSIX style: plus
+ * is west of Greenwich. It's unfortunate that this is opposite of SQL
+ * conventions. Should we therefore change the names? Probably not...
*/
snprintf(resultbuf, sizeof(resultbuf), "Etc/GMT%s%d",
(-std_ofs > 0) ? "+" : "", -std_ofs / 3600);
ereport(LOG,
- (errmsg("could not recognize system timezone, defaulting to \"%s\"",
- resultbuf),
- errhint("You can specify the correct timezone in postgresql.conf.")));
+ (errmsg("could not recognize system timezone, defaulting to \"%s\"",
+ resultbuf),
+ errhint("You can specify the correct timezone in postgresql.conf.")));
return resultbuf;
}
* score. bestzonename must be a buffer of length TZ_STRLEN_MAX + 1.
*/
static void
-scan_available_timezones(char *tzdir, char *tzdirsub, struct tztry *tt,
+scan_available_timezones(char *tzdir, char *tzdirsub, struct tztry * tt,
int *bestscore, char *bestzonename)
{
int tzdir_orig_len = strlen(tzdir);
FreeDir(dirdesc);
}
-
#else /* WIN32 */
static const struct
{
/*
* This list was built from the contents of the registry at
- * HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows
- * NT\CurrentVersion\Time Zones on Windows XP Professional SP1
+ * HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time
+ * Zones on Windows XP Professional SP1
*
* The zones have been matched to zic timezones by looking at the cities
- * listed in the win32 display name (in the comment here) in most
- * cases.
+ * listed in the win32 display name (in the comment here) in most cases.
*/
{
"Afghanistan Standard Time", "Afghanistan Daylight Time",
{
"Central Europe Standard Time", "Central Europe Daylight Time",
"Europe/Belgrade"
- }, /* (GMT+01:00) Belgrade, Bratislava,
- * Budapest, Ljubljana, Prague */
+ }, /* (GMT+01:00) Belgrade, Bratislava, Budapest,
+ * Ljubljana, Prague */
{
"Central European Standard Time", "Central European Daylight Time",
"Europe/Sarajevo"
{
"China Standard Time", "China Daylight Time",
"Asia/Hong_Kong"
- }, /* (GMT+08:00) Beijing, Chongqing, Hong
- * Kong, Urumqi */
+ }, /* (GMT+08:00) Beijing, Chongqing, Hong Kong,
+ * Urumqi */
{
"Dateline Standard Time", "Dateline Daylight Time",
"Etc/GMT+12"
- }, /* (GMT-12:00) International Date Line
- * West */
+ }, /* (GMT-12:00) International Date Line West */
{
"E. Africa Standard Time", "E. Africa Daylight Time",
"Africa/Nairobi"
{
"Fiji Standard Time", "Fiji Daylight Time",
"Pacific/Fiji"
- }, /* (GMT+12:00) Fiji, Kamchatka, Marshall
- * Is. */
+ }, /* (GMT+12:00) Fiji, Kamchatka, Marshall Is. */
{
"FLE Standard Time", "FLE Daylight Time",
"Europe/Helsinki"
- }, /* (GMT+02:00) Helsinki, Kyiv, Riga,
- * Sofia, Tallinn, Vilnius */
+ }, /* (GMT+02:00) Helsinki, Kyiv, Riga, Sofia,
+ * Tallinn, Vilnius */
{
"GMT Standard Time", "GMT Daylight Time",
"Europe/Dublin"
{
"India Standard Time", "India Daylight Time",
"Asia/Calcutta"
- }, /* (GMT+05:30) Chennai, Kolkata, Mumbai,
- * New Delhi */
+ }, /* (GMT+05:30) Chennai, Kolkata, Mumbai, New
+ * Delhi */
{
"Iran Standard Time", "Iran Daylight Time",
"Asia/Tehran"
{
"Romance Standard Time", "Romance Daylight Time",
"Europe/Brussels"
- }, /* (GMT+01:00) Brussels, Copenhagen,
- * Madrid, Paris */
+ }, /* (GMT+01:00) Brussels, Copenhagen, Madrid,
+ * Paris */
{
"Russian Standard Time", "Russian Daylight Time",
"Europe/Moscow"
"Australia/Perth"
}, /* (GMT+08:00) Perth */
/* {"W. Central Africa Standard Time", "W. Central Africa Daylight Time",
- * * ""}, Could not find a match for this one. Excluded for now. *//* (
+ * * * ""}, Could not find a match for this one. Excluded for now. *//* (
* G MT+01:00) West Central Africa */
{
"W. Europe Standard Time", "W. Europe Daylight Time",
"CET"
- }, /* (GMT+01:00) Amsterdam, Berlin, Bern,
- * Rome, Stockholm, Vienna */
+ }, /* (GMT+01:00) Amsterdam, Berlin, Bern, Rome,
+ * Stockholm, Vienna */
{
"West Asia Standard Time", "West Asia Daylight Time",
"Asia/Karachi"
- }, /* (GMT+05:00) Islamabad, Karachi,
- * Tashkent */
+ }, /* (GMT+05:00) Islamabad, Karachi, Tashkent */
{
"West Pacific Standard Time", "West Pacific Daylight Time",
"Pacific/Guam"
{
int i;
char tzname[128];
- char localtzname[256];
+ char localtzname[256];
time_t t = time(NULL);
struct tm *tm = localtime(&t);
- HKEY rootKey;
- int idx;
+ HKEY rootKey;
+ int idx;
if (!tm)
{
}
/*
- * Localized Windows versions return localized names for the
- * timezone. Scan the registry to find the English name,
- * and then try matching against our table again.
+ * Localized Windows versions return localized names for the timezone.
+ * Scan the registry to find the English name, and then try matching
+ * against our table again.
*/
memset(localtzname, 0, sizeof(localtzname));
if (RegOpenKeyEx(HKEY_LOCAL_MACHINE,
- "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Time Zones",
+ "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Time Zones",
0,
KEY_READ,
&rootKey) != ERROR_SUCCESS)
{
ereport(WARNING,
- (errmsg_internal("could not open registry key to identify Windows timezone: %i", (int)GetLastError())));
+ (errmsg_internal("could not open registry key to identify Windows timezone: %i", (int) GetLastError())));
return NULL;
}
-
- for (idx = 0; ; idx++)
- {
- char keyname[256];
- char zonename[256];
- DWORD namesize;
- FILETIME lastwrite;
- HKEY key;
- LONG r;
-
+
+ for (idx = 0;; idx++)
+ {
+ char keyname[256];
+ char zonename[256];
+ DWORD namesize;
+ FILETIME lastwrite;
+ HKEY key;
+ LONG r;
+
memset(keyname, 0, sizeof(keyname));
namesize = sizeof(keyname);
- if ((r=RegEnumKeyEx(rootKey,
- idx,
- keyname,
- &namesize,
- NULL,
- NULL,
- NULL,
- &lastwrite)) != ERROR_SUCCESS)
+ if ((r = RegEnumKeyEx(rootKey,
+ idx,
+ keyname,
+ &namesize,
+ NULL,
+ NULL,
+ NULL,
+ &lastwrite)) != ERROR_SUCCESS)
{
if (r == ERROR_NO_MORE_ITEMS)
break;
ereport(WARNING,
- (errmsg_internal("could not enumerate registry subkeys to identify Windows timezone: %i", (int)r)));
+ (errmsg_internal("could not enumerate registry subkeys to identify Windows timezone: %i", (int) r)));
break;
}
- if ((r=RegOpenKeyEx(rootKey,keyname,0,KEY_READ,&key)) != ERROR_SUCCESS)
+ if ((r = RegOpenKeyEx(rootKey, keyname, 0, KEY_READ, &key)) != ERROR_SUCCESS)
{
ereport(WARNING,
- (errmsg_internal("could not open registry subkey to identify Windows timezone: %i", (int)r)));
+ (errmsg_internal("could not open registry subkey to identify Windows timezone: %i", (int) r)));
break;
}
-
+
memset(zonename, 0, sizeof(zonename));
namesize = sizeof(zonename);
- if ((r=RegQueryValueEx(key, "Std", NULL, NULL, zonename, &namesize)) != ERROR_SUCCESS)
+ if ((r = RegQueryValueEx(key, "Std", NULL, NULL, zonename, &namesize)) != ERROR_SUCCESS)
{
ereport(WARNING,
- (errmsg_internal("could not query value for 'std' to identify Windows timezone: %i", (int)r)));
+ (errmsg_internal("could not query value for 'std' to identify Windows timezone: %i", (int) r)));
RegCloseKey(key);
break;
}
}
memset(zonename, 0, sizeof(zonename));
namesize = sizeof(zonename);
- if ((r=RegQueryValueEx(key, "Dlt", NULL, NULL, zonename, &namesize)) != ERROR_SUCCESS)
+ if ((r = RegQueryValueEx(key, "Dlt", NULL, NULL, zonename, &namesize)) != ERROR_SUCCESS)
{
ereport(WARNING,
- (errmsg_internal("could not query value for 'dlt' to identify Windows timezone: %i", (int)r)));
+ (errmsg_internal("could not query value for 'dlt' to identify Windows timezone: %i", (int) r)));
RegCloseKey(key);
break;
}
struct pg_tz *
pg_tzset(const char *name)
{
- pg_tz *tzp;
- pg_tz tz;
-
+ pg_tz *tzp;
+ pg_tz tz;
+
if (strlen(name) > TZ_STRLEN_MAX)
return NULL; /* not going to fit */
if (!init_timezone_hashtable())
return NULL;
- tzp = (pg_tz *)hash_search(timezone_cache,
- name,
- HASH_FIND,
- NULL);
+ tzp = (pg_tz *) hash_search(timezone_cache,
+ name,
+ HASH_FIND,
+ NULL);
if (tzp)
{
/* Timezone found in cache, nothing more to do */
name,
HASH_ENTER,
NULL);
-
+
strcpy(tzp->TZname, tz.TZname);
memcpy(&tzp->state, &tz.state, sizeof(tz.state));
pg_time_t time2000;
/*
- * To detect leap-second timekeeping, run pg_localtime for what should
- * be GMT midnight, 2000-01-01. Insist that the tm_sec value be zero;
- * any other result has to be due to leap seconds.
+ * To detect leap-second timekeeping, run pg_localtime for what should be
+ * GMT midnight, 2000-01-01. Insist that the tm_sec value be zero; any
+ * other result has to be due to leap seconds.
*/
time2000 = (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY;
tt = pg_localtime(&time2000, tz);
static bool
set_global_timezone(const char *tzname)
{
- pg_tz *tznew;
+ pg_tz *tznew;
if (!tzname || !tzname[0])
return false;
-
+
tznew = pg_tzset(tzname);
if (!tznew)
return false;
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/pgtz.h,v 1.14 2005/07/04 19:54:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/timezone/pgtz.h,v 1.15 2005/10/15 02:49:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
pg_time_t ats[TZ_MAX_TIMES];
unsigned char types[TZ_MAX_TIMES];
struct ttinfo ttis[TZ_MAX_TYPES];
- char chars[BIGGEST(BIGGEST(TZ_MAX_CHARS + 1, 3 /* sizeof gmt */),
+ char chars[BIGGEST(BIGGEST(TZ_MAX_CHARS + 1, 3 /* sizeof gmt */ ),
(2 * (TZ_STRLEN_MAX + 1)))];
struct lsinfo lsis[TZ_MAX_LEAPS];
};
-struct pg_tz {
- char TZname[TZ_STRLEN_MAX + 1];
+struct pg_tz
+{
+ char TZname[TZ_STRLEN_MAX + 1];
struct state state;
};
-int tzload(const char *name, struct state * sp);
-int tzparse(const char *name, struct state * sp, int lastditch);
+int tzload(const char *name, struct state * sp);
+int tzparse(const char *name, struct state * sp, int lastditch);
#endif /* _PGTZ_H */
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/scheck.c,v 1.6 2005/06/20 08:00:51 neilc Exp $
+ * $PostgreSQL: pgsql/src/timezone/scheck.c,v 1.7 2005/10/15 02:49:51 momjian Exp $
*/
#include "postgres.h"
char *
scheck(const char *string, const char *format)
{
- char *fbuf;
+ char *fbuf;
const char *fp;
- char *tp;
- int c;
- char *result;
+ char *tp;
+ int c;
+ char *result;
char dummy;
static char nada;
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/strftime.c,v 1.7 2005/06/20 08:00:51 neilc Exp $
+ * $PostgreSQL: pgsql/src/timezone/strftime.c,v 1.8 2005/10/15 02:49:51 momjian Exp $
*/
#include "postgres.h"
/*
* c_fmt
*
- * C99 requires this format. Previously this code used "%D %X", but we
- * now conform to C99. Note that "%a %b %d %H:%M:%S %Y" is used by
- * Solaris 2.3.
+ * C99 requires this format. Previously this code used "%D %X", but we now
+ * conform to C99. Note that "%a %b %d %H:%M:%S %Y" is used by Solaris
+ * 2.3.
*/
"%a %b %e %T %Y",
size_t
pg_strftime(char *s, size_t maxsize, const char *format,
- const struct pg_tm *t)
+ const struct pg_tm * t)
{
char *p;
int warn;
}
static char *
-_fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim,
+_fmt(const char *format, const struct pg_tm * t, char *pt, const char *ptlim,
int *warnp)
{
for (; *format; ++format)
case 'O':
/*
- * C99 locale modifiers. The sequences %Ec %EC %Ex
- * %EX %Ey %EY %Od %oe %OH %OI %Om %OM %OS %Ou %OU
- * %OV %Ow %OW %Oy are supposed to provide alternate
+ * C99 locale modifiers. The sequences %Ec %EC %Ex %EX
+ * %Ey %EY %Od %oe %OH %OI %Om %OM %OS %Ou %OU %OV %Ow
+ * %OW %Oy are supposed to provide alternate
* representations.
*/
goto label;
case 'k':
/*
- * This used to be... _conv(t->tm_hour % 12 ?
- * t->tm_hour % 12 : 12, 2, ' '); ...and has been
- * changed to the below to match SunOS 4.1.1 and
- * Arnold Robbins' strftime version 3.0. That is,
- * "%k" and "%l" have been swapped. (ado, 1993-05-24)
+ * This used to be... _conv(t->tm_hour % 12 ? t->tm_hour
+ * % 12 : 12, 2, ' '); ...and has been changed to the
+ * below to match SunOS 4.1.1 and Arnold Robbins' strftime
+ * version 3.0. That is, "%k" and "%l" have been swapped.
+ * (ado, 1993-05-24)
*/
pt = _conv(t->tm_hour, "%2d", pt, ptlim);
continue;
case 'l':
/*
- * This used to be... _conv(t->tm_hour, 2, ' ');
- * ...and has been changed to the below to match SunOS
- * 4.1.1 and Arnold Robbin's strftime version 3.0.
- * That is, "%k" and "%l" have been swapped. (ado,
- * 1993-05-24)
+ * This used to be... _conv(t->tm_hour, 2, ' '); ...and
+ * has been changed to the below to match SunOS 4.1.1 and
+ * Arnold Robbin's strftime version 3.0. That is, "%k" and
+ * "%l" have been swapped. (ado, 1993-05-24)
*/
pt = _conv((t->tm_hour % 12) ?
(t->tm_hour % 12) : 12,
case 'u':
/*
- * From Arnold Robbins' strftime version 3.0: "ISO
- * 8601: Weekday as a decimal number [1 (Monday) - 7]"
- * (ado, 1993-05-24)
+ * From Arnold Robbins' strftime version 3.0: "ISO 8601:
+ * Weekday as a decimal number [1 (Monday) - 7]" (ado,
+ * 1993-05-24)
*/
pt = _conv((t->tm_wday == 0) ?
DAYSPERWEEK : t->tm_wday,
DAYSPERNYEAR;
/*
- * What yday (-3 ... 3) does the ISO year
- * begin on?
+ * What yday (-3 ... 3) does the ISO year begin
+ * on?
*/
bot = ((yday + 11 - wday) %
DAYSPERWEEK) - 3;
pt = _add(t->tm_zone, pt, ptlim);
/*
- * C99 says that %Z must be replaced by the empty
- * string if the time zone is not determinable.
+ * C99 says that %Z must be replaced by the empty string
+ * if the time zone is not determinable.
*/
continue;
case 'z':
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/tzfile.h,v 1.5 2004/05/21 20:59:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/timezone/tzfile.h,v 1.6 2005/10/15 02:49:51 momjian Exp $
*/
/*
{
char tzh_magic[4]; /* TZ_MAGIC */
char tzh_reserved[16]; /* reserved for future use */
- char tzh_ttisgmtcnt[4]; /* coded number of trans. time
- * flags */
- char tzh_ttisstdcnt[4]; /* coded number of trans. time
- * flags */
+ char tzh_ttisgmtcnt[4]; /* coded number of trans. time flags */
+ char tzh_ttisstdcnt[4]; /* coded number of trans. time flags */
char tzh_leapcnt[4]; /* coded number of leap seconds */
char tzh_timecnt[4]; /* coded number of transition times */
char tzh_typecnt[4]; /* coded number of local time types */
*/
#define TZ_MAX_TIMES 370
-#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can
- * hold */
+#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */
-#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation
- * characters */
+#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */
/* (limited by what unsigned chars can hold) */
-#define TZ_MAX_LEAPS 50 /* Maximum number of leap second
- * corrections */
+#define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */
#define SECSPERMIN 60
#define MINSPERHOUR 60
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/zic.c,v 1.15 2005/06/20 08:00:51 neilc Exp $
+ * $PostgreSQL: pgsql/src/timezone/zic.c,v 1.16 2005/10/15 02:49:51 momjian Exp $
*/
#include "postgres.h"
static int mkdirs(char *filename);
static void newabbr(const char *abbr);
static long oadd(long t1, long t2);
-static void outzone(const struct zone *zp, int ntzones);
+static void outzone(const struct zone * zp, int ntzones);
static void puttzcode(long code, FILE *fp);
static int rcomp(const void *leftp, const void *rightp);
-static pg_time_t rpytime(const struct rule *rp, int wantedy);
-static void rulesub(struct rule *rp,
+static pg_time_t rpytime(const struct rule * rp, int wantedy);
+static void rulesub(struct rule * rp,
const char *loyearp, const char *hiyearp,
const char *typep, const char *monthp,
const char *dayp, const char *timep);
error(const char *string)
{
/*
- * Match the format of "cc" to allow sh users to zic ... 2>&1 | error
- * -t "*" -v on BSD systems.
+ * Match the format of "cc" to allow sh users to zic ... 2>&1 | error -t
+ * "*" -v on BSD systems.
*/
(void) fprintf(stderr, _("\"%s\", line %d: %s"),
filename, linenum, string);
int
main(int argc, char *argv[])
{
- int i;
- int j;
- int c;
+ int i;
+ int j;
+ int c;
#ifndef WIN32
(void) umask(umask(S_IWGRP | S_IWOTH) | (S_IWGRP | S_IWOTH));
else
{
(void) fprintf(stderr,
- _("%s: More than one -d option specified\n"),
+ _("%s: More than one -d option specified\n"),
progname);
(void) exit(EXIT_FAILURE);
}
else
{
(void) fprintf(stderr,
- _("%s: More than one -l option specified\n"),
+ _("%s: More than one -l option specified\n"),
progname);
(void) exit(EXIT_FAILURE);
}
else
{
(void) fprintf(stderr,
- _("%s: More than one -p option specified\n"),
+ _("%s: More than one -p option specified\n"),
progname);
(void) exit(EXIT_FAILURE);
}
else
{
(void) fprintf(stderr,
- _("%s: More than one -y option specified\n"),
+ _("%s: More than one -y option specified\n"),
progname);
(void) exit(EXIT_FAILURE);
}
else
{
(void) fprintf(stderr,
- _("%s: More than one -L option specified\n"),
+ _("%s: More than one -L option specified\n"),
progname);
(void) exit(EXIT_FAILURE);
}
static void
dolink(const char *fromfile, const char *tofile)
{
- char *fromname;
- char *toname;
+ char *fromname;
+ char *toname;
if (fromfile[0] == '/')
fromname = ecpyalloc(fromfile);
}
/*
- * We get to be careful here since there's a fair chance of root
- * running us.
+ * We get to be careful here since there's a fair chance of root running
+ * us.
*/
if (!itsdir(toname))
(void) remove(toname);
!itsdir(fromname))
{
const char *s = tofile;
- char *symlinkcontents = NULL;
+ char *symlinkcontents = NULL;
while ((s = strchr(s + 1, '/')) != NULL)
symlinkcontents = ecatalloc(symlinkcontents, "../");
static int
itsdir(const char *name)
{
- char *myname;
- int accres;
+ char *myname;
+ int accres;
myname = ecpyalloc(name);
myname = ecatalloc(myname, "/.");
{
struct zone *zp;
struct rule *rp;
- int base,
- out;
- int i,
- j;
+ int base,
+ out;
+ int i,
+ j;
if (nrules != 0)
{
TRUE);
/*
- * Note, though, that if there's no rule, a '%s' in the format
- * is a bad thing.
+ * Note, though, that if there's no rule, a '%s' in the format is
+ * a bad thing.
*/
if (strchr(zp->z_format, '%') != 0)
error(_("%s in ruleless zone"));
static void
infile(const char *name)
{
- FILE *fp;
- char **fields;
- char *cp;
+ FILE *fp;
+ char **fields;
+ char *cp;
const struct lookup *lp;
- int nfields;
- int wantcont;
- int num;
+ int nfields;
+ int wantcont;
+ int num;
char buf[BUFSIZ];
if (strcmp(name, "-") == 0)
case LC_LEAP:
if (name != leapsec)
(void) fprintf(stderr,
- _("%s: Leap line in non leap seconds file %s\n"),
+ _("%s: Leap line in non leap seconds file %s\n"),
progname, name);
else
inleap(fields, nfields);
break;
default: /* "cannot happen" */
(void) fprintf(stderr,
- _("%s: panic: Invalid l_value %d\n"),
+ _("%s: panic: Invalid l_value %d\n"),
progname, lp->l_value);
(void) exit(EXIT_FAILURE);
}
r.r_name = ecpyalloc(fields[RF_NAME]);
r.r_abbrvar = ecpyalloc(fields[RF_ABBRVAR]);
rules = (struct rule *) (void *) erealloc((char *) rules,
- (int) ((nrules + 1) * sizeof *rules));
+ (int) ((nrules + 1) * sizeof *rules));
rules[nrules++] = r;
}
static int
inzone(char **fields, int nfields)
{
- int i;
+ int i;
static char *buf;
if (nfields < ZONE_MINFIELDS || nfields > ZONE_MAXFIELDS)
{
buf = erealloc(buf, (int) (132 + strlen(TZDEFAULT)));
(void) sprintf(buf,
- _("\"Zone %s\" line and -l option are mutually exclusive"),
+ _("\"Zone %s\" line and -l option are mutually exclusive"),
TZDEFAULT);
error(buf);
return FALSE;
{
buf = erealloc(buf, (int) (132 + strlen(TZDEFRULES)));
(void) sprintf(buf,
- _("\"Zone %s\" line and -p option are mutually exclusive"),
+ _("\"Zone %s\" line and -p option are mutually exclusive"),
TZDEFRULES);
error(buf);
return FALSE;
strlen(fields[ZF_NAME]) +
strlen(zones[i].z_filename)));
(void) sprintf(buf,
- _("duplicate zone name %s (file \"%s\", line %d)"),
+ _("duplicate zone name %s (file \"%s\", line %d)"),
fields[ZF_NAME],
zones[i].z_filename,
zones[i].z_linenum);
static int
inzsub(char **fields, int nfields, int iscont)
{
- char *cp;
+ char *cp;
static struct zone z;
- int i_gmtoff,
- i_rule,
- i_format;
- int i_untilyear,
- i_untilmonth;
- int i_untilday,
- i_untiltime;
- int hasuntil;
+ int i_gmtoff,
+ i_rule,
+ i_format;
+ int i_untilyear,
+ i_untilmonth;
+ int i_untilday,
+ i_untiltime;
+ int hasuntil;
if (iscont)
{
}
}
zones = (struct zone *) (void *) erealloc((char *) zones,
- (int) ((nzones + 1) * sizeof *zones));
+ (int) ((nzones + 1) * sizeof *zones));
zones[nzones++] = z;
/*
l.l_from = ecpyalloc(fields[LF_FROM]);
l.l_to = ecpyalloc(fields[LF_TO]);
links = (struct link *) (void *) erealloc((char *) links,
- (int) ((nlinks + 1) * sizeof *links));
+ (int) ((nlinks + 1) * sizeof *links));
links[nlinks++] = l;
}
static void
-rulesub(struct rule *rp, const char *loyearp, const char *hiyearp,
+rulesub(struct rule * rp, const char *loyearp, const char *hiyearp,
const char *typep, const char *monthp, const char *dayp,
const char *timep)
{
const struct lookup *lp;
const char *cp;
- char *dp;
- char *ep;
+ char *dp;
+ char *ep;
if ((lp = byword(monthp, mon_names)) == NULL)
{
static void
convert(long val, char *buf)
{
- int i;
- long shift;
+ int i;
+ long shift;
for (i = 0, shift = 24; i < 4; ++i, shift -= 8)
buf[i] = val >> shift;
static void
writezone(const char *name)
{
- FILE *fp;
- int i,
- j;
+ FILE *fp;
+ int i,
+ j;
static char *fullname;
static struct tzhead tzh;
pg_time_t ats[TZ_MAX_TIMES];
}
static void
-outzone(const struct zone *zpfirst, int zonecount)
+outzone(const struct zone * zpfirst, int zonecount)
{
const struct zone *zp;
struct rule *rp;
- int i,
- j;
- int usestart,
- useuntil;
- pg_time_t starttime = 0;
- pg_time_t untiltime = 0;
- long gmtoff;
- long stdoff;
- int year;
- long startoff;
- int startttisstd;
- int startttisgmt;
- int type;
+ int i,
+ j;
+ int usestart,
+ useuntil;
+ pg_time_t starttime = 0;
+ pg_time_t untiltime = 0;
+ long gmtoff;
+ long stdoff;
+ int year;
+ long startoff;
+ int startttisstd;
+ int startttisgmt;
+ int type;
char startbuf[BUFSIZ];
/*
charcnt = 0;
/*
- * to unconditionally initialize startttisstd.
+ * unconditionally initialize startttisstd.
*/
startttisstd = FALSE;
startttisgmt = FALSE;
break;
/*
- * Mark which rules to do in the current year. For those
- * to do, calculate rpytime(rp, year);
+ * Mark which rules to do in the current year. For those to
+ * do, calculate rpytime(rp, year);
*/
for (j = 0; j < zp->z_nrules; ++j)
{
}
for (;;)
{
- int k;
- pg_time_t jtime, ktime = 0;
- long offset;
+ int k;
+ pg_time_t jtime,
+ ktime = 0;
+ long offset;
char buf[BUFSIZ];
if (useuntil)
{
/*
- * Turn untiltime into UTC assuming the current
- * gmtoff and stdoff values.
+ * Turn untiltime into UTC assuming the current gmtoff
+ * and stdoff values.
*/
untiltime = zp->z_untiltime;
if (!zp->z_untilrule.r_todisgmt)
addtype(long gmtoff, const char *abbr, int isdst,
int ttisstd, int ttisgmt)
{
- int i;
- int j;
+ int i;
+ int j;
if (isdst != TRUE && isdst != FALSE)
{
}
/*
- * See if there's already an entry for this zone type. If so, just
- * return its index.
+ * See if there's already an entry for this zone type. If so, just return
+ * its index.
*/
for (i = 0; i < typecnt; ++i)
{
static void
leapadd(const pg_time_t t, int positive, int rolling, int count)
{
- int i;
- int j;
+ int i;
+ int j;
if (leapcnt + (positive ? count : 1) > TZ_MAX_LEAPS)
{
static void
adjleap(void)
{
- int i;
- long last = 0;
+ int i;
+ long last = 0;
/*
* propagate leap seconds forward
}
static const struct lookup *
-byword(const char *word, const struct lookup *table)
+byword(const char *word, const struct lookup * table)
{
const struct lookup *foundlp;
const struct lookup *lp;
static char **
getfields(char *cp)
{
- char *dp;
- char **array;
- int nsubs;
+ char *dp;
+ char **array;
+ int nsubs;
if (cp == NULL)
return NULL;
static long
oadd(long t1, long t2)
{
- long t;
+ long t;
t = t1 + t2;
if ((t2 > 0 && t <= t1) || (t2 < 0 && t >= t1))
static pg_time_t
tadd(const pg_time_t t1, long t2)
{
- pg_time_t t;
+ pg_time_t t;
if (t1 == max_time && t2 > 0)
return max_time;
*/
static pg_time_t
-rpytime(const struct rule *rp, int wantedy)
+rpytime(const struct rule * rp, int wantedy)
{
- int y,
- m,
- i;
- long dayoff; /* with a nod to Margaret O. */
- pg_time_t t;
+ int y,
+ m,
+ i;
+ long dayoff; /* with a nod to Margaret O. */
+ pg_time_t t;
if (wantedy == INT_MIN)
return min_time;
dayoff = oadd(dayoff, eitol(i));
if (rp->r_dycode == DC_DOWGEQ || rp->r_dycode == DC_DOWLEQ)
{
- long wday;
+ long wday;
#define LDAYSPERWEEK ((long) DAYSPERWEEK)
wday = eitol(EPOCH_WDAY);
static void
newabbr(const char *string)
{
- int i;
+ int i;
i = strlen(string) + 1;
if (charcnt + i > TZ_MAX_CHARS)
static int
mkdirs(char *argname)
{
- char *name;
- char *cp;
+ char *name;
+ char *cp;
if (argname == NULL || *argname == '\0')
return 0;
if (!itsdir(name))
{
/*
- * It doesn't seem to exist, so we try to create it. Creation
- * may fail because of the directory being created by some
- * other multiprocessor, so we get to do extra checking.
+ * It doesn't seem to exist, so we try to create it. Creation may
+ * fail because of the directory being created by some other
+ * multiprocessor, so we get to do extra checking.
*/
if (mkdir(name, MKDIR_UMASK) != 0)
{
if (errno != EEXIST || !itsdir(name))
{
(void) fprintf(stderr,
- _("%s: Can't create directory %s: %s\n"),
+ _("%s: Can't create directory %s: %s\n"),
progname, name, e);
ifree(name);
return -1;
if (col_in_tab == tab_size)
{
/*
- * Is the next character going to be a tab? Needed
- * to do tab replacement in current spot if next
- * char is going to be a tab, ignoring min_spaces
+ * Is the next character going to be a tab? Needed to
+ * do tab replacement in current spot if next char is
+ * going to be a tab, ignoring min_spaces
*/
nxt_spaces = 0;
while (1)
/*VARARGS*/
void
-halt(const char *format, ...)
+halt(const char *format,...)
{
va_list arg_ptr;
- const char *pstr;
+ const char *pstr;
void (*sig_func) ();
va_start(arg_ptr, format);
*
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/tools/findoidjoins/findoidjoins.c,v 1.1 2005/06/23 02:33:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/tools/findoidjoins/findoidjoins.c,v 1.2 2005/10/15 02:49:51 momjian Exp $
*/
#include "postgres_fe.h"
appendPQExpBuffer(&sql, "%s",
"SET search_path = public;"
"SELECT c.relname, (SELECT nspname FROM "
- "pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname "
+ "pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname "
"FROM pg_catalog.pg_class c "
"WHERE c.relkind = 'r' "
"AND c.relhasoids "
"SELECT c.relname, "
"(SELECT nspname FROM pg_catalog.pg_namespace n WHERE n.oid = c.relnamespace) AS nspname, "
"a.attname "
- "FROM pg_catalog.pg_class c, pg_catalog.pg_attribute a "
+ "FROM pg_catalog.pg_class c, pg_catalog.pg_attribute a "
"WHERE a.attnum > 0 AND c.relkind = 'r' "
"AND a.attrelid = c.oid "
"AND a.atttypid IN ('pg_catalog.oid'::regtype, "
"\"%s\".\"%s\" t2 "
"WHERE t1.\"%s\"::pg_catalog.oid = t2.oid "
"LIMIT 1",
- fk_nspname, fk_relname, pk_nspname, pk_relname, fk_attname);
+ fk_nspname, fk_relname, pk_nspname, pk_relname, fk_attname);
res = PQexec(conn, sql.data);
if (!res || PQresultStatus(res) != PGRES_TUPLES_OK)
if ((tmpfile = open(FSYNC_FILENAME, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) == -1)
die("can't open /var/tmp/test_fsync.out");
write(tmpfile, strout, WAL_FILE_SIZE);
- fsync(tmpfile); /* fsync so later fsync's don't have to do
- * it */
+ fsync(tmpfile); /* fsync so later fsync's don't have to do it */
close(tmpfile);
printf("Simple write timing:\n");
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/tools/thread/thread_test.c,v 1.40 2005/08/23 21:02:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/tools/thread/thread_test.c,v 1.41 2005/10/15 02:49:52 momjian Exp $
*
* This program tests to see if your standard libc functions use
* pthread_setspecific()/pthread_getspecific() to be thread-safe.
/******************************************************************
* Windows Hacks
*****************************************************************/
-
+
#ifdef WIN32
#define MAXHOSTNAMELEN 63
#include
-int mkstemp(char *template);
+int mkstemp(char *template);
int
mkstemp(char *template)
{
- FILE *foo;
+ FILE *foo;
mktemp(template);
foo = fopen(template, "rw");
if (!foo)
return -1;
else
- return (int)foo;
+ return (int) foo;
}
-
#endif
/******************************************************************
/* Test for POSIX.1c 2-arg sigwait() and fail on single-arg version */
#include
-int sigwait(const sigset_t *set, int *sig);
+int sigwait(const sigset_t *set, int *sig);
#if !defined(ENABLE_THREAD_SAFETY) && !defined(IN_CONFIGURE) && !(defined(WIN32))
fprintf(stderr, "Perhaps rerun 'configure' using '--enable-thread-safety'.\n");
return 1;
}
-
#else
/* This must be down here because this is the code that uses threads. */
-static void func_call_1(void);
-static void func_call_2(void);
+static void func_call_1(void);
+static void func_call_2(void);
#ifdef WIN32
#define TEMP_FILENAME_1 "thread_test.1.XXXXXX"
#define TEMP_FILENAME_2 "/tmp/thread_test.2.XXXXXX"
#endif
-static char *temp_filename_1;
-static char *temp_filename_2;
+static char *temp_filename_1;
+static char *temp_filename_2;
static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int errno2_set = 0;
#ifndef HAVE_STRERROR_R
-static char *strerror_p1;
-static char *strerror_p2;
-static bool strerror_threadsafe = false;
+static char *strerror_p1;
+static char *strerror_p2;
+static bool strerror_threadsafe = false;
#endif
#ifndef WIN32
#ifndef HAVE_GETPWUID_R
static struct passwd *passwd_p1;
static struct passwd *passwd_p2;
-static bool getpwuid_threadsafe = false;
+static bool getpwuid_threadsafe = false;
#endif
#endif
#if !defined(HAVE_GETADDRINFO) && !defined(HAVE_GETHOSTBYNAME_R)
static struct hostent *hostent_p1;
static struct hostent *hostent_p2;
-static char myhostname[MAXHOSTNAMELEN];
-static bool gethostbyname_threadsafe = false;
+static char myhostname[MAXHOSTNAMELEN];
+static bool gethostbyname_threadsafe = false;
#endif
-static bool platform_is_threadsafe = true;
+static bool platform_is_threadsafe = true;
int
main(int argc, char *argv[])
{
pthread_t thread1,
- thread2;
- int fd;
+ thread2;
+ int fd;
+
#ifdef WIN32
WSADATA wsaData;
- int err;
+ int err;
#endif
if (argc > 1)
#ifdef WIN32
err = WSAStartup(MAKEWORD(1, 1), &wsaData);
- if (err != 0) {
+ if (err != 0)
+ {
fprintf(stderr, "Cannot start the network subsystem - %d**\nexiting\n", err);
exit(1);
}
pthread_create(&thread2, NULL, (void *(*) (void *)) func_call_2, NULL);
while (thread1_done == 0 || thread2_done == 0)
- sched_yield(); /* if this is a portability problem,
- * remove it */
+ sched_yield(); /* if this is a portability problem, remove it */
#ifdef WIN32
printf("Your GetLastError() is thread-safe.\n");
#else
void *p;
#endif
#ifdef WIN32
- HANDLE h1;
- HANDLE h2;
-#endif
+ HANDLE h1;
+ HANDLE h2;
+#endif
unlink(temp_filename_1);
-
-
+
+
/* create, then try to fail on exclusive create open */
#ifdef WIN32
- h1 = CreateFile(temp_filename_1, GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, 0, NULL);
- h2 = CreateFile(temp_filename_1, GENERIC_WRITE, 0, NULL, CREATE_NEW, 0, NULL);
+ h1 = CreateFile(temp_filename_1, GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, 0, NULL);
+ h2 = CreateFile(temp_filename_1, GENERIC_WRITE, 0, NULL, CREATE_NEW, 0, NULL);
if (h1 == INVALID_HANDLE_VALUE || GetLastError() != ERROR_FILE_EXISTS)
#else
if (open(temp_filename_1, O_RDWR | O_CREAT, 0600) < 0 ||
while (errno2_set == 0)
sched_yield();
#ifdef WIN32
- if (GetLastError() != ERROR_FILE_EXISTS)
+ if (GetLastError() != ERROR_FILE_EXISTS)
#else
- if (errno != EEXIST)
+ if (errno != EEXIST)
#endif
{
#ifdef WIN32
- fprintf(stderr, "GetLastError() not thread-safe **\nexiting\n");
+ fprintf(stderr, "GetLastError() not thread-safe **\nexiting\n");
#else
- fprintf(stderr, "errno not thread-safe **\nexiting\n");
-#endif
+ fprintf(stderr, "errno not thread-safe **\nexiting\n");
+#endif
unlink(temp_filename_1);
exit(1);
}
strerror_p1 = strerror(EACCES);
/*
- * If strerror() uses sys_errlist, the pointer might change for
- * different errno values, so we don't check to see if it varies
- * within the thread.
+ * If strerror() uses sys_errlist, the pointer might change for different
+ * errno values, so we don't check to see if it varies within the thread.
*/
#endif
unlink(temp_filename_2);
/* open non-existant file */
#ifdef WIN32
- CreateFile(temp_filename_2, GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL);
- if (GetLastError() != ERROR_FILE_NOT_FOUND)
+ CreateFile(temp_filename_2, GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL);
+ if (GetLastError() != ERROR_FILE_NOT_FOUND)
#else
if (open(temp_filename_2, O_RDONLY, 0600) >= 0)
#endif
while (errno1_set == 0)
sched_yield();
#ifdef WIN32
- if (GetLastError() != ENOENT)
+ if (GetLastError() != ENOENT)
#else
- if (errno != ENOENT)
+ if (errno != ENOENT)
#endif
{
#ifdef WIN32
- fprintf(stderr, "GetLastError() not thread-safe **\nexiting\n");
+ fprintf(stderr, "GetLastError() not thread-safe **\nexiting\n");
#else
- fprintf(stderr, "errno not thread-safe **\nexiting\n");
+ fprintf(stderr, "errno not thread-safe **\nexiting\n");
#endif
unlink(temp_filename_2);
exit(1);
strerror_p2 = strerror(EINVAL);
/*
- * If strerror() uses sys_errlist, the pointer might change for
- * different errno values, so we don't check to see if it varies
- * within the thread.
+ * If strerror() uses sys_errlist, the pointer might change for different
+ * errno values, so we don't check to see if it varies within the thread.
*/
#endif
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/tutorial/beard.c,v 1.12 2004/12/31 22:04:05 pgsql Exp $
+ * $PostgreSQL: pgsql/src/tutorial/beard.c,v 1.13 2005/10/15 02:49:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
beard_fd = DatumGetInt32(DirectFunctionCall2(lo_open,
ObjectIdGetDatum(beard),
- Int32GetDatum(INV_WRITE)));
+ Int32GetDatum(INV_WRITE)));
if (beard_fd < 0)
elog(ERROR, "Cannot access beard large object");
PG_RETURN_BOOL(false);
/*
- * Alternatively, we might prefer to do PG_RETURN_NULL() for null
- * salary
+ * Alternatively, we might prefer to do PG_RETURN_NULL() for null salary
*/
PG_RETURN_BOOL(salary > limit);
| |