pg_file_sync(PG_FUNCTION_ARGS)
{
char *filename;
- struct stat fst;
+ struct stat fst;
filename = convert_and_check_filename(PG_GETARG_TEXT_PP(0));
static bool
checkcondition_bit(void *checkval, ITEM *item, void *siglen)
{
- return GETBIT(checkval, HASHVAL(item->val, (int)(intptr_t) siglen));
+ return GETBIT(checkval, HASHVAL(item->val, (int) (intptr_t) siglen));
}
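
The casts adjusted above pass an integer through the callback's untyped void * argument by round-tripping it through intptr_t. A minimal standalone sketch of that idiom, with names invented for this example only (not taken from the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Editorial sketch: smuggle an int through a void * callback argument,
	 * mirroring the (void *) (intptr_t) casts in the hunk above. */
	static void
	invoke(void (*cb) (void *arg), void *arg)
	{
		cb(arg);
	}

	static void
	print_len(void *arg)
	{
		int			siglen = (int) (intptr_t) arg;	/* recover the integer */

		printf("siglen = %d\n", siglen);
	}

	int
	main(void)
	{
		int			siglen = 124;

		invoke(print_len, (void *) (intptr_t) siglen);
		return 0;
	}
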
/*
signconsistent(QUERYTYPE *query, BITVECP sign, int siglen, bool calcnot)
{
return execute(GETQUERY(query) + query->size - 1,
- (void *) sign, (void *)(intptr_t) siglen, calcnot,
+ (void *) sign, (void *) (intptr_t) siglen, calcnot,
checkcondition_bit);
}
typedef struct LtreeSignature
{
- BITVECP sign;
- int siglen;
+ BITVECP sign;
+ int siglen;
} LtreeSignature;
static bool
#define LTG_GETRNODE(x, siglen) ( LTG_ISONENODE(x) ? LTG_NODE(x) : LTG_RNODE(x, siglen) )
extern ltree_gist *ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen,
- ltree *left, ltree *right);
+ ltree *left, ltree *right);
/* GiST support for ltree[] */
ltree *left, ltree *right)
{
int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
- (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
+ (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
ltree_gist *result = palloc(size);
SET_VARSIZE(result, size);
typedef struct LtreeSignature
{
- BITVECP sign;
- int siglen;
+ BITVECP sign;
+ int siglen;
} LtreeSignature;
static bool
foreach(lc, rowMarks)
{
RowMarkClause *rowmark = lfirst_node(RowMarkClause, lc);
+
if (!rowmark->pushedDown)
{
APP_JUMB(rowmark->rti);
Oid relid = PG_GETARG_OID(0);
Relation rel;
ForkNumber fork;
- BlockNumber block;
+ BlockNumber block;
rel = relation_open(relid, AccessExclusiveLock);
/*
* Check that non-superuser has used password to establish connection;
* otherwise, he's piggybacking on the postgres server's user
- * identity. See also dblink_security_check() in contrib/dblink
- * and check_conn_params.
+ * identity. See also dblink_security_check() in contrib/dblink and
+ * check_conn_params.
*/
if (!superuser_arg(user->userid) && UserMappingPasswordRequired(user) &&
!PQconnectionUsedPassword(conn))
foreach(cell, user->options)
{
DefElem *def = (DefElem *) lfirst(cell);
+
if (strcmp(def->defname, "password_required") == 0)
return defGetBoolean(def);
}
}
else if (strcmp(def->defname, "password_required") == 0)
{
- bool pw_required = defGetBoolean(def);
+ bool pw_required = defGetBoolean(def);
/*
* Only the superuser may set this option on a user mapping, or
* alter a user mapping on which this option is set. We allow a
- * user to clear this option if it's set - in fact, we don't have a
- * choice since we can't see the old mapping when validating an
+ * user to clear this option if it's set - in fact, we don't have
+ * a choice since we can't see the old mapping when validating an
* alter.
*/
if (!superuser() && !pw_required)
{"fetch_size", ForeignServerRelationId, false},
{"fetch_size", ForeignTableRelationId, false},
{"password_required", UserMappingRelationId, false},
+
/*
* sslcert and sslkey are in fact libpq options, but we repeat them
- * here to allow them to appear in both foreign server context
- * (when we generate libpq options) and user mapping context
- * (from here).
+ * here to allow them to appear in both foreign server context (when
+ * we generate libpq options) and user mapping context (from here).
*/
{"sslcert", UserMappingRelationId, true},
{"sslkey", UserMappingRelationId, true},
print " $feature_id\n";
}
print " ",
- defined($feature_packages{$feature_id}) ? $feature_packages{$feature_id} : "",
- "\n";
+ defined($feature_packages{$feature_id})
+ ? $feature_packages{$feature_id}
+ : "",
+ "\n";
if ($subfeature_id)
{
print " $subfeature_name\n";
*/
struct varlena *
detoast_attr_slice(struct varlena *attr,
- int32 sliceoffset, int32 slicelength)
+ int32 sliceoffset, int32 slicelength)
{
struct varlena *preslice;
struct varlena *result;
/*
* For compressed values, we need to fetch enough slices to decompress
- * at least the requested part (when a prefix is requested). Otherwise,
- * just fetch all slices.
+ * at least the requested part (when a prefix is requested).
+ * Otherwise, just fetch all slices.
*/
if (slicelength > 0 && sliceoffset >= 0)
{
- int32 max_size;
+ int32 max_size;
/*
* Determine maximum amount of compressed data needed for a prefix
Assert(!VARATT_IS_EXTERNAL_INDIRECT(redirect.pointer));
return detoast_attr_slice(redirect.pointer,
- sliceoffset, slicelength);
+ sliceoffset, slicelength);
}
else if (VARATT_IS_EXTERNAL_EXPANDED(attr))
{
SET_VARSIZE(result, attrsize + VARHDRSZ);
if (attrsize == 0)
- return result; /* Probably shouldn't happen, but just in case. */
+ return result; /* Probably shouldn't happen, but just in
+ * case. */
/*
* Open the toast relation and its indexes
VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);
/*
- * It's nonsense to fetch slices of a compressed datum unless when it's
- * a prefix -- this isn't lo_* we can't return a compressed datum which
- * is meaningful to toast later.
+ * It's nonsense to fetch slices of a compressed datum unless when it's a
+ * prefix -- this isn't lo_* we can't return a compressed datum which is
+ * meaningful to toast later.
*/
Assert(!VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer) || 0 == sliceoffset);
left->buf, right->buf, false, false))
{
/*
- * If the parent page was split, the existing downlink might
- * have moved.
+ * If the parent page was split, the existing downlink might have
+ * moved.
*/
stack->downlinkoffnum = InvalidOffsetNumber;
}
tuples, 2,
stack->downlinkoffnum,
left->buf, right->buf,
- true, /* Unlock parent */
- unlockbuf /* Unlock stack->buffer if caller wants that */
- ))
+ true, /* Unlock parent */
+ unlockbuf /* Unlock stack->buffer if caller wants
+ * that */
+ ))
{
/*
* If the parent page was split, the downlink might have moved.
{
uint32 splitpoint_group;
uint32 splitpoint_phases;
+
splitpoint_group = pg_ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
argtype == XIDOID || argtype == CIDOID))
/* okay, allowed use of hashint4() */ ;
else if ((funcid == F_HASHINT8 || funcid == F_HASHINT8EXTENDED) &&
- (argtype == XID8OID))
+ (argtype == XID8OID))
/* okay, allowed use of hashint8() */ ;
else if ((funcid == F_TIMESTAMP_HASH ||
funcid == F_TIMESTAMP_HASH_EXTENDED) &&
RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
/*
- * Note that heap_multi_insert is not used for catalog tuples yet,
- * but this will cover the gap once that is the case.
+ * Note that heap_multi_insert is not used for catalog tuples yet, but
+ * this will cover the gap once that is the case.
*/
if (needwal && need_cids)
log_heap_new_cid(relation, heaptuples[ndone]);
/* fetch options support procedure if specified */
if (amoptsprocnum != 0)
- procid =index_getprocid(indrel, attnum, amoptsprocnum);
+ procid = index_getprocid(indrel, attnum, amoptsprocnum);
if (!OidIsValid(procid))
{
bool isnull;
if (!DatumGetPointer(attoptions))
- return NULL; /* ok, no options, no procedure */
+ return NULL; /* ok, no options, no procedure */
/*
* Report an error if the opclass's options-parsing procedure does not
BTScanInsert itup_key;
ItemId itemid;
IndexTuple targetkey;
- BlockNumber leftsib, leafblkno;
+ BlockNumber leftsib,
+ leafblkno;
Buffer sleafbuf;
itemid = PageGetItemId(page, P_HIKEY);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
#ifdef USE_ASSERT_CHECKING
+
/*
* This is just an assertion because _bt_lock_subtree_parent should have
* guaranteed tuple has the expected contents
Buffer *subtreeparent, OffsetNumber *poffset,
BlockNumber *topparent, BlockNumber *topparentrightsib)
{
- BlockNumber parent, leftsibparent;
+ BlockNumber parent,
+ leftsibparent;
OffsetNumber parentoffset,
maxoff;
Buffer pbuf;
/*
* Now make sure that the parent deletion is itself safe by examining the
* child's grandparent page. Recurse, passing the parent page as the
- * child page (child's grandparent is the parent on the next level up).
- * If parent deletion is unsafe, then child deletion must also be unsafe
- * (in which case caller cannot delete any pages at all).
+ * child page (child's grandparent is the parent on the next level up). If
+ * parent deletion is unsafe, then child deletion must also be unsafe (in
+ * which case caller cannot delete any pages at all).
*/
*topparent = parent;
*topparentrightsib = opaque->btpo_next;
void *callback_state = vstate->callback_state;
Relation rel = info->index;
bool attempt_pagedel;
- BlockNumber blkno, backtrack_to;
+ BlockNumber blkno,
+ backtrack_to;
Buffer buf;
Page page;
BTPageOpaque opaque;
/*
* We need to save the location of the pivot tuple we chose in the
- * parent page on a stack. If we need to split a page, we'll use
- * the stack to work back up to its parent page. If caller ends up
- * splitting a page one level down, it usually ends up inserting a
- * new pivot tuple/downlink immediately after the location recorded
- * here.
+ * parent page on a stack. If we need to split a page, we'll use the
+ * stack to work back up to its parent page. If caller ends up
+ * splitting a page one level down, it usually ends up inserting a new
+ * pivot tuple/downlink immediately after the location recorded here.
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
static bool _bt_adjacenthtid(ItemPointer lowhtid, ItemPointer highhtid);
static OffsetNumber _bt_bestsplitloc(FindSplitData *state, int perfectpenalty,
bool *newitemonleft, FindSplitStrat strategy);
-static int _bt_defaultinterval(FindSplitData *state);
+static int _bt_defaultinterval(FindSplitData *state);
static int _bt_strategy(FindSplitData *state, SplitPoint *leftpage,
SplitPoint *rightpage, FindSplitStrat *strategy);
static void _bt_interval_edges(FindSplitData *state,
else if (info == XLOG_DBASE_DROP)
{
xl_dbase_drop_rec *xlrec = (xl_dbase_drop_rec *) rec;
- int i;
+ int i;
appendStringInfo(buf, "dir");
for (i = 0; i < xlrec->ntablespaces; i++)
xact_desc_relations(StringInfo buf, char *label, int nrels,
RelFileNode *xnodes)
{
- int i;
+ int i;
if (nrels > 0)
{
static void
xact_desc_subxacts(StringInfo buf, int nsubxacts, TransactionId *subxacts)
{
- int i;
+ int i;
if (nsubxacts > 0)
{
if ((thisgroup->functionset & (((uint64) 1) << i)) != 0)
continue; /* got it */
if (i == SPGIST_OPTIONS_PROC)
- continue; /* optional method */
+ continue; /* optional method */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("operator family \"%s\" of access method %s is missing support function %d for type %s",
if (chain)
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s can only be used in transaction blocks",
"COMMIT AND CHAIN")));
else
if (chain)
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s can only be used in transaction blocks",
"COMMIT AND CHAIN")));
else
if (chain)
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s can only be used in transaction blocks",
"ROLLBACK AND CHAIN")));
else
{
uint8 xact_info;
TimestampTz xtime;
- TimestampTz delayUntil;
+ TimestampTz delayUntil;
long secs;
int microsecs;
switch (ControlFile->state)
{
case DB_SHUTDOWNED:
- /* This is the expected case, so don't be chatty in standalone mode */
+
+ /*
+ * This is the expected case, so don't be chatty in standalone
+ * mode
+ */
ereport(IsPostmasterEnvironment ? LOG : NOTICE,
(errmsg("database system was shut down at %s",
str_time(ControlFile->time))));
datadirpathlen = strlen(DataDir);
/*
- * Report that we are now estimating the total backup size
- * if we're streaming base backup as requested by pg_basebackup
+ * Report that we are now estimating the total backup size if we're
+ * streaming base backup as requested by pg_basebackup
*/
if (tablespaces)
pgstat_progress_update_param(PROGRESS_BASEBACKUP_PHASE,
void
do_pg_abort_backup(int code, Datum arg)
{
- bool emit_warning = DatumGetBool(arg);
+ bool emit_warning = DatumGetBool(arg);
/*
* Quick exit if session is not keeping around a non-exclusive backup
*/
/*
- * We should be able to move to XLOG_FROM_STREAM
- * only in standby mode.
+ * We should be able to move to XLOG_FROM_STREAM only in
+ * standby mode.
*/
Assert(StandbyMode);
{
case XLOG_FROM_ARCHIVE:
case XLOG_FROM_PG_WAL:
+
/*
* WAL receiver must not be running when reading WAL from
* archive or pg_wal.
bool havedata;
/*
- * We should be able to move to XLOG_FROM_STREAM
- * only in standby mode.
+ * We should be able to move to XLOG_FROM_STREAM only in
+ * standby mode.
*/
Assert(StandbyMode);
TimeLineID restartTli;
/*
- * Ignore restore_command when not in archive recovery (meaning
- * we are in crash recovery).
+ * Ignore restore_command when not in archive recovery (meaning we are in
+ * crash recovery).
*/
if (!ArchiveRecoveryRequested)
goto not_available;
FullTransactionId
XLogRecGetFullXid(XLogReaderState *record)
{
- TransactionId xid,
- next_xid;
- uint32 epoch;
+ TransactionId xid,
+ next_xid;
+ uint32 epoch;
/*
* This function is only safe during replay, because it depends on the
epoch = EpochFromFullTransactionId(ShmemVariableCache->nextFullXid);
/*
- * If xid is numerically greater than next_xid, it has to be from the
- * last epoch.
+ * If xid is numerically greater than next_xid, it has to be from the last
+ * epoch.
*/
if (unlikely(xid > next_xid))
--epoch;
}
else
{
- push @{ $catalog_data{pg_description}}, \%descr;
+ push @{ $catalog_data{pg_description} }, \%descr;
}
}
close $schemapg;
# Finally, rename the completed files into place.
-Catalog::RenameTempFile($bkifile, $tmpext);
-Catalog::RenameTempFile($schemafile, $tmpext);
+Catalog::RenameTempFile($bkifile, $tmpext);
+Catalog::RenameTempFile($schemafile, $tmpext);
exit 0;
*/
foreach(cell, parent_cons)
{
- Oid parent = lfirst_oid(cell);
+ Oid parent = lfirst_oid(cell);
ScanKeyInit(&key,
Anum_pg_constraint_oid,
*
* Because of this arrangement, we can correctly catch all
* relevant relations by adding to 'parent_cons' all rows with
- * valid conparentid, and to the 'oids' list all rows with a
- * zero conparentid. If any oids are added to 'oids', redo the
- * first loop above by setting 'restart'.
+ * valid conparentid, and to the 'oids' list all rows with a zero
+ * conparentid. If any oids are added to 'oids', redo the first
+ * loop above by setting 'restart'.
*/
if (OidIsValid(con->conparentid))
parent_cons = list_append_unique_oid(parent_cons,
CastCreate(Oid sourcetypeid, Oid targettypeid, Oid funcid, char castcontext,
char castmethod, DependencyType behavior)
{
- Relation relation;
- HeapTuple tuple;
- Oid castid;
- Datum values[Natts_pg_cast];
- bool nulls[Natts_pg_cast];
- ObjectAddress myself,
- referenced;
+ Relation relation;
+ HeapTuple tuple;
+ Oid castid;
+ Datum values[Natts_pg_cast];
+ bool nulls[Natts_pg_cast];
+ ObjectAddress myself,
+ referenced;
relation = table_open(CastRelationId, RowExclusiveLock);
{
List *result = NIL;
Relation depRel;
- ScanKeyData key[2];
- SysScanDesc scan;
+ ScanKeyData key[2];
+ SysScanDesc scan;
HeapTuple tup;
depRel = table_open(DependRelationId, AccessShareLock);
sdepForm->objid);
break;
case SHARED_DEPENDENCY_POLICY:
+
/*
* Try to remove role from policy; if unable to, remove
* policy.
obj.classId = sdepForm->classid;
obj.objectId = sdepForm->objid;
obj.objectSubId = sdepForm->objsubid;
+
/*
* Acquire lock on object, then verify this dependency
* is still relevant. If not, the object might have
bool vm;
bool need_fsm_vacuum = false;
ForkNumber forks[MAX_FORKNUM];
- BlockNumber blocks[MAX_FORKNUM];
- int nforks = 0;
+ BlockNumber blocks[MAX_FORKNUM];
+ int nforks = 0;
/* Open it at the smgr level if not already done */
RelationOpenSmgr(rel);
blocks[nforks] = nblocks;
nforks++;
- /* Prepare for truncation of the FSM if it exists */
+ /* Prepare for truncation of the FSM if it exists */
fsm = smgrexists(rel->rd_smgr, FSM_FORKNUM);
if (fsm)
{
smgrtruncate(rel->rd_smgr, forks, nforks, blocks);
/*
- * Update upper-level FSM pages to account for the truncation.
- * This is important because the just-truncated pages were likely
- * marked as all-free, and would be preferentially selected.
+ * Update upper-level FSM pages to account for the truncation. This is
+ * important because the just-truncated pages were likely marked as
+ * all-free, and would be preferentially selected.
*/
if (need_fsm_vacuum)
FreeSpaceMapVacuumRange(rel, nblocks, InvalidBlockNumber);
SMgrRelation reln;
Relation rel;
ForkNumber forks[MAX_FORKNUM];
- BlockNumber blocks[MAX_FORKNUM];
- int nforks = 0;
+ BlockNumber blocks[MAX_FORKNUM];
+ int nforks = 0;
bool need_fsm_vacuum = false;
reln = smgropen(xlrec->rnode, InvalidBackendId);
smgrtruncate(reln, forks, nforks, blocks);
/*
- * Update upper-level FSM pages to account for the truncation.
- * This is important because the just-truncated pages were likely
- * marked as all-free, and would be preferentially selected.
+ * Update upper-level FSM pages to account for the truncation. This is
+ * important because the just-truncated pages were likely marked as
+ * all-free, and would be preferentially selected.
*/
if (need_fsm_vacuum)
FreeSpaceMapVacuumRange(rel, xlrec->blkno,
}
else
{
- List *currexts;
+ List *currexts;
/* Avoid duplicates */
currexts = getAutoExtensionsOfObject(address.classId,
Relation rel;
TableScanDesc scan;
HeapTuple tuple;
- List *ltblspc = NIL;
- ListCell *cell;
- int ntblspc;
- int i;
- Oid *tablespace_ids;
+ List *ltblspc = NIL;
+ ListCell *cell;
+ int ntblspc;
+ int i;
+ Oid *tablespace_ids;
rel = table_open(TableSpaceRelationId, AccessShareLock);
scan = table_beginscan_catalog(rel, 0, NULL);
static EventTriggerQueryState *currentEventTriggerState = NULL;
-typedef struct
-{
- const char *obtypename;
- bool supported;
-} event_trigger_support_data;
-
/* Support for dropped objects */
typedef struct SQLDropObject
{
* we don't need to do anything if there were 0 full groups.
*
* We still have to continue after this block if there are no full groups,
- * though, since it's possible that we have workers that did real work even
- * if the leader didn't participate.
+ * though, since it's possible that we have workers that did real work
+ * even if the leader didn't participate.
*/
if (fullsortGroupInfo->groupCount > 0)
{
&incrsortstate->shared_info->sinfo[n];
/*
- * If a worker hasn't processed any sort groups at all, then exclude
- * it from output since it either didn't launch or didn't
+ * If a worker hasn't processed any sort groups at all, then
+ * exclude it from output since it either didn't launch or didn't
* contribute anything meaningful.
*/
fullsortGroupInfo = &incsort_info->fullsortGroupInfo;
/*
* Since we never have any prefix groups unless we've first sorted
* a full groups and transitioned modes (copying the tuples into a
- * prefix group), we don't need to do anything if there were 0 full
- * groups.
+ * prefix group), we don't need to do anything if there were 0
+ * full groups.
*/
if (fullsortGroupInfo->groupCount == 0)
continue;
static void
show_hashagg_info(AggState *aggstate, ExplainState *es)
{
- Agg *agg = (Agg *)aggstate->ss.ps.plan;
- int64 memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
+ Agg *agg = (Agg *) aggstate->ss.ps.plan;
+ int64 memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
Assert(IsA(aggstate, AggState));
* does what is needed, we try to find a sequence of update scripts that
* will get us there.
*/
- filename = get_extension_script_filename(pcontrol, NULL, versionName);
- if (stat(filename, &fst) == 0)
- {
- /* Easy, no extra scripts */
- updateVersions = NIL;
- }
- else
- {
- /* Look for best way to install this version */
- List *evi_list;
- ExtensionVersionInfo *evi_start;
- ExtensionVersionInfo *evi_target;
+ filename = get_extension_script_filename(pcontrol, NULL, versionName);
+ if (stat(filename, &fst) == 0)
+ {
+ /* Easy, no extra scripts */
+ updateVersions = NIL;
+ }
+ else
+ {
+ /* Look for best way to install this version */
+ List *evi_list;
+ ExtensionVersionInfo *evi_start;
+ ExtensionVersionInfo *evi_target;
- /* Extract the version update graph from the script directory */
- evi_list = get_ext_ver_list(pcontrol);
+ /* Extract the version update graph from the script directory */
+ evi_list = get_ext_ver_list(pcontrol);
- /* Identify the target version */
- evi_target = get_ext_ver_info(versionName, &evi_list);
+ /* Identify the target version */
+ evi_target = get_ext_ver_info(versionName, &evi_list);
- /* Identify best path to reach target */
- evi_start = find_install_path(evi_list, evi_target,
- &updateVersions);
+ /* Identify best path to reach target */
+ evi_start = find_install_path(evi_list, evi_target,
+ &updateVersions);
- /* Fail if no path ... */
- if (evi_start == NULL)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("extension \"%s\" has no installation script nor update path for version \"%s\"",
- pcontrol->name, versionName)));
+ /* Fail if no path ... */
+ if (evi_start == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("extension \"%s\" has no installation script nor update path for version \"%s\"",
+ pcontrol->name, versionName)));
- /* Otherwise, install best starting point and then upgrade */
- versionName = evi_start->name;
- }
+ /* Otherwise, install best starting point and then upgrade */
+ versionName = evi_start->name;
+ }
/*
* Fetch control parameters for installation target version
char castmethod;
HeapTuple tuple;
AclResult aclresult;
- ObjectAddress myself;
+ ObjectAddress myself;
sourcetypeid = typenameTypeId(NULL, stmt->sourcetype);
targettypeid = typenameTypeId(NULL, stmt->targettype);
opfamilyoid, /* oid of containing opfamily */
opclassoid; /* oid of opclass we create */
int maxOpNumber, /* amstrategies value */
- optsProcNumber, /* amoptsprocnum value */
+ optsProcNumber, /* amoptsprocnum value */
maxProcNumber; /* amsupport value */
bool amstorage; /* amstorage flag */
List *operators; /* OpFamilyMember list for operators */
Oid amoid, /* our AM's oid */
opfamilyoid; /* oid of opfamily */
int maxOpNumber, /* amstrategies value */
- optsProcNumber, /* amopclassopts value */
+ optsProcNumber, /* amopclassopts value */
maxProcNumber; /* amsupport value */
HeapTuple tup;
Form_pg_am amform;
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("btree equal image functions must return boolean")));
+
/*
* pg_amproc functions are indexed by (lefttype, righttype), but
* an equalimage function can only be called at CREATE INDEX time.
* invalidate all partitions contained in the respective partition
* trees, not just those explicitly mentioned in the publication.
*/
- List *relids = GetPublicationRelations(pubform->oid,
- PUBLICATION_PART_ALL);
+ List *relids = GetPublicationRelations(pubform->oid,
+ PUBLICATION_PART_ALL);
/*
* We don't want to send too many individual messages, at some point
PublicationDropTables(pubid, rels, false);
else /* DEFELEM_SET */
{
- List *oldrelids = GetPublicationRelations(pubid,
- PUBLICATION_PART_ROOT);
+ List *oldrelids = GetPublicationRelations(pubid,
+ PUBLICATION_PART_ROOT);
List *delrels = NIL;
ListCell *oldlc;
Datum repl_val[Natts_pg_statistic_ext];
bool repl_null[Natts_pg_statistic_ext];
bool repl_repl[Natts_pg_statistic_ext];
- ObjectAddress address;
+ ObjectAddress address;
int newtarget = stmt->stxstattarget;
/* Limit statistics target to a sane range */
stxoid = get_statistics_object_oid(stmt->defnames, stmt->missing_ok);
/*
- * If we got here and the OID is not valid, it means the statistics
- * does not exist, but the command specified IF EXISTS. So report
- * this as a simple NOTICE and we're done.
+ * If we got here and the OID is not valid, it means the statistics does
+ * not exist, but the command specified IF EXISTS. So report this as a
+ * simple NOTICE and we're done.
*/
if (!OidIsValid(stxoid))
{
List *changedIndexOids; /* OIDs of indexes to rebuild */
List *changedIndexDefs; /* string definitions of same */
char *replicaIdentityIndex; /* index to reset as REPLICA IDENTITY */
- char *clusterOnIndex; /* index to use for CLUSTER */
+ char *clusterOnIndex; /* index to use for CLUSTER */
} AlteredTableInfo;
/* Struct describing one new constraint to check in Phase 3 scan */
if (drop->concurrent)
{
/*
- * Note that for temporary relations this lock may get upgraded
- * later on, but as no other session can access a temporary
- * relation, this is actually fine.
+ * Note that for temporary relations this lock may get upgraded later
+ * on, but as no other session can access a temporary relation, this
+ * is actually fine.
*/
lockmode = ShareUpdateExclusiveLock;
Assert(drop->removeType == OBJECT_INDEX);
}
/*
- * Inherited TRUNCATE commands perform access
- * permission checks on the parent table only.
- * So we skip checking the children's permissions
- * and don't call truncate_check_perms() here.
+ * Inherited TRUNCATE commands perform access permission
+ * checks on the parent table only. So we skip checking the
+ * children's permissions and don't call
+ * truncate_check_perms() here.
*/
truncate_check_rel(RelationGetRelid(rel), rel->rd_rel);
truncate_check_activity(rel);
errmsg("column \"%s\" inherits from generated column but specifies identity",
def->colname)));
}
+
/*
* If the parent column is not generated, then take whatever
* the child column definition says.
*/
foreach(lc, RelationGetIndexList(rel))
{
- Oid indexoid = lfirst_oid(lc);
- Relation indrel;
+ Oid indexoid = lfirst_oid(lc);
+ Relation indrel;
AttrNumber indattnum = 0;
indrel = index_open(indexoid, lockmode);
DropClonedTriggersFromPartition(Oid partitionId)
{
ScanKeyData skey;
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple trigtup;
Relation tgrel;
ObjectAddresses *objects;
/*
* After a tuple in a partition goes through a trigger, the user
- * could have changed the partition key enough that the tuple
- * no longer fits the partition. Verify that.
+ * could have changed the partition key enough that the tuple no
+ * longer fits the partition. Verify that.
*/
if (trigger->tgisclone &&
!ExecPartitionCheck(relinfo, slot, estate, false))
bool nullcheck)
{
ExprContext *aggcontext;
- int adjust_jumpnull = -1;
+ int adjust_jumpnull = -1;
if (ishash)
aggcontext = aggstate->hashcontext;
static Datum ExecJustAssignScanVarVirt(ExprState *state, ExprContext *econtext, bool *isnull);
/* execution helper functions */
-static pg_attribute_always_inline void
-ExecAggPlainTransByVal(AggState *aggstate, AggStatePerTrans pertrans,
- AggStatePerGroup pergroup,
- ExprContext *aggcontext, int setno);
-
-static pg_attribute_always_inline void
-ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans,
- AggStatePerGroup pergroup,
- ExprContext *aggcontext, int setno);
+static pg_attribute_always_inline void ExecAggPlainTransByVal(AggState *aggstate,
+ AggStatePerTrans pertrans,
+ AggStatePerGroup pergroup,
+ ExprContext *aggcontext,
+ int setno);
+static pg_attribute_always_inline void ExecAggPlainTransByRef(AggState *aggstate,
+ AggStatePerTrans pertrans,
+ AggStatePerGroup pergroup,
+ ExprContext *aggcontext,
+ int setno);
/*
* Prepare ExprState for interpreted execution.
EEO_CASE(EEOP_AGG_PLAIN_PERGROUP_NULLCHECK)
{
AggState *aggstate = castNode(AggState, state->parent);
- AggStatePerGroup pergroup_allaggs = aggstate->all_pergroups
- [op->d.agg_plain_pergroup_nullcheck.setoff];
+ AggStatePerGroup pergroup_allaggs =
+ aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL)
EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
newVal = FunctionCallInvoke(fcinfo);
/*
- * For pass-by-ref datatype, must copy the new value into
- * aggcontext and free the prior transValue. But if transfn
- * returned a pointer to its first input, we don't need to do
- * anything. Also, if transfn returned a pointer to a R/W
- * expanded object that is already a child of the aggcontext,
- * assume we can adopt that value without copying it.
+ * For pass-by-ref datatype, must copy the new value into aggcontext and
+ * free the prior transValue. But if transfn returned a pointer to its
+ * first input, we don't need to do anything. Also, if transfn returned a
+ * pointer to a R/W expanded object that is already a child of the
+ * aggcontext, assume we can adopt that value without copying it.
*
- * It's safe to compare newVal with pergroup->transValue without
- * regard for either being NULL, because ExecAggTransReparent()
- * takes care to set transValue to 0 when NULL. Otherwise we could
- * end up accidentally not reparenting, when the transValue has
- * the same numerical value as newValue, despite being NULL. This
- * is a somewhat hot path, making it undesirable to instead solve
- * this with another branch for the common case of the transition
- * function returning its (modified) input argument.
+ * It's safe to compare newVal with pergroup->transValue without regard
+ * for either being NULL, because ExecAggTransReparent() takes care to set
+ * transValue to 0 when NULL. Otherwise we could end up accidentally not
+ * reparenting, when the transValue has the same numerical value as
+ * newValue, despite being NULL. This is a somewhat hot path, making it
+ * undesirable to instead solve this with another branch for the common
+ * case of the transition function returning its (modified) input
+ * argument.
*/
if (DatumGetPointer(newVal) != DatumGetPointer(pergroup->transValue))
newVal = ExecAggTransReparent(aggstate, pertrans,
LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
bool *isnew)
{
- TupleHashEntry entry;
- MemoryContext oldContext;
- uint32 hash;
+ TupleHashEntry entry;
+ MemoryContext oldContext;
+ uint32 hash;
/* Need to run the hash functions in short-lived context */
oldContext = MemoryContextSwitchTo(hashtable->tempcxt);
uint32
TupleHashTableHash(TupleHashTable hashtable, TupleTableSlot *slot)
{
- MemoryContext oldContext;
- uint32 hash;
+ MemoryContext oldContext;
+ uint32 hash;
hashtable->inputslot = slot;
hashtable->in_hash_funcs = hashtable->tab_hash_funcs;
LookupTupleHashEntryHash(TupleHashTable hashtable, TupleTableSlot *slot,
bool *isnew, uint32 hash)
{
- TupleHashEntry entry;
- MemoryContext oldContext;
+ TupleHashEntry entry;
+ MemoryContext oldContext;
/* Need to run the hash functions in short-lived context */
oldContext = MemoryContextSwitchTo(hashtable->tempcxt);
if (first_time)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
if (tupdesc == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
/*
* This is the first non-NULL result from the
if (rsinfo.setResult == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
ExprContext *
CreateWorkExprContext(EState *estate)
{
- Size minContextSize = ALLOCSET_DEFAULT_MINSIZE;
- Size initBlockSize = ALLOCSET_DEFAULT_INITSIZE;
- Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
+ Size minContextSize = ALLOCSET_DEFAULT_MINSIZE;
+ Size initBlockSize = ALLOCSET_DEFAULT_INITSIZE;
+ Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
/* choose the maxBlockSize to be no larger than 1/16 of work_mem */
while (16 * maxBlockSize > work_mem * 1024L)
*/
typedef struct HashTapeInfo
{
- LogicalTapeSet *tapeset;
- int ntapes;
- int *freetapes;
- int nfreetapes;
- int freetapes_alloc;
+ LogicalTapeSet *tapeset;
+ int ntapes;
+ int *freetapes;
+ int nfreetapes;
+ int freetapes_alloc;
} HashTapeInfo;
/*
typedef struct HashAggSpill
{
LogicalTapeSet *tapeset; /* borrowed reference to tape set */
- int npartitions; /* number of partitions */
- int *partitions; /* spill partition tape numbers */
- int64 *ntuples; /* number of tuples in each partition */
- uint32 mask; /* mask to find partition from hash value */
- int shift; /* after masking, shift by this amount */
+ int npartitions; /* number of partitions */
+ int *partitions; /* spill partition tape numbers */
+ int64 *ntuples; /* number of tuples in each partition */
+ uint32 mask; /* mask to find partition from hash value */
+ int shift; /* after masking, shift by this amount */
} HashAggSpill;
/*
*/
typedef struct HashAggBatch
{
- int setno; /* grouping set */
- int used_bits; /* number of bits of hash already used */
- LogicalTapeSet *tapeset; /* borrowed reference to tape set */
- int input_tapenum; /* input partition tape */
- int64 input_tuples; /* number of tuples in this batch */
+ int setno; /* grouping set */
+ int used_bits; /* number of bits of hash already used */
+ LogicalTapeSet *tapeset; /* borrowed reference to tape set */
+ int input_tapenum; /* input partition tape */
+ int64 input_tuples; /* number of tuples in this batch */
} HashAggBatch;
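
The used_bits, mask, and shift fields above describe how a spill partition number is derived from the 32-bit group hash. A small self-contained sketch of that mask-then-shift computation, assuming the layout the field comments imply; the constants are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Editorial sketch, not part of the patch: pick a spill partition from
	 * a hash value using a mask over the bits not yet consumed. */
	int
	main(void)
	{
		uint32_t	hash = 0xDEADBEEFu;
		int			used_bits = 8;		/* hash bits already consumed */
		int			partition_bits = 4; /* 1 << 4 = 16 partitions */
		int			shift = 32 - used_bits - partition_bits;
		uint32_t	mask = ((1u << partition_bits) - 1) << shift;
		uint32_t	partition = (hash & mask) >> shift;

		printf("partition %u of %u\n", (unsigned) partition,
			   1u << partition_bits);
		return 0;
	}
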
static void select_current_set(AggState *aggstate, int setno, bool is_hash);
static long hash_choose_num_buckets(double hashentrysize,
long estimated_nbuckets,
Size memory);
-static int hash_choose_num_partitions(uint64 input_groups,
- double hashentrysize,
- int used_bits,
- int *log2_npartittions);
+static int hash_choose_num_partitions(uint64 input_groups,
+ double hashentrysize,
+ int used_bits,
+ int *log2_npartittions);
static AggStatePerGroup lookup_hash_entry(AggState *aggstate, uint32 hash,
bool *in_hash_table);
static void lookup_hash_entries(AggState *aggstate);
* pointer to a R/W expanded object that is already a child of the
* aggcontext, assume we can adopt that value without copying it.
*
- * It's safe to compare newVal with pergroup->transValue without
- * regard for either being NULL, because ExecAggTransReparent()
- * takes care to set transValue to 0 when NULL. Otherwise we could
- * end up accidentally not reparenting, when the transValue has
- * the same numerical value as newValue, despite being NULL. This
- * is a somewhat hot path, making it undesirable to instead solve
- * this with another branch for the common case of the transition
- * function returning its (modified) input argument.
+ * It's safe to compare newVal with pergroup->transValue without regard
+ * for either being NULL, because ExecAggTransReparent() takes care to set
+ * transValue to 0 when NULL. Otherwise we could end up accidentally not
+ * reparenting, when the transValue has the same numerical value as
+ * newValue, despite being NULL. This is a somewhat hot path, making it
+ * undesirable to instead solve this with another branch for the common
+ * case of the transition function returning its (modified) input
+ * argument.
*/
if (!pertrans->transtypeByVal &&
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
TupleTableSlot *inputslot = aggstate->tmpcontext->ecxt_outertuple;
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
- int i;
+ int i;
/* transfer just the needed columns into hashslot */
slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
static void
build_hash_tables(AggState *aggstate)
{
- int setno;
+ int setno;
for (setno = 0; setno < aggstate->num_hashes; ++setno)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- long nbuckets;
- Size memory;
+ long nbuckets;
+ Size memory;
if (perhash->hashtable != NULL)
{
memory = aggstate->hash_mem_limit / aggstate->num_hashes;
/* choose reasonable number of buckets per hashtable */
- nbuckets = hash_choose_num_buckets(
- aggstate->hashentrysize, perhash->aggnode->numGroups, memory);
+ nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
+ perhash->aggnode->numGroups,
+ memory);
build_hash_table(aggstate, setno, nbuckets);
}
build_hash_table(AggState *aggstate, int setno, long nbuckets)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- MemoryContext metacxt = aggstate->hash_metacxt;
- MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
- MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
- Size additionalsize;
+ MemoryContext metacxt = aggstate->hash_metacxt;
+ MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
+ MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
+ Size additionalsize;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
*/
additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
- perhash->hashtable = BuildTupleHashTableExt(
- &aggstate->ss.ps,
- perhash->hashslot->tts_tupleDescriptor,
- perhash->numCols,
- perhash->hashGrpColIdxHash,
- perhash->eqfuncoids,
- perhash->hashfunctions,
- perhash->aggnode->grpCollations,
- nbuckets,
- additionalsize,
- metacxt,
- hashcxt,
- tmpcxt,
- DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+ perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps,
+ perhash->hashslot->tts_tupleDescriptor,
+ perhash->numCols,
+ perhash->hashGrpColIdxHash,
+ perhash->eqfuncoids,
+ perhash->hashfunctions,
+ perhash->aggnode->grpCollations,
+ nbuckets,
+ additionalsize,
+ metacxt,
+ hashcxt,
+ tmpcxt,
+ DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
}
/*
Size
hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
{
- Size tupleChunkSize;
- Size pergroupChunkSize;
- Size transitionChunkSize;
- Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
- tupleWidth);
- Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
+ Size tupleChunkSize;
+ Size pergroupChunkSize;
+ Size transitionChunkSize;
+ Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
+ tupleWidth);
+ Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
tupleChunkSize = CHUNKHDRSZ + tupleSize;
static void
hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
{
- AggStatePerPhase phase;
- int i = minslot ? 1 : 0;
- int j = nullcheck ? 1 : 0;
+ AggStatePerPhase phase;
+ int i = minslot ? 1 : 0;
+ int j = nullcheck ? 1 : 0;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
if (aggstate->aggstrategy == AGG_HASHED)
phase = &aggstate->phases[0];
- else /* AGG_MIXED */
+ else /* AGG_MIXED */
phase = &aggstate->phases[1];
if (phase->evaltrans_cache[i][j] == NULL)
{
- const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
- bool outerfixed = aggstate->ss.ps.outeropsfixed;
- bool dohash = true;
- bool dosort;
+ const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
+ bool outerfixed = aggstate->ss.ps.outeropsfixed;
+ bool dohash = true;
+ bool dosort;
dosort = aggstate->aggstrategy == AGG_MIXED ? true : false;
aggstate->ss.ps.outeropsfixed = true;
}
- phase->evaltrans_cache[i][j] = ExecBuildAggTrans(
- aggstate, phase, dosort, dohash, nullcheck);
+ phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
+ dosort, dohash,
+ nullcheck);
/* change back */
aggstate->ss.ps.outerops = outerops;
Size *mem_limit, uint64 *ngroups_limit,
int *num_partitions)
{
- int npartitions;
- Size partition_mem;
+ int npartitions;
+ Size partition_mem;
/* if not expected to spill, use all of work_mem */
if (input_groups * hashentrysize < work_mem * 1024L)
/*
* Calculate expected memory requirements for spilling, which is the size
- * of the buffers needed for all the tapes that need to be open at
- * once. Then, subtract that from the memory available for holding hash
- * tables.
+ * of the buffers needed for all the tapes that need to be open at once.
+ * Then, subtract that from the memory available for holding hash tables.
*/
npartitions = hash_choose_num_partitions(input_groups,
hashentrysize,
static void
hash_agg_check_limits(AggState *aggstate)
{
- uint64 ngroups = aggstate->hash_ngroups_current;
- Size meta_mem = MemoryContextMemAllocated(
- aggstate->hash_metacxt, true);
- Size hash_mem = MemoryContextMemAllocated(
- aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ uint64 ngroups = aggstate->hash_ngroups_current;
+ Size meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
+ true);
+ Size hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
+ true);
/*
* Don't spill unless there's at least one group in the hash table so we
hashagg_tapeinfo_init(aggstate);
- aggstate->hash_spills = palloc(
- sizeof(HashAggSpill) * aggstate->num_hashes);
+ aggstate->hash_spills = palloc(sizeof(HashAggSpill) * aggstate->num_hashes);
for (int setno = 0; setno < aggstate->num_hashes; setno++)
{
- AggStatePerHash perhash = &aggstate->perhash[setno];
- HashAggSpill *spill = &aggstate->hash_spills[setno];
+ AggStatePerHash perhash = &aggstate->perhash[setno];
+ HashAggSpill *spill = &aggstate->hash_spills[setno];
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
perhash->aggnode->numGroups,
static void
hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
- Size meta_mem;
- Size hash_mem;
- Size buffer_mem;
- Size total_mem;
+ Size meta_mem;
+ Size hash_mem;
+ Size buffer_mem;
+ Size total_mem;
if (aggstate->aggstrategy != AGG_MIXED &&
aggstate->aggstrategy != AGG_HASHED)
meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
/* memory for the group keys and transition states */
- hash_mem = MemoryContextMemAllocated(
- aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
/* memory for read/write tape buffers, if spilled */
buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
/* update disk usage */
if (aggstate->hash_tapeinfo != NULL)
{
- uint64 disk_used = LogicalTapeSetBlocks(
- aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
+ uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
if (aggstate->hash_disk_used < disk_used)
aggstate->hash_disk_used = disk_used;
{
aggstate->hashentrysize =
sizeof(TupleHashEntryData) +
- (hash_mem / (double)aggstate->hash_ngroups_current);
+ (hash_mem / (double) aggstate->hash_ngroups_current);
}
}
static long
hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
{
- long max_nbuckets;
- long nbuckets = ngroups;
+ long max_nbuckets;
+ long nbuckets = ngroups;
max_nbuckets = memory / hashentrysize;
hash_choose_num_partitions(uint64 input_groups, double hashentrysize,
int used_bits, int *log2_npartitions)
{
- Size mem_wanted;
- int partition_limit;
- int npartitions;
- int partition_bits;
+ Size mem_wanted;
+ int partition_limit;
+ int npartitions;
+ int partition_bits;
/*
* Avoid creating so many partitions that the memory requirements of the
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
TupleHashEntryData *entry;
- bool isnew = false;
- bool *p_isnew;
+ bool isnew = false;
+ bool *p_isnew;
/* if hash table already spilled, don't create new entries */
p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
if (isnew)
{
- AggStatePerGroup pergroup;
- int transno;
+ AggStatePerGroup pergroup;
+ int transno;
aggstate->hash_ngroups_current++;
hash_agg_check_limits(aggstate);
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
- AggStatePerHash perhash = &aggstate->perhash[setno];
- uint32 hash;
- bool in_hash_table;
+ AggStatePerHash perhash = &aggstate->perhash[setno];
+ uint32 hash;
+ bool in_hash_table;
select_current_set(aggstate, setno, true);
prepare_hash_slot(aggstate);
/* check to see if we need to spill the tuple for this grouping set */
if (!in_hash_table)
{
- HashAggSpill *spill = &aggstate->hash_spills[setno];
- TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
+ HashAggSpill *spill = &aggstate->hash_spills[setno];
+ TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
if (spill->partitions == NULL)
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
static bool
agg_refill_hash_table(AggState *aggstate)
{
- HashAggBatch *batch;
- HashAggSpill spill;
- HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
- uint64 ngroups_estimate;
- bool spill_initialized = false;
+ HashAggBatch *batch;
+ HashAggSpill spill;
+ HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+ uint64 ngroups_estimate;
+ bool spill_initialized = false;
if (aggstate->hash_batches == NIL)
return false;
LogicalTapeRewindForRead(tapeinfo->tapeset, batch->input_tapenum,
HASHAGG_READ_BUFFER_SIZE);
- for (;;) {
- TupleTableSlot *slot = aggstate->hash_spill_slot;
- MinimalTuple tuple;
- uint32 hash;
- bool in_hash_table;
+ for (;;)
+ {
+ TupleTableSlot *slot = aggstate->hash_spill_slot;
+ MinimalTuple tuple;
+ uint32 hash;
+ bool in_hash_table;
CHECK_FOR_INTERRUPTS();
aggstate->tmpcontext->ecxt_outertuple = slot;
prepare_hash_slot(aggstate);
- aggstate->hash_pergroup[batch->setno] = lookup_hash_entry(
- aggstate, hash, &in_hash_table);
+ aggstate->hash_pergroup[batch->setno] =
+ lookup_hash_entry(aggstate, hash, &in_hash_table);
if (in_hash_table)
{
*/
spill_initialized = true;
hashagg_spill_init(&spill, tapeinfo, batch->used_bits,
- ngroups_estimate, aggstate->hashentrysize);
+ ngroups_estimate, aggstate->hashentrysize);
}
/* no memory for a new group, spill */
hashagg_spill_tuple(&spill, slot, hash);
static void
hashagg_tapeinfo_init(AggState *aggstate)
{
- HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
- int init_tapes = 16; /* expanded dynamically */
+ HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
+ int init_tapes = 16; /* expanded dynamically */
tapeinfo->tapeset = LogicalTapeSetCreate(init_tapes, NULL, NULL, -1);
tapeinfo->ntapes = init_tapes;
hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *partitions,
int npartitions)
{
- int partidx = 0;
+ int partidx = 0;
/* use free tapes if available */
while (partidx < npartitions && tapeinfo->nfreetapes > 0)
if (tapeinfo->freetapes_alloc == tapeinfo->nfreetapes)
{
tapeinfo->freetapes_alloc <<= 1;
- tapeinfo->freetapes = repalloc(
- tapeinfo->freetapes, tapeinfo->freetapes_alloc * sizeof(int));
+ tapeinfo->freetapes = repalloc(tapeinfo->freetapes,
+ tapeinfo->freetapes_alloc * sizeof(int));
}
tapeinfo->freetapes[tapeinfo->nfreetapes++] = tapenum;
}
hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
uint64 input_groups, double hashentrysize)
{
- int npartitions;
- int partition_bits;
+ int npartitions;
+ int partition_bits;
- npartitions = hash_choose_num_partitions(
- input_groups, hashentrysize, used_bits, &partition_bits);
+ npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
+ used_bits, &partition_bits);
spill->partitions = palloc0(sizeof(int) * npartitions);
spill->ntuples = palloc0(sizeof(int64) * npartitions);
static Size
hashagg_spill_tuple(HashAggSpill *spill, TupleTableSlot *slot, uint32 hash)
{
- LogicalTapeSet *tapeset = spill->tapeset;
- int partition;
- MinimalTuple tuple;
- int tapenum;
- int total_written = 0;
- bool shouldFree;
+ LogicalTapeSet *tapeset = spill->tapeset;
+ int partition;
+ MinimalTuple tuple;
+ int tapenum;
+ int total_written = 0;
+ bool shouldFree;
Assert(spill->partitions != NULL);
hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
{
LogicalTapeSet *tapeset = batch->tapeset;
- int tapenum = batch->input_tapenum;
- MinimalTuple tuple;
- uint32 t_len;
- size_t nread;
- uint32 hash;
+ int tapenum = batch->input_tapenum;
+ MinimalTuple tuple;
+ uint32 t_len;
+ size_t nread;
+ uint32 hash;
nread = LogicalTapeRead(tapeset, tapenum, &hash, sizeof(uint32));
if (nread == 0)
tuple->t_len = t_len;
nread = LogicalTapeRead(tapeset, tapenum,
- (void *)((char *)tuple + sizeof(uint32)),
+ (void *) ((char *) tuple + sizeof(uint32)),
t_len - sizeof(uint32));
if (nread != t_len - sizeof(uint32))
ereport(ERROR,
static void
hashagg_finish_initial_spills(AggState *aggstate)
{
- int setno;
- int total_npartitions = 0;
+ int setno;
+ int total_npartitions = 0;
if (aggstate->hash_spills != NULL)
{
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
+
total_npartitions += spill->npartitions;
hashagg_spill_finish(aggstate, spill, setno);
}
static void
hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
{
- int i;
- int used_bits = 32 - spill->shift;
+ int i;
+ int used_bits = 32 - spill->shift;
if (spill->npartitions == 0)
- return; /* didn't spill */
+ return; /* didn't spill */
for (i = 0; i < spill->npartitions; i++)
{
- int tapenum = spill->partitions[i];
- HashAggBatch *new_batch;
+ int tapenum = spill->partitions[i];
+ HashAggBatch *new_batch;
/* if the partition is empty, don't create a new batch of work */
if (spill->ntuples[i] == 0)
static void
hashagg_reset_spill_state(AggState *aggstate)
{
- ListCell *lc;
+ ListCell *lc;
/* free spills from initial pass */
if (aggstate->hash_spills != NULL)
{
- int setno;
+ int setno;
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
+
pfree(spill->ntuples);
pfree(spill->partitions);
}
/* free batches */
foreach(lc, aggstate->hash_batches)
{
- HashAggBatch *batch = (HashAggBatch*) lfirst(lc);
+ HashAggBatch *batch = (HashAggBatch *) lfirst(lc);
+
pfree(batch);
}
list_free(aggstate->hash_batches);
/* close tape set */
if (aggstate->hash_tapeinfo != NULL)
{
- HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+ HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
LogicalTapeSetClose(tapeinfo->tapeset);
pfree(tapeinfo->freetapes);
*/
if (use_hashing)
{
- Plan *outerplan = outerPlan(node);
- uint64 totalGroups = 0;
- int i;
+ Plan *outerplan = outerPlan(node);
+ uint64 totalGroups = 0;
+ int i;
- aggstate->hash_metacxt = AllocSetContextCreate(
- aggstate->ss.ps.state->es_query_cxt,
- "HashAgg meta context",
- ALLOCSET_DEFAULT_SIZES);
- aggstate->hash_spill_slot = ExecInitExtraTupleSlot(
- estate, scanDesc, &TTSOpsMinimalTuple);
+ aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
+ "HashAgg meta context",
+ ALLOCSET_DEFAULT_SIZES);
+ aggstate->hash_spill_slot = ExecInitExtraTupleSlot(estate, scanDesc,
+ &TTSOpsMinimalTuple);
/* this is an array of pointers, not structures */
aggstate->hash_pergroup = pergroups;
- aggstate->hashentrysize = hash_agg_entry_size(
- aggstate->numtrans, outerplan->plan_width, node->transitionSpace);
+ aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
+ outerplan->plan_width,
+ node->transitionSpace);
/*
* Consider all of the grouping sets together when setting the limits
ExecInitQual(node->bitmapqualorig, (PlanState *) scanstate);
/*
- * Maximum number of prefetches for the tablespace if configured, otherwise
- * the current value of the effective_io_concurrency GUC.
+ * Maximum number of prefetches for the tablespace if configured,
+ * otherwise the current value of the effective_io_concurrency GUC.
*/
scanstate->prefetch_maximum =
get_tablespace_io_concurrency(currentRelation->rd_rel->reltablespace);
* - groupName: the token fullsort or prefixsort
*/
#define INSTRUMENT_SORT_GROUP(node, groupName) \
- if (node->ss.ps.instrument != NULL) \
- { \
- if (node->shared_info && node->am_worker) \
+ do { \
+ if ((node)->ss.ps.instrument != NULL) \
{ \
- Assert(IsParallelWorker()); \
- Assert(ParallelWorkerNumber <= node->shared_info->num_workers); \
- instrumentSortedGroup(&node->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, node->groupName##_state); \
- } else { \
- instrumentSortedGroup(&node->incsort_info.groupName##GroupInfo, node->groupName##_state); \
+ if ((node)->shared_info && (node)->am_worker) \
+ { \
+ Assert(IsParallelWorker()); \
+ Assert(ParallelWorkerNumber <= (node)->shared_info->num_workers); \
+ instrumentSortedGroup(&(node)->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, \
+ (node)->groupName##_state); \
+ } \
+ else \
+ { \
+ instrumentSortedGroup(&(node)->incsort_info.groupName##GroupInfo, \
+ (node)->groupName##_state); \
+ } \
} \
- }
+ } while (0)
+
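
The rewrite above also wraps the multi-statement macro in the usual do { ... } while (0) form, which is what lets the call sites below gain a trailing semicolon and still behave as a single statement under if/else. A tiny illustration of why the wrapper matters; the macro and names here are invented for this sketch:

	#include <stdio.h>

	/* Editorial sketch, not part of the patch: without the do/while wrapper,
	 * a two-statement macro used as an if-branch splits across the branch,
	 * and a trailing semicolon breaks an if/else chain entirely. */
	#define LOG_TWICE(msg) \
		do { \
			printf("%s\n", (msg)); \
			printf("%s\n", (msg)); \
		} while (0)

	int
	main(void)
	{
		int			enabled = 0;

		if (enabled)
			LOG_TWICE("on");	/* expands to one statement, semicolon is fine */
		else
			LOG_TWICE("off");

		return 0;
	}
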
/* ----------------------------------------------------------------
* instrumentSortedGroup
Tuplesortstate *sortState)
{
TuplesortInstrumentation sort_instr;
+
groupInfo->groupCount++;
tuplesort_get_stats(sortState, &sort_instr);
SO1_printf("Sorting presorted prefix tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
- INSTRUMENT_SORT_GROUP(node, prefixsort)
+ INSTRUMENT_SORT_GROUP(node, prefixsort);
if (node->bounded)
{
SO1_printf("Sorting fullsort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
- INSTRUMENT_SORT_GROUP(node, fullsort)
+ INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (final tuple)\n");
node->execution_status = INCSORT_READFULLSORT;
nTuples);
tuplesort_performsort(fullsort_state);
- INSTRUMENT_SORT_GROUP(node, fullsort)
+ INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (found end of group)\n");
node->execution_status = INCSORT_READFULLSORT;
}
/*
- * Unless we've already transitioned modes to reading from the full
- * sort state, then we assume that having read at least
+ * Unless we've already transitioned modes to reading from the
+ * full sort state, then we assume that having read at least
* DEFAULT_MAX_FULL_SORT_GROUP_SIZE tuples means it's likely we're
* processing a large group of tuples all having equal prefix keys
* (but haven't yet found the final tuple in that prefix key
SO1_printf("Sorting fullsort tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
- INSTRUMENT_SORT_GROUP(node, fullsort)
+ INSTRUMENT_SORT_GROUP(node, fullsort);
/*
* If the full sort tuplesort happened to switch into top-n
/*
* We might have multiple prefix key groups in the full sort
- * state, so the mode transition function needs to know that it
- * needs to move from the fullsort to presorted prefix sort.
+ * state, so the mode transition function needs to know that
+ * it needs to move from the fullsort to presorted prefix
+ * sort.
*/
node->n_fullsort_remaining = nTuples;
SO1_printf("Sorting presorted prefix tuplesort with >= %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
- INSTRUMENT_SORT_GROUP(node, prefixsort)
+ INSTRUMENT_SORT_GROUP(node, prefixsort);
SO_printf("Setting execution_status to INCSORT_READPREFIXSORT (found end of group)\n");
node->execution_status = INCSORT_READPREFIXSORT;
SO_printf("ExecInitIncrementalSort: initializing sort node\n");
/*
- * Incremental sort can't be used with EXEC_FLAG_BACKWARD or EXEC_FLAG_MARK,
- * because the current sort state contains only one sort batch rather than
- * the full result set.
+ * Incremental sort can't be used with EXEC_FLAG_BACKWARD or
+ * EXEC_FLAG_MARK, because the current sort state contains only one sort
+ * batch rather than the full result set.
*/
Assert((eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)) == 0);
* Initialize child nodes.
*
* Incremental sort does not support backwards scans and mark/restore, so
- * we don't bother removing the flags from eflags here. We allow passing
- * a REWIND flag, because although incremental sort can't use it, the child
+ * we don't bother removing the flags from eflags here. We allow passing a
+ * REWIND flag, because although incremental sort can't use it, the child
* nodes may be able to do something more useful.
*/
outerPlanState(incrsortstate) = ExecInitNode(outerPlan(node), estate, eflags);
* re-execute the sort along with the child node. Incremental sort itself
* can't do anything smarter, but maybe the child nodes can.
*
- * In theory if we've only filled the full sort with one batch (and haven't
- * reset it for a new batch yet) then we could efficiently rewind, but
- * that seems a narrow enough case that it's not worth handling specially
- * at this time.
+ * In theory if we've only filled the full sort with one batch (and
+ * haven't reset it for a new batch yet) then we could efficiently rewind,
+ * but that seems a narrow enough case that it's not worth handling
+ * specially at this time.
*/
/* must drop pointer to sort result tuple */
/*
* If we've set up either of the sort states yet, we need to reset them.
* We could end them and null out the pointers, but there's no reason to
- * repay the setup cost, and because ExecIncrementalSort guards
- * presorted column functions by checking to see if the full sort state
- * has been initialized yet, setting the sort states to null here might
- * actually cause a leak.
+ * repay the setup cost, and because ExecIncrementalSort guards presorted
+ * column functions by checking to see if the full sort state has been
+ * initialized yet, setting the sort states to null here might actually
+ * cause a leak.
*/
if (node->fullsort_state != NULL)
{
if (tidstate->ss.ss_currentScanDesc == NULL)
tidstate->ss.ss_currentScanDesc =
table_beginscan_tid(tidstate->ss.ss_currentRelation,
- tidstate->ss.ps.state->es_snapshot);
+ tidstate->ss.ps.state->es_snapshot);
scan = tidstate->ss.ss_currentScanDesc;
/*
case EEOP_AGG_PLAIN_PERGROUP_NULLCHECK:
{
- int jumpnull;
- LLVMValueRef v_aggstatep;
- LLVMValueRef v_allpergroupsp;
- LLVMValueRef v_pergroup_allaggs;
- LLVMValueRef v_setoff;
+ int jumpnull;
+ LLVMValueRef v_aggstatep;
+ LLVMValueRef v_allpergroupsp;
+ LLVMValueRef v_pergroup_allaggs;
+ LLVMValueRef v_setoff;
jumpnull = op->d.agg_plain_pergroup_nullcheck.jumpnull;
* pergroup_allaggs = aggstate->all_pergroups
* [op->d.agg_plain_pergroup_nullcheck.setoff];
*/
- v_aggstatep = LLVMBuildBitCast(
- b, v_parent, l_ptr(StructAggState), "");
+ v_aggstatep = LLVMBuildBitCast(b, v_parent,
+ l_ptr(StructAggState), "");
- v_allpergroupsp = l_load_struct_gep(
- b, v_aggstatep,
- FIELDNO_AGGSTATE_ALL_PERGROUPS,
- "aggstate.all_pergroups");
+ v_allpergroupsp = l_load_struct_gep(b, v_aggstatep,
+ FIELDNO_AGGSTATE_ALL_PERGROUPS,
+ "aggstate.all_pergroups");
- v_setoff = l_int32_const(
- op->d.agg_plain_pergroup_nullcheck.setoff);
+ v_setoff = l_int32_const(op->d.agg_plain_pergroup_nullcheck.setoff);
- v_pergroup_allaggs = l_load_gep1(
- b, v_allpergroupsp, v_setoff, "");
+ v_pergroup_allaggs = l_load_gep1(b, v_allpergroupsp, v_setoff, "");
- LLVMBuildCondBr(
- b,
- LLVMBuildICmp(b, LLVMIntEQ,
- LLVMBuildPtrToInt(
- b, v_pergroup_allaggs, TypeSizeT, ""),
- l_sizet_const(0), ""),
- opblocks[jumpnull],
- opblocks[opno + 1]);
+ LLVMBuildCondBr(b,
+ LLVMBuildICmp(b, LLVMIntEQ,
+ LLVMBuildPtrToInt(b, v_pergroup_allaggs, TypeSizeT, ""),
+ l_sizet_const(0), ""),
+ opblocks[jumpnull],
+ opblocks[opno + 1]);
break;
}
static bool verify_client_proof(scram_state *state);
static bool verify_final_nonce(scram_state *state);
static void mock_scram_secret(const char *username, int *iterations,
- char **salt, uint8 *stored_key, uint8 *server_key);
+ char **salt, uint8 *stored_key, uint8 *server_key);
static bool is_scram_printable(char *p);
static char *sanitize_char(char c);
static char *sanitize_str(const char *s);
if (password_type == PASSWORD_TYPE_SCRAM_SHA_256)
{
if (parse_scram_secret(shadow_pass, &state->iterations, &state->salt,
- state->StoredKey, state->ServerKey))
+ state->StoredKey, state->ServerKey))
got_secret = true;
else
{
}
/*
- * If the user did not have a valid SCRAM secret, we still go through
- * the motions with a mock one, and fail as if the client supplied an
+ * If the user did not have a valid SCRAM secret, we still go through the
+ * motions with a mock one, and fail as if the client supplied an
* incorrect password. This is to avoid revealing information to an
* attacker.
*/
if (!got_secret)
{
mock_scram_secret(state->port->user_name, &state->iterations,
- &state->salt, state->StoredKey, state->ServerKey);
+ &state->salt, state->StoredKey, state->ServerKey);
state->doomed = true;
}
errmsg("could not generate random salt")));
result = scram_build_secret(saltbuf, SCRAM_DEFAULT_SALT_LEN,
- SCRAM_DEFAULT_ITERATIONS, password);
+ SCRAM_DEFAULT_ITERATIONS, password);
if (prep_password)
pfree(prep_password);
pg_saslprep_rc rc;
if (!parse_scram_secret(secret, &iterations, &encoded_salt,
- stored_key, server_key))
+ stored_key, server_key))
{
/*
* The password looked like a SCRAM secret, but could not be parsed.
*/
bool
parse_scram_secret(const char *secret, int *iterations, char **salt,
- uint8 *stored_key, uint8 *server_key)
+ uint8 *stored_key, uint8 *server_key)
{
char *v;
char *p;
*/
static void
mock_scram_secret(const char *username, int *iterations, char **salt,
- uint8 *stored_key, uint8 *server_key)
+ uint8 *stored_key, uint8 *server_key)
{
char *raw_salt;
char *encoded_salt;
#include "utils/memutils.h"
/* default init hook can be overridden by a shared library */
-static void default_openssl_tls_init(SSL_CTX *context, bool isServerStart);
+static void default_openssl_tls_init(SSL_CTX *context, bool isServerStart);
openssl_tls_init_hook_typ openssl_tls_init_hook = default_openssl_tls_init;
static int my_sock_read(BIO *h, char *buf, int size);
/*
* Call init hook (usually to set password callback)
*/
- (* openssl_tls_init_hook)(context, isServerStart);
+ (*openssl_tls_init_hook) (context, isServerStart);
/* used by the callback */
ssl_is_server_start = isServerStart;
if (ssl_passphrase_command[0] && ssl_passphrase_command_supports_reload)
SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb);
else
+
/*
* If reloading and no external command is configured, override
* OpenSSL's default handling of passphrase-protected files,
strspn(shadow_pass + 3, MD5_PASSWD_CHARSET) == MD5_PASSWD_LEN - 3)
return PASSWORD_TYPE_MD5;
if (parse_scram_secret(shadow_pass, &iterations, &encoded_salt,
- stored_key, server_key))
+ stored_key, server_key))
return PASSWORD_TYPE_SCRAM_SHA_256;
return PASSWORD_TYPE_PLAINTEXT;
}
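
The parse_scram_secret() calls being re-wrapped above split a stored SCRAM-SHA-256 verifier into an iteration count, salt, StoredKey and ServerKey. Below is a rough standalone sketch of that splitting, assuming the documented SCRAM-SHA-256$<iterations>:<salt>$<StoredKey>:<ServerKey> layout; the helper name, the example string, and the omission of base64 decoding and length checks are simplifications for illustration only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative splitter, not the server's parse_scram_secret(). */
static int
split_scram_secret(char *secret, int *iterations,
                   char **salt, char **stored_key, char **server_key)
{
    char   *scheme = strtok(secret, "$");
    char   *iter_str = strtok(NULL, ":");
    char   *salt_str = strtok(NULL, "$");
    char   *stored = strtok(NULL, ":");
    char   *server = strtok(NULL, "");

    if (!scheme || !iter_str || !salt_str || !stored || !server ||
        strcmp(scheme, "SCRAM-SHA-256") != 0)
        return 0;

    *iterations = atoi(iter_str);
    *salt = salt_str;
    *stored_key = stored;
    *server_key = server;
    return 1;
}

int
main(void)
{
    char    buf[] = "SCRAM-SHA-256$4096:c2FsdA==$c3RvcmVk:c2VydmVy";
    int     iterations;
    char   *salt, *stored_key, *server_key;

    if (split_scram_secret(buf, &iterations, &salt, &stored_key, &server_key))
        printf("iterations=%d salt=%s\n", iterations, salt);
    return 0;
}
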
List *useful_pathkeys_list = NIL;
/*
- * Considering query_pathkeys is always worth it, because it might allow us
- * to avoid a total sort when we have a partially presorted path available.
+ * Considering query_pathkeys is always worth it, because it might allow
+ * us to avoid a total sort when we have a partially presorted path
+ * available.
*/
if (root->query_pathkeys)
{
ListCell *lc;
- int npathkeys = 0; /* useful pathkeys */
+ int npathkeys = 0; /* useful pathkeys */
foreach(lc, root->query_pathkeys)
{
EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
/*
- * We can only build an Incremental Sort for pathkeys which contain
- * an EC member in the current relation, so ignore any suffix of the
- * list as soon as we find a pathkey without an EC member the
- * relation.
+ * We can only build an Incremental Sort for pathkeys which
+ * contain an EC member in the current relation, so ignore any
+ * suffix of the list as soon as we find a pathkey without an EC
+ * member in the relation.
*
- * By still returning the prefix of the pathkeys list that does meet
- * criteria of EC membership in the current relation, we enable not
- * just an incremental sort on the entirety of query_pathkeys but
- * also incremental sort below a JOIN.
+ * By still returning the prefix of the pathkeys list that does
+ * meet criteria of EC membership in the current relation, we
+ * enable not just an incremental sort on the entirety of
+ * query_pathkeys but also incremental sort below a JOIN.
*/
if (!find_em_expr_for_rel(pathkey_ec, rel))
break;
}
/*
- * The whole query_pathkeys list matches, so append it directly, to allow
- * comparing pathkeys easily by comparing list pointer. If we have to truncate
- * the pathkeys, we gotta do a copy though.
+ * The whole query_pathkeys list matches, so append it directly, to
+ * allow comparing pathkeys easily by comparing list pointer. If we
+ * have to truncate the pathkeys, we gotta do a copy though.
*/
if (npathkeys == list_length(root->query_pathkeys))
useful_pathkeys_list = lappend(useful_pathkeys_list,
/*
* If the path has no ordering at all, then we can't use either
- * incremental sort or rely on implict sorting with a gather merge.
+ * incremental sort or rely on implicit sorting with a gather
+ * merge.
*/
if (subpath->pathkeys == NIL)
continue;
is_sorted = pathkeys_count_contained_in(useful_pathkeys,
- subpath->pathkeys,
- &presorted_keys);
+ subpath->pathkeys,
+ &presorted_keys);
/*
* We don't need to consider the case where a subpath is already
Path *tmp;
/*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
*/
Assert(list_length(useful_pathkeys) != 1);
/*
* Extract presorted keys as list of expressions.
*
- * We need to be careful about Vars containing "varno 0" which might
- * have been introduced by generate_append_tlist, which would confuse
+ * We need to be careful about Vars containing "varno 0" which might have
+ * been introduced by generate_append_tlist, which would confuse
* estimate_num_groups (in fact it'd fail for such expressions). See
* recurse_set_operations which has to deal with the same issue.
*
- * Unlike recurse_set_operations we can't access the original target
- * list here, and even if we could it's not very clear how useful would
- * that be for a set operation combining multiple tables. So we simply
- * detect if there are any expressions with "varno 0" and use the
- * default DEFAULT_NUM_DISTINCT in that case.
+ * Unlike recurse_set_operations we can't access the original target list
+ * here, and even if we could it's not very clear how useful that would
+ * for a set operation combining multiple tables. So we simply detect if
+ * there are any expressions with "varno 0" and use the default
+ * DEFAULT_NUM_DISTINCT in that case.
*
- * We might also use either 1.0 (a single group) or input_tuples (each
- * row being a separate group), pretty much the worst and best case for
+ * We might also use either 1.0 (a single group) or input_tuples (each row
+ * being a separate group), pretty much the worst and best case for
* incremental sort. But those are extreme cases and using something in
* between seems reasonable. Furthermore, generate_append_tlist is used
* for set operations, which are likely to produce mostly unique output
/*
* Add the disk costs of hash aggregation that spills to disk.
*
- * Groups that go into the hash table stay in memory until finalized,
- * so spilling and reprocessing tuples doesn't incur additional
- * invocations of transCost or finalCost. Furthermore, the computed
- * hash value is stored with the spilled tuples, so we don't incur
- * extra invocations of the hash function.
+ * Groups that go into the hash table stay in memory until finalized, so
+ * spilling and reprocessing tuples doesn't incur additional invocations
+ * of transCost or finalCost. Furthermore, the computed hash value is
+ * stored with the spilled tuples, so we don't incur extra invocations of
+ * the hash function.
*
- * Hash Agg begins returning tuples after the first batch is
- * complete. Accrue writes (spilled tuples) to startup_cost and to
- * total_cost; accrue reads only to total_cost.
+ * Hash Agg begins returning tuples after the first batch is complete.
+ * Accrue writes (spilled tuples) to startup_cost and to total_cost;
+ * accrue reads only to total_cost.
*/
if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
{
- double pages;
- double pages_written = 0.0;
- double pages_read = 0.0;
- double hashentrysize;
- double nbatches;
- Size mem_limit;
- uint64 ngroups_limit;
- int num_partitions;
- int depth;
+ double pages;
+ double pages_written = 0.0;
+ double pages_read = 0.0;
+ double hashentrysize;
+ double nbatches;
+ Size mem_limit;
+ uint64 ngroups_limit;
+ int num_partitions;
+ int depth;
/*
* Estimate number of batches based on the computed limits. If less
* than or equal to one, all groups are expected to fit in memory;
* otherwise we expect to spill.
*/
- hashentrysize = hash_agg_entry_size(
- aggcosts->numAggs, input_width, aggcosts->transitionSpace);
+ hashentrysize = hash_agg_entry_size(aggcosts->numAggs, input_width,
+ aggcosts->transitionSpace);
hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
&ngroups_limit, &num_partitions);
- nbatches = Max( (numGroups * hashentrysize) / mem_limit,
- numGroups / ngroups_limit );
+ nbatches = Max((numGroups * hashentrysize) / mem_limit,
+ numGroups / ngroups_limit);
nbatches = Max(ceil(nbatches), 1.0);
num_partitions = Max(num_partitions, 2);
* recursion; but for the purposes of this calculation assume it stays
* constant.
*/
- depth = ceil( log(nbatches) / log(num_partitions) );
+ depth = ceil(log(nbatches) / log(num_partitions));
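
A standalone arithmetic sketch of the two estimates just above, using made-up inputs (one million groups, 64-byte hash entries, a 4 MB memory limit, 256 spill partitions); it mirrors only the formulas visible in this hunk, not the rest of the cost model:

#include <math.h>
#include <stdio.h>

#define Max(a, b) ((a) > (b) ? (a) : (b))

int
main(void)
{
    double  numGroups = 1000000.0;          /* assumed input */
    double  hashentrysize = 64.0;           /* assumed bytes per entry */
    double  mem_limit = 4.0 * 1024 * 1024;  /* assumed memory limit, bytes */
    double  ngroups_limit = mem_limit / hashentrysize;
    int     num_partitions = 256;           /* assumed spill fan-out */
    double  nbatches;
    int     depth;

    nbatches = Max((numGroups * hashentrysize) / mem_limit,
                   numGroups / ngroups_limit);
    nbatches = Max(ceil(nbatches), 1.0);
    num_partitions = Max(num_partitions, 2);

    /* number of spill/re-read levels if each level fans out to num_partitions */
    depth = (int) ceil(log(nbatches) / log(num_partitions));

    printf("nbatches=%.0f depth=%d\n", nbatches, depth);   /* 16 and 1 */
    return 0;
}
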
/*
* Estimate number of pages read and written. For each level of
Assert(joinrel->consider_partitionwise_join);
/*
- * We can not perform partitionwise join if either of the joining relations
- * is not partitioned.
+ * We cannot perform partitionwise join if either of the joining
+ * relations is not partitioned.
*/
if (!IS_PARTITIONED_REL(rel1) || !IS_PARTITIONED_REL(rel2))
return;
* partition bounds as inputs, and the partitions with the same
* cardinal positions form the pairs.
*
- * Note: even in cases where one or both inputs have merged bounds,
- * it would be possible for both the bounds to be exactly the same, but
+ * Note: even in cases where one or both inputs have merged bounds, it
+ * would be possible for both the bounds to be exactly the same, but
* it seems unlikely to be worth the cycles to check.
*/
if (!rel1->partbounds_merged &&
/*
* If the join rel's partbounds_merged flag is true, it means inputs
* are not guaranteed to have the same partition bounds, therefore we
- * can't assume that the partitions at the same cardinal positions form
- * the pairs; let get_matching_part_pairs() generate the pairs.
+ * can't assume that the partitions at the same cardinal positions
+ * form the pairs; let get_matching_part_pairs() generate the pairs.
* Otherwise, nothing to do since we can assume that.
*/
if (joinrel->partbounds_merged)
{
bool rel1_is_simple = IS_SIMPLE_REL(rel1);
bool rel2_is_simple = IS_SIMPLE_REL(rel2);
- int cnt_parts;
+ int cnt_parts;
*parts1 = NIL;
*parts2 = NIL;
* Get a child rel for rel1 with the relids. Note that we should have
* the child rel even if rel1 is a join rel, because in that case the
* partitions specified in the relids would have matching/overlapping
- * boundaries, so the specified partitions should be considered as ones
- * to be joined when planning partitionwise joins of rel1, meaning that
- * the child rel would have been built by the time we get here.
+ * boundaries, so the specified partitions should be considered as
+ * ones to be joined when planning partitionwise joins of rel1,
+ * meaning that the child rel would have been built by the time we get
+ * here.
*/
if (rel1_is_simple)
{
return 0; /* unordered path */
(void) pathkeys_count_contained_in(root->query_pathkeys, pathkeys,
- &n_common_pathkeys);
+ &n_common_pathkeys);
return n_common_pathkeys;
}
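
The pathkeys_count_contained_in() calls reformatted throughout this section report two things: whether the required ordering is already fully provided, and how long the presorted prefix is. A toy standalone analogue, with plain ints standing in for pathkeys, purely to show the shape of that answer:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in, not the planner function itself. */
static bool
count_common_prefix(const int *required, int nrequired,
                    const int *provided, int nprovided, int *n_common)
{
    int     n = 0;

    while (n < nrequired && n < nprovided && required[n] == provided[n])
        n++;
    *n_common = n;
    return n == nrequired;      /* true when the input is already fully sorted */
}

int
main(void)
{
    int     required[] = {1, 2, 3};
    int     provided[] = {1, 2, 7};
    int     presorted_keys;
    bool    is_sorted = count_common_prefix(required, 3, provided, 3,
                                            &presorted_keys);

    /* prints is_sorted=0 presorted_keys=2: a candidate for incremental sort */
    printf("is_sorted=%d presorted_keys=%d\n", is_sorted, presorted_keys);
    return 0;
}
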
allow_hash = false; /* policy-based decision not to hash */
else
{
- Size hashentrysize = hash_agg_entry_size(
- 0, cheapest_input_path->pathtarget->width, 0);
+ Size hashentrysize = hash_agg_entry_size(0, cheapest_input_path->pathtarget->width, 0);
allow_hash = enable_hashagg_disk ||
(hashentrysize * numDistinctRows <= work_mem * 1024L);
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
- input_path->pathkeys, &presorted_keys);
+ input_path->pathkeys, &presorted_keys);
if (is_sorted)
{
else
{
/*
- * Try adding an explicit sort, but only to the cheapest total path
- * since a full sort should generally add the same cost to all
- * paths.
+ * Try adding an explicit sort, but only to the cheapest total
+ * path since a full sort should generally add the same cost to
+ * all paths.
*/
if (input_path == cheapest_input_path)
{
}
/*
- * If incremental sort is enabled, then try it as well. Unlike with
- * regular sorts, we can't just look at the cheapest path, because
- * the cost of incremental sort depends on how well presorted the
- * path is. Additionally incremental sort may enable a cheaper
- * startup path to win out despite higher total cost.
+ * If incremental sort is enabled, then try it as well. Unlike
+ * with regular sorts, we can't just look at the cheapest path,
+ * because the cost of incremental sort depends on how well
+ * presorted the path is. Additionally incremental sort may enable
+ * a cheaper startup path to win out despite higher total cost.
*/
if (!enable_incrementalsort)
continue;
double total_groups;
/*
- * We don't care if this is the cheapest partial path - we can't
- * simply skip it, because it may be partially sorted in which
- * case we want to consider adding incremental sort (instead of
- * full sort, which is what happens above).
+ * We don't care if this is the cheapest partial path - we
+ * can't simply skip it, because it may be partially sorted in
+ * which case we want to consider adding incremental sort
+ * (instead of full sort, which is what happens above).
*/
is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
- input_path->pathkeys,
- &presorted_keys);
+ input_path->pathkeys,
+ &presorted_keys);
/* No point in adding incremental sort on fully sorted paths. */
if (is_sorted)
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
if (path == cheapest_path || is_sorted)
{
else if (parse->hasAggs)
{
/*
- * We have aggregation, possibly with plain GROUP BY. Make
- * an AggPath.
+ * We have aggregation, possibly with plain GROUP BY. Make an
+ * AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
else if (parse->groupClause)
{
/*
- * We have GROUP BY without aggregation or grouping sets.
- * Make a GroupPath.
+ * We have GROUP BY without aggregation or grouping sets. Make
+ * a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
/*
* Insert a Sort node, if required. But there's no point in
continue;
/*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
*/
Assert(list_length(root->group_pathkeys) != 1);
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
/* Ignore already sorted paths */
if (is_sorted)
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
if (path == cheapest_partial_path || is_sorted)
{
* Consider incremental sort on all partial paths, if enabled.
*
* We can also skip the entire loop when we only have a single-item
- * group_pathkeys because then we can't possibly have a presorted
- * prefix of the list without having the list be fully sorted.
+ * group_pathkeys because then we can't possibly have a presorted prefix
+ * of the list without having the list be fully sorted.
*/
if (!enable_incrementalsort || list_length(root->group_pathkeys) == 1)
return;
double total_groups;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
if (is_sorted)
continue;
* unadorned NULL that's not accepted back by the grammar.
*/
if (exprKind == EXPR_KIND_LIMIT && limitOption == LIMIT_OPTION_WITH_TIES &&
- IsA(clause, A_Const) && ((A_Const *) clause)->val.type == T_Null)
+ IsA(clause, A_Const) &&((A_Const *) clause)->val.type == T_Null)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ROW_COUNT_IN_LIMIT_CLAUSE),
errmsg("row count cannot be NULL in FETCH FIRST ... WITH TIES clause")));
/*
* We must fill the attmap now so that it can be used to process generated
* column default expressions in the per-column loop below.
- */
+ */
new_attno = 1;
for (parent_attno = 1; parent_attno <= tupleDesc->natts;
parent_attno++)
* mentioned above.
*/
Datum attoptions =
- get_attoptions(RelationGetRelid(index_rel), i + 1);
+ get_attoptions(RelationGetRelid(index_rel), i + 1);
defopclass = GetDefaultOpClass(attform->atttypid,
index_rel->rd_rel->relam);
typedef struct PartitionMap
{
int nparts; /* number of partitions */
- int *merged_indexes; /* indexes of merged partitions */
+ int *merged_indexes; /* indexes of merged partitions */
bool *merged; /* flags to indicate whether partitions are
* merged with non-dummy partitions */
bool did_remapping; /* did we re-map partitions? */
static void init_partition_map(RelOptInfo *rel, PartitionMap *map);
static void free_partition_map(PartitionMap *map);
static bool is_dummy_partition(RelOptInfo *rel, int part_index);
-static int merge_matching_partitions(PartitionMap *outer_map,
- PartitionMap *inner_map,
- int outer_part,
- int inner_part,
- int *next_index);
-static int process_outer_partition(PartitionMap *outer_map,
- PartitionMap *inner_map,
- bool outer_has_default,
- bool inner_has_default,
- int outer_index,
- int inner_default,
- JoinType jointype,
- int *next_index,
- int *default_index);
-static int process_inner_partition(PartitionMap *outer_map,
- PartitionMap *inner_map,
- bool outer_has_default,
- bool inner_has_default,
- int inner_index,
- int outer_default,
- JoinType jointype,
- int *next_index,
- int *default_index);
+static int merge_matching_partitions(PartitionMap *outer_map,
+ PartitionMap *inner_map,
+ int outer_part,
+ int inner_part,
+ int *next_index);
+static int process_outer_partition(PartitionMap *outer_map,
+ PartitionMap *inner_map,
+ bool outer_has_default,
+ bool inner_has_default,
+ int outer_index,
+ int inner_default,
+ JoinType jointype,
+ int *next_index,
+ int *default_index);
+static int process_inner_partition(PartitionMap *outer_map,
+ PartitionMap *inner_map,
+ bool outer_has_default,
+ bool inner_has_default,
+ int inner_index,
+ int outer_default,
+ JoinType jointype,
+ int *next_index,
+ int *default_index);
static void merge_null_partitions(PartitionMap *outer_map,
PartitionMap *inner_map,
bool outer_has_null,
JoinType jointype,
int *next_index,
int *default_index);
-static int merge_partition_with_dummy(PartitionMap *map, int index,
- int *next_index);
+static int merge_partition_with_dummy(PartitionMap *map, int index,
+ int *next_index);
static void fix_merged_indexes(PartitionMap *outer_map,
PartitionMap *inner_map,
int nmerged, List *merged_indexes);
List *merged_indexes,
int null_index,
int default_index);
-static int get_range_partition(RelOptInfo *rel,
- PartitionBoundInfo bi,
- int *lb_pos,
- PartitionRangeBound *lb,
- PartitionRangeBound *ub);
-static int get_range_partition_internal(PartitionBoundInfo bi,
- int *lb_pos,
- PartitionRangeBound *lb,
- PartitionRangeBound *ub);
+static int get_range_partition(RelOptInfo *rel,
+ PartitionBoundInfo bi,
+ int *lb_pos,
+ PartitionRangeBound *lb,
+ PartitionRangeBound *ub);
+static int get_range_partition_internal(PartitionBoundInfo bi,
+ int *lb_pos,
+ PartitionRangeBound *lb,
+ PartitionRangeBound *ub);
static bool compare_range_partitions(int partnatts, FmgrInfo *partsupfuncs,
Oid *partcollations,
PartitionRangeBound *outer_lb,
PartitionRangeBound *outer_ub,
PartitionRangeBound *inner_lb,
PartitionRangeBound *inner_ub,
- int lb_cmpval, int ub_cmpval,
+ int lb_cmpval, int ub_cmpval,
PartitionRangeBound *merged_lb,
PartitionRangeBound *merged_ub);
static void add_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
dest->kind = NULL;
/*
- * For hash partitioning, datums array will have two elements - modulus and
- * remainder.
+ * For hash partitioning, datums array will have two elements - modulus
+ * and remainder.
*/
hash_part = (key->strategy == PARTITION_STRATEGY_HASH);
natts = hash_part ? 2 : partnatts;
default:
elog(ERROR, "unexpected partition strategy: %d",
(int) outer_binfo->strategy);
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
}
}
/*
* Merge partitions from both sides. In each iteration we compare a pair
- * of list values, one from each side, and decide whether the corresponding
- * partitions match or not. If the two values match exactly, move to the
- * next pair of list values, otherwise move to the next list value on the
- * side with a smaller list value.
+ * of list values, one from each side, and decide whether the
+ * corresponding partitions match or not. If the two values match
+ * exactly, move to the next pair of list values, otherwise move to the
+ * next list value on the side with a smaller list value.
*/
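
The loop that follows is the classic two-pointer merge over two sorted sequences. A toy standalone version of the same control flow, with plain ints standing in for list-partition bound datums:

#include <stdio.h>

int
main(void)
{
    int     outer[] = {1, 3, 5, 8};
    int     inner[] = {3, 4, 8, 9};
    int     nouter = 4, ninner = 4;
    int     outer_pos = 0, inner_pos = 0;

    while (outer_pos < nouter || inner_pos < ninner)
    {
        int     cmpval;

        /* a side that is exhausted behaves as if it held an extra, larger value */
        if (outer_pos >= nouter)
            cmpval = 1;
        else if (inner_pos >= ninner)
            cmpval = -1;
        else
            cmpval = (outer[outer_pos] > inner[inner_pos]) -
                     (outer[outer_pos] < inner[inner_pos]);

        if (cmpval == 0)
        {
            printf("match: %d\n", outer[outer_pos]);
            outer_pos++;
            inner_pos++;
        }
        else if (cmpval < 0)
            printf("outer-only: %d\n", outer[outer_pos++]);
        else
            printf("inner-only: %d\n", inner[inner_pos++]);
    }
    return 0;
}
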
outer_pos = inner_pos = 0;
while (outer_pos < outer_bi->ndatums || inner_pos < inner_bi->ndatums)
if (outer_pos < outer_bi->ndatums)
{
/*
- * If the partition on the outer side has been proven empty, ignore
- * it and move to the next datum on the outer side.
+ * If the partition on the outer side has been proven empty,
+ * ignore it and move to the next datum on the outer side.
*/
outer_index = outer_bi->indexes[outer_pos];
if (is_dummy_partition(outer_rel, outer_index))
if (inner_pos < inner_bi->ndatums)
{
/*
- * If the partition on the inner side has been proven empty, ignore
- * it and move to the next datum on the inner side.
+ * If the partition on the inner side has been proven empty,
+ * ignore it and move to the next datum on the inner side.
*/
inner_index = inner_bi->indexes[inner_pos];
if (is_dummy_partition(inner_rel, inner_index))
* We run this loop till both sides finish. This allows us to avoid
* duplicating code to handle the remaining values on the side which
* finishes later. For that we set the comparison parameter cmpval in
- * such a way that it appears as if the side which finishes earlier has
- * an extra value higher than any other value on the unfinished side.
- * That way we advance the values on the unfinished side till all of
- * its values are exhausted.
+ * such a way that it appears as if the side which finishes earlier
+ * has an extra value higher than any other value on the unfinished
+ * side. That way we advance the values on the unfinished side till
+ * all of its values are exhausted.
*/
if (outer_pos >= outer_bi->ndatums)
cmpval = 1;
Assert(outer_pos < outer_bi->ndatums);
/*
- * If the inner side has the default partition, or this is an outer
- * join, try to assign a merged partition to the outer partition
- * (see process_outer_partition()). Otherwise, the outer partition
- * will not contribute to the result.
+ * If the inner side has the default partition, or this is an
+ * outer join, try to assign a merged partition to the outer
+ * partition (see process_outer_partition()). Otherwise, the
+ * outer partition will not contribute to the result.
*/
if (inner_has_default || IS_OUTER_JOIN(jointype))
{
/*
* If the outer side has the default partition, or this is a FULL
* join, try to assign a merged partition to the inner partition
- * (see process_inner_partition()). Otherwise, the inner partition
- * will not contribute to the result.
+ * (see process_inner_partition()). Otherwise, the inner
+ * partition will not contribute to the result.
*/
if (outer_has_default || jointype == JOIN_FULL)
{
* partitions match or not. If the two ranges overlap, move to the next
* pair of ranges, otherwise move to the next range on the side with a
* lower range. outer_lb_pos/inner_lb_pos keep track of the positions of
- * lower bounds in the datums arrays in the outer/inner PartitionBoundInfos
- * respectively.
+ * lower bounds in the datums arrays in the outer/inner
+ * PartitionBoundInfos respectively.
*/
outer_lb_pos = inner_lb_pos = 0;
outer_index = get_range_partition(outer_rel, outer_bi, &outer_lb_pos,
* We run this loop till both sides finish. This allows us to avoid
* duplicating code to handle the remaining ranges on the side which
* finishes later. For that we set the comparison parameter cmpval in
- * such a way that it appears as if the side which finishes earlier has
- * an extra range higher than any other range on the unfinished side.
- * That way we advance the ranges on the unfinished side till all of
- * its ranges are exhausted.
+ * such a way that it appears as if the side which finishes earlier
+ * has an extra range higher than any other range on the unfinished
+ * side. That way we advance the ranges on the unfinished side till
+ * all of its ranges are exhausted.
*/
if (outer_index == -1)
{
goto cleanup;
/*
- * A row from a non-overlapping portion (if any) of a partition
- * on one side might find its join partner in the default
- * partition (if any) on the other side, causing the same
- * situation as above; give up in that case.
+ * A row from a non-overlapping portion (if any) of a partition on
+ * one side might find its join partner in the default partition
+ * (if any) on the other side, causing the same situation as
+ * above; give up in that case.
*/
if ((outer_has_default && (lb_cmpval > 0 || ub_cmpval < 0)) ||
(inner_has_default && (lb_cmpval < 0 || ub_cmpval > 0)))
outer_map.merged[outer_index] == false);
/*
- * If the inner side has the default partition, or this is an outer
- * join, try to assign a merged partition to the outer partition
- * (see process_outer_partition()). Otherwise, the outer partition
- * will not contribute to the result.
+ * If the inner side has the default partition, or this is an
+ * outer join, try to assign a merged partition to the outer
+ * partition (see process_outer_partition()). Otherwise, the
+ * outer partition will not contribute to the result.
*/
if (inner_has_default || IS_OUTER_JOIN(jointype))
{
/*
* If the outer side has the default partition, or this is a FULL
* join, try to assign a merged partition to the inner partition
- * (see process_inner_partition()). Otherwise, the inner partition
- * will not contribute to the result.
+ * (see process_inner_partition()). Otherwise, the inner
+ * partition will not contribute to the result.
*/
if (outer_has_default || jointype == JOIN_FULL)
{
}
/*
- * If we assigned a merged partition, add the range bounds and index of
- * the merged partition if appropriate.
+ * If we assigned a merged partition, add the range bounds and index
+ * of the merged partition if appropriate.
*/
if (merged_index >= 0 && merged_index != default_index)
add_merged_range_bounds(partnatts, partsupfuncs, partcollations,
merge_matching_partitions(PartitionMap *outer_map, PartitionMap *inner_map,
int outer_index, int inner_index, int *next_index)
{
- int outer_merged_index;
- int inner_merged_index;
- bool outer_merged;
- bool inner_merged;
+ int outer_merged_index;
+ int inner_merged_index;
+ bool outer_merged;
+ bool inner_merged;
Assert(outer_index >= 0 && outer_index < outer_map->nparts);
outer_merged_index = outer_map->merged_indexes[outer_index];
*/
if (outer_merged_index == -1 && inner_merged_index == -1)
{
- int merged_index = *next_index;
+ int merged_index = *next_index;
Assert(!outer_merged);
Assert(!inner_merged);
int *next_index,
int *default_index)
{
- int merged_index = -1;
+ int merged_index = -1;
Assert(outer_index >= 0);
/*
* If the inner side has the default partition, a row from the outer
* partition might find its join partner in the default partition; try
- * merging the outer partition with the default partition. Otherwise, this
- * should be an outer join, in which case the outer partition has to be
- * scanned all the way anyway; merge the outer partition with a dummy
+ * merging the outer partition with the default partition. Otherwise,
+ * this should be an outer join, in which case the outer partition has to
+ * be scanned all the way anyway; merge the outer partition with a dummy
* partition on the other side.
*/
if (inner_has_default)
/*
* If the outer side has the default partition as well, the default
- * partition on the inner side will have two matching partitions on the
- * other side: the outer partition and the default partition on the
- * outer side. Partitionwise join doesn't handle this scenario yet.
+ * partition on the inner side will have two matching partitions on
+ * the other side: the outer partition and the default partition on
+ * the outer side. Partitionwise join doesn't handle this scenario
+ * yet.
*/
if (outer_has_default)
return -1;
return -1;
/*
- * If this is a FULL join, the default partition on the inner side
- * has to be scanned all the way anyway, so the resulting partition
- * will contain all key values from the default partition, which any
- * other partition of the join relation will not contain. Thus the
+ * If this is a FULL join, the default partition on the inner side has
+ * to be scanned all the way anyway, so the resulting partition will
+ * contain all key values from the default partition, which any other
+ * partition of the join relation will not contain. Thus the
* resulting partition will act as the default partition of the join
* relation; record the index in *default_index if not already done.
*/
int *next_index,
int *default_index)
{
- int merged_index = -1;
+ int merged_index = -1;
Assert(inner_index >= 0);
/*
* If the outer side has the default partition, a row from the inner
* partition might find its join partner in the default partition; try
- * merging the inner partition with the default partition. Otherwise, this
- * should be a FULL join, in which case the inner partition has to be
+ * merging the inner partition with the default partition. Otherwise,
+ * this should be a FULL join, in which case the inner partition has to be
* scanned all the way anyway; merge the inner partition with a dummy
* partition on the other side.
*/
/*
* If the inner side has the default partition as well, the default
- * partition on the outer side will have two matching partitions on the
- * other side: the inner partition and the default partition on the
- * inner side. Partitionwise join doesn't handle this scenario yet.
+ * partition on the outer side will have two matching partitions on
+ * the other side: the inner partition and the default partition on
+ * the inner side. Partitionwise join doesn't handle this scenario
+ * yet.
*/
if (inner_has_default)
return -1;
int *next_index,
int *null_index)
{
- bool consider_outer_null = false;
- bool consider_inner_null = false;
+ bool consider_outer_null = false;
+ bool consider_inner_null = false;
Assert(outer_has_null || inner_has_null);
Assert(*null_index == -1);
/*
* If this is an outer join, the NULL partition on the outer side has
* to be scanned all the way anyway; merge the NULL partition with a
- * dummy partition on the other side. In that case consider_outer_null
- * means that the NULL partition only contains NULL values as the key
- * values, so the merged partition will do so; treat it as the NULL
- * partition of the join relation.
+ * dummy partition on the other side. In that case
+ * consider_outer_null means that the NULL partition only contains
+ * NULL values as the key values, so the merged partition will do so;
+ * treat it as the NULL partition of the join relation.
*/
if (IS_OUTER_JOIN(jointype))
{
Assert(inner_has_null);
/*
- * If this is a FULL join, the NULL partition on the inner side has
- * to be scanned all the way anyway; merge the NULL partition with a
- * dummy partition on the other side. In that case consider_inner_null
- * means that the NULL partition only contains NULL values as the key
- * values, so the merged partition will do so; treat it as the NULL
- * partition of the join relation.
+ * If this is a FULL join, the NULL partition on the inner side has to
+ * be scanned all the way anyway; merge the NULL partition with a
+ * dummy partition on the other side. In that case
+ * consider_inner_null means that the NULL partition only contains
+ * NULL values as the key values, so the merged partition will do so;
+ * treat it as the NULL partition of the join relation.
*/
if (jointype == JOIN_FULL)
*null_index = merge_partition_with_dummy(inner_map, inner_null,
int *next_index,
int *default_index)
{
- int outer_merged_index = -1;
- int inner_merged_index = -1;
+ int outer_merged_index = -1;
+ int inner_merged_index = -1;
Assert(outer_has_default || inner_has_default);
/*
* If this is an outer join, the default partition on the outer side
* has to be scanned all the way anyway; if we have not yet assigned a
- * partition, merge the default partition with a dummy partition on the
- * other side. The merged partition will act as the default partition
- * of the join relation (see comments in process_inner_partition()).
+ * partition, merge the default partition with a dummy partition on
+ * the other side. The merged partition will act as the default
+ * partition of the join relation (see comments in
+ * process_inner_partition()).
*/
if (IS_OUTER_JOIN(jointype))
{
else if (!outer_has_default && inner_has_default)
{
/*
- * If this is a FULL join, the default partition on the inner side
- * has to be scanned all the way anyway; if we have not yet assigned a
- * partition, merge the default partition with a dummy partition on the
- * other side. The merged partition will act as the default partition
- * of the join relation (see comments in process_outer_partition()).
+ * If this is a FULL join, the default partition on the inner side has
+ * to be scanned all the way anyway; if we have not yet assigned a
+ * partition, merge the default partition with a dummy partition on
+ * the other side. The merged partition will act as the default
+ * partition of the join relation (see comments in
+ * process_outer_partition()).
*/
if (jointype == JOIN_FULL)
{
static int
merge_partition_with_dummy(PartitionMap *map, int index, int *next_index)
{
- int merged_index = *next_index;
+ int merged_index = *next_index;
Assert(index >= 0 && index < map->nparts);
Assert(map->merged_indexes[index] == -1);
int *outer_indexes;
int *inner_indexes;
int max_nparts;
- int i;
+ int i;
Assert(nmerged > 0);
Assert(*outer_parts == NIL);
{
if (i < outer_nparts)
{
- int merged_index = outer_map->merged_indexes[i];
+ int merged_index = outer_map->merged_indexes[i];
if (merged_index >= 0)
{
}
if (i < inner_nparts)
{
- int merged_index = inner_map->merged_indexes[i];
+ int merged_index = inner_map->merged_indexes[i];
if (merged_index >= 0)
{
int inner_index = inner_indexes[i];
/*
- * If both partitions are dummy, it means the merged partition that had
- * been assigned to the outer/inner partition was removed when
- * re-merging the outer/inner partition in merge_matching_partitions();
- * ignore the merged partition.
+ * If both partitions are dummy, it means the merged partition that
+ * had been assigned to the outer/inner partition was removed when
+ * re-merging the outer/inner partition in
+ * merge_matching_partitions(); ignore the merged partition.
*/
if (outer_index == -1 && inner_index == -1)
continue;
Assert(bi->strategy == PARTITION_STRATEGY_RANGE);
- do {
+ do
+ {
part_index = get_range_partition_internal(bi, lb_pos, lb, ub);
if (part_index == -1)
return -1;
PartitionRangeBound *outer_ub,
PartitionRangeBound *inner_lb,
PartitionRangeBound *inner_ub,
- int lb_cmpval, int ub_cmpval,
+ int lb_cmpval, int ub_cmpval,
PartitionRangeBound *merged_lb,
PartitionRangeBound *merged_ub)
{
/*
* A LEFT/ANTI join will have all the rows from the outer side, so
- * the bounds of the merged partition will be the same as the outer
- * bounds.
+ * the bounds of the merged partition will be the same as the
+ * outer bounds.
*/
*merged_lb = *outer_lb;
*merged_ub = *outer_ub;
case JOIN_FULL:
/*
- * A FULL join will have all the rows from both sides, so the lower
- * bound of the merged partition will be the lower of the two lower
- * bounds, and the upper bound of the merged partition will be the
- * higher of the two upper bounds.
+ * A FULL join will have all the rows from both sides, so the
+ * lower bound of the merged partition will be the lower of the
+ * two lower bounds, and the upper bound of the merged partition
+ * will be the higher of the two upper bounds.
*/
*merged_lb = (lb_cmpval < 0) ? *outer_lb : *inner_lb;
*merged_ub = (ub_cmpval > 0) ? *outer_ub : *inner_ub;
}
else
{
- PartitionRangeBound prev_ub;
+ PartitionRangeBound prev_ub;
Assert(*merged_datums);
Assert(*merged_kinds);
ListCell *lc;
/*
- * If this partitioned relation has a default partition and is itself
- * a partition (as evidenced by partition_qual being not NIL), we first
+ * If this partitioned relation has a default partition and is itself a
+ * partition (as evidenced by partition_qual being not NIL), we first
* check if the clauses contradict the partition constraint. If they do,
* there's no need to generate any steps as it'd already be proven that no
* partitions need to be scanned.
/* Determine if this table needs vacuum or analyze. */
*dovacuum = force_vacuum || (vactuples > vacthresh) ||
- (vac_ins_base_thresh >= 0 && instuples > vacinsthresh);
+ (vac_ins_base_thresh >= 0 && instuples > vacinsthresh);
*doanalyze = (anltuples > anlthresh);
}
else
ProcessConfigFile(PGC_SIGHUP);
/*
- * Checkpointer is the last process to shut down, so we ask it to
- * hold the keys for a range of other tasks required most of which
- * have nothing to do with checkpointing at all.
+ * Checkpointer is the last process to shut down, so we ask it to hold
+ * the keys for a range of other tasks required most of which have
+ * nothing to do with checkpointing at all.
*
- * For various reasons, some config values can change dynamically
- * so the primary copy of them is held in shared memory to make
- * sure all backends see the same value. We make Checkpointer
- * responsible for updating the shared memory copy if the
- * parameter setting changes because of SIGHUP.
+ * For various reasons, some config values can change dynamically so
+ * the primary copy of them is held in shared memory to make sure all
+ * backends see the same value. We make Checkpointer responsible for
+ * updating the shared memory copy if the parameter setting changes
+ * because of SIGHUP.
*/
UpdateSharedMemoryConfig();
}
if (ShutdownRequestPending)
{
/*
- * From here on, elog(ERROR) should end with exit(1), not send
- * control back to the sigsetjmp block above
+ * From here on, elog(ERROR) should end with exit(1), not send control
+ * back to the sigsetjmp block above
*/
ExitOnAnyError = true;
/* Close down the database */
ShutdownXLOG(0, 0);
/* Normal exit from the checkpointer is here */
- proc_exit(0); /* done */
+ proc_exit(0); /* done */
}
}
pgstat_recv_resetslrucounter(PgStat_MsgResetslrucounter *msg, int len)
{
int i;
- TimestampTz ts = GetCurrentTimestamp();
+ TimestampTz ts = GetCurrentTimestamp();
for (i = 0; i < SLRU_NUM_ELEMENTS; i++)
{
/*
* It is quite possible that a non-aggressive VACUUM ended up skipping
* various pages, however, we'll zero the insert counter here regardless.
- * It's currently used only to track when we need to perform an
- * "insert" autovacuum, which are mainly intended to freeze newly inserted
- * tuples. Zeroing this may just mean we'll not try to vacuum the table
- * again until enough tuples have been inserted to trigger another insert
+ * It's currently used only to track when we need to perform an "insert"
+ * autovacuum, which is mainly intended to freeze newly inserted tuples.
+ * Zeroing this may just mean we'll not try to vacuum the table again
+ * until enough tuples have been inserted to trigger another insert
* autovacuum. An anti-wraparound autovacuum will catch any persistent
* stragglers.
*/
int
pgstat_slru_index(const char *name)
{
- int i;
+ int i;
for (i = 0; i < SLRU_NUM_ELEMENTS; i++)
{
if (SSLok == 'S' && secure_open_server(port) == -1)
return STATUS_ERROR;
#endif
+
/*
* regular startup packet, cancel, etc packet should follow, but not
* another SSL negotiation request, and a GSS request should only
if (GSSok == 'G' && secure_open_gssapi(port) == -1)
return STATUS_ERROR;
#endif
+
/*
* regular startup packet, cancel, etc packet should follow, but not
* another GSS negotiation request, and an SSL request should only
void
AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid,
const char *pathname, size_t size, pg_time_t mtime,
- pg_checksum_context * checksum_ctx)
+ pg_checksum_context *checksum_ctx)
{
char pathbuf[MAXPGPATH];
int pathlen;
}
/*
- * Each file's entry needs to be separated from any entry that follows by a
- * comma, but there's no comma before the first one or after the last one.
- * To make that work, adding a file to the manifest starts by terminating
- * the most recently added line, with a comma if appropriate, but does not
- * terminate the line inserted for this file.
+ * Each file's entry needs to be separated from any entry that follows by
+ * a comma, but there's no comma before the first one or after the last
+ * one. To make that work, adding a file to the manifest starts by
+ * terminating the most recently added line, with a comma if appropriate,
+ * but does not terminate the line inserted for this file.
*/
initStringInfo(&buf);
if (manifest->first_file)
{
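
A tiny standalone illustration of that "terminate the previous entry, with a comma if appropriate, but never the current one" pattern; the buffer handling and file names here are invented for the sketch and are not the manifest code:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    const char *files[] = {"base/1/1259", "base/1/1249", "pg_control"};
    bool        first_file = true;
    int         i;

    printf("\"Files\": [");
    for (i = 0; i < 3; i++)
    {
        /* close out the previous entry before starting this one */
        if (first_file)
        {
            printf("\n");
            first_file = false;
        }
        else
            printf(",\n");

        printf("  { \"Path\": \"%s\" }", files[i]);
    }
    printf("\n]\n");
    return 0;
}
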
Oid partoid; /* LogicalRepPartMap's key */
LogicalRepRelMapEntry relmapentry;
-} LogicalRepPartMapEntry;
+} LogicalRepPartMapEntry;
/*
* Relcache invalidation callback for our relation map cache.
if (!publish)
{
- bool ancestor_published = false;
+ bool ancestor_published = false;
/*
* For a partition, check if any of the ancestors are
*/
if (am_partition)
{
- List *ancestors = get_partition_ancestors(relid);
- ListCell *lc2;
+ List *ancestors = get_partition_ancestors(relid);
+ ListCell *lc2;
- /* Find the "topmost" ancestor that is in this publication. */
+ /*
+ * Find the "topmost" ancestor that is in this
+ * publication.
+ */
foreach(lc2, ancestors)
{
- Oid ancestor = lfirst_oid(lc2);
+ Oid ancestor = lfirst_oid(lc2);
if (list_member_oid(GetRelationPublications(ancestor),
pub->oid))
retlsn = moveto;
/*
- * Dirty the slot so as it is written out at the next checkpoint.
- * Note that the LSN position advanced may still be lost in the
- * event of a crash, but this makes the data consistent after a
- * clean shutdown.
+ * Dirty the slot so it is written out at the next checkpoint. Note
+ * that the LSN position advanced may still be lost in the event of a
+ * crash, but this makes the data consistent after a clean shutdown.
*/
ReplicationSlotMarkDirty();
}
* keep track of their progress, so we should make more of an
* effort to save it for them.
*
- * Dirty the slot so it is written out at the next checkpoint.
- * The LSN position advanced to may still be lost on a crash
- * but this makes the data consistent after a clean shutdown.
+ * Dirty the slot so it is written out at the next checkpoint. The
+ * LSN position advanced to may still be lost on a crash but this
+ * makes the data consistent after a clean shutdown.
*/
ReplicationSlotMarkDirty();
}
walrcv->conninfo[0] = '\0';
/*
- * Use configured replication slot if present, and ignore the value
- * of create_temp_slot as the slot name should be persistent. Otherwise,
- * use create_temp_slot to determine whether this WAL receiver should
- * create a temporary slot by itself and use it, or not.
+ * Use configured replication slot if present, and ignore the value of
+ * create_temp_slot as the slot name should be persistent. Otherwise, use
+ * create_temp_slot to determine whether this WAL receiver should create a
+ * temporary slot by itself and use it, or not.
*/
if (slotname != NULL && slotname[0] != '\0')
{
void
WalSndResourceCleanup(bool isCommit)
{
- ResourceOwner resowner;
+ ResourceOwner resowner;
if (CurrentResourceOwner == NULL)
return;
/*
- * Deleting CurrentResourceOwner is not allowed, so we must save a
- * pointer in a local variable and clear it first.
+ * Deleting CurrentResourceOwner is not allowed, so we must save a pointer
+ * in a local variable and clear it first.
*/
resowner = CurrentResourceOwner;
CurrentResourceOwner = NULL;
else if (IsA(clause, ScalarArrayOpExpr))
{
/* If it's an scalar array operator, check for Var IN Const. */
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
/*
* Reject ALL() variant, we only care about ANY/IN.
/*
* If it's not an "=" operator, just ignore the clause, as it's not
* compatible with functional dependencies. The operator is identified
- * simply by looking at which function it uses to estimate selectivity.
- * That's a bit strange, but it's what other similar places do.
+ * simply by looking at which function it uses to estimate
+ * selectivity. That's a bit strange, but it's what other similar
+ * places do.
*/
if (get_oprrest(expr->opno) != F_EQSEL)
return false;
find_strongest_dependency(MVDependencies **dependencies, int ndependencies,
Bitmapset *attnums)
{
- int i, j;
+ int i,
+ j;
MVDependency *strongest = NULL;
/* number of attnums in clauses */
/*
* this dependency is stronger, but we must still check that it's
- * fully matched to these attnums. We perform this check last as it's
- * slightly more expensive than the previous checks.
+ * fully matched to these attnums. We perform this check last as
+ * it's slightly more expensive than the previous checks.
*/
if (dependency_is_fully_matched(dependency, attnums))
strongest = dependency; /* save new best match */
static void statext_store(Oid relid,
MVNDistinct *ndistinct, MVDependencies *dependencies,
MCVList *mcv, VacAttrStats **stats);
-static int statext_compute_stattarget(int stattarget,
- int natts, VacAttrStats **stats);
+static int statext_compute_stattarget(int stattarget,
+ int natts, VacAttrStats **stats);
/*
* Compute requested extended stats, using the rows sampled for the plain
stats);
/*
- * Don't rebuild statistics objects with statistics target set to 0 (we
- * just leave the existing values around, just like we do for regular
- * per-column statistics).
+ * Don't rebuild statistics objects with statistics target set to 0
+ * (we just leave the existing values around, just like we do for
+ * regular per-column statistics).
*/
if (stattarget == 0)
continue;
foreach(lc, lstats)
{
- StatExtEntry *stat = (StatExtEntry *) lfirst(lc);
- int stattarget = stat->stattarget;
- VacAttrStats **stats;
- int nattrs = bms_num_members(stat->columns);
+ StatExtEntry *stat = (StatExtEntry *) lfirst(lc);
+ int stattarget = stat->stattarget;
+ VacAttrStats **stats;
+ int nattrs = bms_num_members(stat->columns);
/*
* Check if we can build this statistics object based on the columns
static int
statext_compute_stattarget(int stattarget, int nattrs, VacAttrStats **stats)
{
- int i;
+ int i;
/*
- * If there's statistics target set for the statistics object, use it.
- * It may be set to 0 which disables building of that statistic.
+ * If there's a statistics target set for the statistics object, use it. It
+ * may be set to 0 which disables building of that statistic.
*/
if (stattarget >= 0)
return stattarget;
/*
* The target for the statistics object is set to -1, in which case we
- * look at the maximum target set for any of the attributes the object
- * is defined on.
+ * look at the maximum target set for any of the attributes the object is
+ * defined on.
*/
for (i = 0; i < nattrs; i++)
{
/* Var IN Array */
if (IsA(clause, ScalarArrayOpExpr))
{
- RangeTblEntry *rte = root->simple_rte_array[relid];
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
+ RangeTblEntry *rte = root->simple_rte_array[relid];
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
Var *var;
/* Only expressions with two arguments are considered compatible. */
ListCell *l;
Bitmapset **list_attnums;
int listidx;
- Selectivity sel = 1.0;
+ Selectivity sel = 1.0;
/* check if there's any stats that might be useful for us. */
if (!has_stats_of_kind(rel->statlist, STATS_EXT_MCV))
stat = choose_best_statistics(rel->statlist, STATS_EXT_MCV,
list_attnums, list_length(clauses));
- /* if no (additional) matching stats could be found then we've nothing to do */
+ /*
+ * if no (additional) matching stats could be found then we've nothing
+ * to do
+ */
if (!stat)
break;
foreach(l, clauses)
{
/*
- * If the clause is compatible with the selected statistics, mark it
- * as estimated and add it to the list to estimate.
+ * If the clause is compatible with the selected statistics, mark
+ * it as estimated and add it to the list to estimate.
*/
if (list_attnums[listidx] != NULL &&
bms_is_subset(list_attnums[listidx], stat->keys))
/*
* First compute "simple" selectivity, i.e. without the extended
* statistics, and essentially assuming independence of the
- * columns/clauses. We'll then use the various selectivities computed from
- * MCV list to improve it.
+ * columns/clauses. We'll then use the various selectivities computed
+ * from MCV list to improve it.
*/
simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid,
- jointype, sjinfo, NULL);
+ jointype, sjinfo, NULL);
/*
- * Now compute the multi-column estimate from the MCV list, along with the
- * other selectivities (base & total selectivity).
+ * Now compute the multi-column estimate from the MCV list, along with
+ * the other selectivities (base & total selectivity).
*/
mcv_sel = mcv_clauselist_selectivity(root, stat, stat_clauses, varRelid,
jointype, sjinfo, rel,
if (other_sel > 1.0 - mcv_totalsel)
other_sel = 1.0 - mcv_totalsel;
- /* Overall selectivity is the combination of MCV and non-MCV estimates. */
+ /*
+ * Overall selectivity is the combination of MCV and non-MCV
+ * estimates.
+ */
stat_sel = mcv_sel + other_sel;
CLAMP_PROBABILITY(stat_sel);
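
A standalone arithmetic sketch of how the pieces above combine, with made-up selectivities; it reproduces only the capping and summing steps visible in this hunk:

#include <stdio.h>

#define CLAMP_PROBABILITY(p) \
    do { if ((p) < 0.0) (p) = 0.0; else if ((p) > 1.0) (p) = 1.0; } while (0)

int
main(void)
{
    double  mcv_sel = 0.15;         /* assumed selectivity from matching MCV items */
    double  mcv_totalsel = 0.80;    /* assumed total frequency covered by the MCV list */
    double  other_sel = 0.30;       /* assumed estimate for the non-MCV remainder */
    double  stat_sel;

    /* the non-MCV part cannot exceed what the MCV list leaves uncovered */
    if (other_sel > 1.0 - mcv_totalsel)
        other_sel = 1.0 - mcv_totalsel;

    stat_sel = mcv_sel + other_sel;
    CLAMP_PROBABILITY(stat_sel);

    printf("stat_sel = %.2f\n", stat_sel);  /* 0.15 + 0.20 = 0.35 */
    return 0;
}
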
bool
examine_clause_args(List *args, Var **varp, Const **cstp, bool *varonleftp)
{
- Var *var;
- Const *cst;
- bool varonleft;
- Node *leftop,
- *rightop;
+ Var *var;
+ Const *cst;
+ bool varonleft;
+ Node *leftop,
+ *rightop;
/* enforced by statext_is_compatible_clause_internal */
Assert(list_length(args) == 2);
if (IsA(rightop, RelabelType))
rightop = (Node *) ((RelabelType *) rightop)->arg;
- if (IsA(leftop, Var) && IsA(rightop, Const))
+ if (IsA(leftop, Var) &&IsA(rightop, Const))
{
var = (Var *) leftop;
cst = (Const *) rightop;
varonleft = true;
}
- else if (IsA(leftop, Const) && IsA(rightop, Var))
+ else if (IsA(leftop, Const) &&IsA(rightop, Var))
{
var = (Var *) rightop;
cst = (Const *) leftop;
groups = build_distinct_groups(nitems, items, mss, &ngroups);
/*
- * Maximum number of MCV items to store, based on the statistics target
- * we computed for the statistics object (from target set for the object
+ * Maximum number of MCV items to store, based on the statistics target we
+ * computed for the statistics object (from target set for the object
* itself, attributes and the system default). In any case, we can't keep
* more groups than we have available.
*/
{
int j;
SortItem key;
- MultiSortSupport tmp;
+ MultiSortSupport tmp;
/* frequencies for values in each attribute */
SortItem **freqs;
static int
sort_item_compare(const void *a, const void *b, void *arg)
{
- SortSupport ssup = (SortSupport) arg;
+ SortSupport ssup = (SortSupport) arg;
SortItem *ia = (SortItem *) a;
SortItem *ib = (SortItem *) b;
/* allocate arrays for all columns as a single chunk */
ptr = palloc(MAXALIGN(sizeof(SortItem *) * mss->ndims) +
- mss->ndims * MAXALIGN(sizeof(SortItem) * ngroups));
+ mss->ndims * MAXALIGN(sizeof(SortItem) * ngroups));
/* initial array of pointers */
result = (SortItem **) ptr;
for (dim = 0; dim < mss->ndims; dim++)
{
- SortSupport ssup = &mss->ssup[dim];
+ SortSupport ssup = &mss->ssup[dim];
/* array of values for a single column */
result[dim] = (SortItem *) ptr;
/*
* Identify distinct values, compute frequency (there might be
- * multiple MCV items containing this value, so we need to sum
- * counts from all of them.
+ * multiple MCV items containing this value, so we need to sum counts
+ * from all of them).
*/
ncounts[dim] = 1;
for (i = 1; i < ngroups; i++)
{
- if (sort_item_compare(&result[dim][i-1], &result[dim][i], ssup) == 0)
+ if (sort_item_compare(&result[dim][i - 1], &result[dim][i], ssup) == 0)
{
- result[dim][ncounts[dim]-1].count += result[dim][i].count;
+ result[dim][ncounts[dim] - 1].count += result[dim][i].count;
continue;
}
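/*
 * A standalone sketch of the deduplication pattern used above: walk an array
 * of (value, count) items already sorted by value, fold equal neighbors
 * together by summing their counts, and return the number of distinct values.
 * The struct and names are illustrative; the real code operates on SortItem
 * arrays via MultiSortSupport.
 */
typedef struct CountedValue
{
	int			value;
	int			count;
} CountedValue;

static int
merge_sorted_counts(CountedValue *items, int nitems)
{
	int			ndistinct = (nitems > 0) ? 1 : 0;

	for (int i = 1; i < nitems; i++)
	{
		if (items[i].value == items[ndistinct - 1].value)
			items[ndistinct - 1].count += items[i].count;	/* duplicate: sum */
		else
			items[ndistinct++] = items[i];	/* new distinct value */
	}

	return ndistinct;
}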
*/
info[dim].nvalues = ndistinct;
- if (info[dim].typbyval) /* by-value data types */
+ if (info[dim].typbyval) /* by-value data types */
{
info[dim].nbytes = info[dim].nvalues * info[dim].typlen;
/*
* We copy the data into the MCV item during deserialization, so
* we don't need to allocate any extra space.
- */
+ */
info[dim].nbytes_aligned = 0;
}
- else if (info[dim].typlen > 0) /* fixed-length by-ref */
+ else if (info[dim].typlen > 0) /* fixed-length by-ref */
{
/*
* We don't care about alignment in the serialized data, so we
* pack the data as much as possible. But we also track how much
- * data will be needed after deserialization, and in that case
- * we need to account for alignment of each item.
+ * data will be needed after deserialization, and in that case we
+ * need to account for alignment of each item.
*
* Note: As the items are fixed-length, we could easily compute
* this during deserialization, but we do it here anyway.
/* serialized length (uint32 length + data) */
len = VARSIZE_ANY_EXHDR(values[dim][i]);
- info[dim].nbytes += sizeof(uint32); /* length */
- info[dim].nbytes += len; /* value (no header) */
+ info[dim].nbytes += sizeof(uint32); /* length */
+ info[dim].nbytes += len; /* value (no header) */
/*
* During deserialization we'll build regular varlena values
/* c-strings include terminator, so +1 byte */
len = strlen(DatumGetCString(values[dim][i])) + 1;
- info[dim].nbytes += sizeof(uint32); /* length */
- info[dim].nbytes += len; /* value */
+ info[dim].nbytes += sizeof(uint32); /* length */
+ info[dim].nbytes += len; /* value */
/* space needed for properly aligned deserialized copies */
info[dim].nbytes_aligned += MAXALIGN(len);
* whole serialized MCV list (varlena header, MCV header, dimension info
* for each attribute, deduplicated values and items).
*/
- total_length = (3 * sizeof(uint32)) /* magic + type + nitems */
- + sizeof(AttrNumber) /* ndimensions */
- + (ndims * sizeof(Oid)); /* attribute types */
+ total_length = (3 * sizeof(uint32)) /* magic + type + nitems */
+ + sizeof(AttrNumber) /* ndimensions */
+ + (ndims * sizeof(Oid)); /* attribute types */
/* dimension info */
total_length += ndims * sizeof(DimensionInfo);
info[dim].nvalues, sizeof(Datum),
compare_scalars_simple, &ssup[dim]);
- Assert(value != NULL); /* serialization or deduplication error */
+ Assert(value != NULL); /* serialization or deduplication
+ * error */
/* compute index within the deduplicated array */
index = (uint16) (value - values[dim]);
* serialized data - it's not aligned properly, and it may disappear while
* we're still using the MCV list, e.g. due to catcache release.
*
- * We do care about alignment here, because we will allocate all the pieces
- * at once, but then use pointers to different parts.
+ * We do care about alignment here, because we will allocate all the
+ * pieces at once, but then use pointers to different parts.
*/
mcvlen = MAXALIGN(offsetof(MCVList, items) + (sizeof(MCVItem) * nitems));
/* finally translate the indexes (for non-NULL only) */
for (dim = 0; dim < ndims; dim++)
{
- uint16 index;
+ uint16 index;
memcpy(&index, ptr, sizeof(uint16));
ptr += sizeof(uint16);
/* stuff done on every call of the function */
funcctx = SRF_PERCALL_SETUP();
- if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more left to send */
+ if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more
+ * left to send */
{
Datum values[5];
bool nulls[5];
{
astate_nulls = accumArrayResult(astate_nulls,
- BoolGetDatum(item->isnull[i]),
- false,
- BOOLOID,
- CurrentMemoryContext);
+ BoolGetDatum(item->isnull[i]),
+ false,
+ BOOLOID,
+ CurrentMemoryContext);
if (!item->isnull[i])
{
txt = cstring_to_text(DatumGetPointer(val));
astate_values = accumArrayResult(astate_values,
- PointerGetDatum(txt),
- false,
- TEXTOID,
- CurrentMemoryContext);
+ PointerGetDatum(txt),
+ false,
+ TEXTOID,
+ CurrentMemoryContext);
}
else
astate_values = accumArrayResult(astate_values,
- (Datum) 0,
- true,
- TEXTOID,
- CurrentMemoryContext);
+ (Datum) 0,
+ true,
+ TEXTOID,
+ CurrentMemoryContext);
}
values[0] = Int32GetDatum(funcctx->call_cntr);
MCVItem *item = &mcvlist->items[i];
/*
- * When the MCV item or the Const value is NULL we can treat
- * this as a mismatch. We must not call the operator because
- * of strictness.
+ * When the MCV item or the Const value is NULL we can
+ * treat this as a mismatch. We must not call the operator
+ * because of strictness.
*/
if (item->isnull[idx] || cst->constisnull)
{
*
* We don't store collations used to build the statistics,
* but we can use the collation for the attribute itself,
- * as stored in varcollid. We do reset the statistics after
- * a type change (including collation change), so this is
- * OK. We may need to relax this after allowing extended
- * statistics on expressions.
+ * as stored in varcollid. We do reset the statistics
+ * after a type change (including collation change), so
+ * this is OK. We may need to relax this after allowing
+ * extended statistics on expressions.
*/
if (varonleft)
match = DatumGetBool(FunctionCall2Coll(&opproc,
}
else if (IsA(clause, ScalarArrayOpExpr))
{
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
FmgrInfo opproc;
/* valid only after examine_clause_args returns true */
MCVItem *item = &mcvlist->items[i];
/*
- * When the MCV item or the Const value is NULL we can treat
- * this as a mismatch. We must not call the operator because
- * of strictness.
+ * When the MCV item or the Const value is NULL we can
+ * treat this as a mismatch. We must not call the operator
+ * because of strictness.
*/
if (item->isnull[idx] || cst->constisnull)
{
for (j = 0; j < num_elems; j++)
{
- Datum elem_value = elem_values[j];
- bool elem_isnull = elem_nulls[j];
- bool elem_match;
+ Datum elem_value = elem_values[j];
+ bool elem_isnull = elem_nulls[j];
+ bool elem_match;
/* NULL values always evaluate as not matching. */
if (elem_isnull)
bufHdr->tag.forkNum == forkNum[j] &&
bufHdr->tag.blockNum >= firstDelBlock[j])
{
- InvalidateBuffer(bufHdr); /* releases spinlock */
+ InvalidateBuffer(bufHdr); /* releases spinlock */
break;
}
}
{
buf = fsm_readbuf(rel, first_removed_address, false);
if (!BufferIsValid(buf))
- return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
+ return InvalidBlockNumber; /* nothing to do; the FSM was already
+ * smaller */
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/* NO EREPORT(ERROR) from here till changes are logged */
{
new_nfsmblocks = fsm_logical_to_physical(first_removed_address);
if (smgrnblocks(rel->rd_smgr, FSM_FORKNUM) <= new_nfsmblocks)
- return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
+ return InvalidBlockNumber; /* nothing to do; the FSM was already
+ * smaller */
}
return new_nfsmblocks;
!PostmasterIsAlive())
{
/*
- * The extra PostmasterIsAliveInternal() check prevents false alarms on
- * systems that give a different value for getppid() while being traced
- * by a debugger.
+ * The extra PostmasterIsAliveInternal() check prevents false alarms
+ * on systems that give a different value for getppid() while being
+ * traced by a debugger.
*/
set->report_postmaster_not_running = true;
}
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- proc->delayChkpt = false; /* be sure this is cleared in abort */
+ proc->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
Assert(pgxact->nxids == 0);
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- proc->delayChkpt = false; /* be sure this is cleared in abort */
+ proc->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
/* Clear the subtransaction-XID cache too while holding the lock */
{
pid_t pss_pid;
sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
- pg_atomic_uint64 pss_barrierGeneration;
- pg_atomic_uint32 pss_barrierCheckMask;
+ pg_atomic_uint64 pss_barrierGeneration;
+ pg_atomic_uint32 pss_barrierCheckMask;
} ProcSignalSlot;
/*
*/
typedef struct
{
- pg_atomic_uint64 psh_barrierGeneration;
- ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
+ pg_atomic_uint64 psh_barrierGeneration;
+ ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
} ProcSignalHeader;
/*
Size
ProcSignalShmemSize(void)
{
- Size size;
+ Size size;
size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
/* If we're first, initialize. */
if (!found)
{
- int i;
+ int i;
pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
/*
* Initialize barrier state. Since we're a brand-new process, there
* shouldn't be any leftover backend-private state that needs to be
- * updated. Therefore, we can broadcast the latest barrier generation
- * and disregard any previously-set check bits.
+ * updated. Therefore, we can broadcast the latest barrier generation and
+ * disregard any previously-set check bits.
*
* NB: This only works if this initialization happens early enough in the
* startup sequence that we haven't yet cached any state that might need
- * to be invalidated. That's also why we have a memory barrier here, to
- * be sure that any later reads of memory happen strictly after this.
+ * to be invalidated. That's also why we have a memory barrier here, to be
+ * sure that any later reads of memory happen strictly after this.
*/
pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0);
barrier_generation =
uint64
EmitProcSignalBarrier(ProcSignalBarrierType type)
{
- uint64 flagbit = UINT64CONST(1) << (uint64) type;
- uint64 generation;
+ uint64 flagbit = UINT64CONST(1) << (uint64) type;
+ uint64 generation;
/*
* Set all the flags.
*
- * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this
- * is totally ordered with respect to anything the caller did before, and
- * anything that we do afterwards. (This is also true of the later call
- * to pg_atomic_add_fetch_u64.)
+ * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
+ * totally ordered with respect to anything the caller did before, and
+ * anything that we do afterwards. (This is also true of the later call to
+ * pg_atomic_add_fetch_u64.)
*/
for (int i = 0; i < NumProcSignalSlots; i++)
{
* generation.
*
* Concurrency is not a problem here. Backends that have exited don't
- * matter, and new backends that have joined since we entered this function
- * must already have current state, since the caller is responsible for
- * making sure that the relevant state is entirely visible before calling
- * this function in the first place. We still have to wake them up -
- * because we can't distinguish between such backends and older backends
- * that need to update state - but they won't actually need to change
- * any state.
+ * matter, and new backends that have joined since we entered this
+ * function must already have current state, since the caller is
+ * responsible for making sure that the relevant state is entirely visible
+ * before calling this function in the first place. We still have to wake
+ * them up - because we can't distinguish between such backends and older
+ * backends that need to update state - but they won't actually need to
+ * change any state.
*/
for (int i = NumProcSignalSlots - 1; i >= 0; i--)
{
volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
- pid_t pid = slot->pss_pid;
+ pid_t pid = slot->pss_pid;
if (pid != 0)
kill(pid, SIGUSR1);
void
WaitForProcSignalBarrier(uint64 generation)
{
- long timeout = 125L;
+ long timeout = 125L;
for (int i = NumProcSignalSlots - 1; i >= 0; i--)
{
volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
- uint64 oldval;
+ uint64 oldval;
oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
while (oldval < generation)
{
- int events;
+ int events;
CHECK_FOR_INTERRUPTS();
}
/*
- * The caller is probably calling this function because it wants to
- * read the shared state or perform further writes to shared state once
- * all backends are known to have absorbed the barrier. However, the
- * read of pss_barrierGeneration was performed unlocked; insert a memory
- * barrier to separate it from whatever follows.
+ * The caller is probably calling this function because it wants to read
+ * the shared state or perform further writes to shared state once all
+ * backends are known to have absorbed the barrier. However, the read of
+ * pss_barrierGeneration was performed unlocked; insert a memory barrier
+ * to separate it from whatever follows.
*/
pg_memory_barrier();
}
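/*
 * A simplified sketch of the generation-based barrier protocol shown above:
 * the emitter advances a shared generation counter and signals every backend;
 * each backend publishes the generation it has absorbed, and the emitter
 * waits until every active slot has caught up.  C11 atomics are used here
 * for brevity; the real code uses pg_atomic_* operations, latches, and
 * CHECK_FOR_INTERRUPTS(), and all names below are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct BarrierSlot
{
	bool		in_use;
	_Atomic unsigned long absorbed_generation;
} BarrierSlot;

static _Atomic unsigned long shared_generation;

static unsigned long
emit_barrier(void)
{
	/*
	 * Atomically advance the shared generation (full barrier semantics);
	 * the caller then signals every backend so it re-reads the counter.
	 */
	return atomic_fetch_add(&shared_generation, 1) + 1;
}

static bool
all_absorbed(BarrierSlot *slots, int nslots, unsigned long generation)
{
	for (int i = 0; i < nslots; i++)
	{
		if (slots[i].in_use &&
			atomic_load(&slots[i].absorbed_generation) < generation)
			return false;		/* this backend still has to catch up */
	}

	return true;				/* caller may rely on the new shared state */
}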
void
ProcessProcSignalBarrier(void)
{
- uint64 generation;
- uint32 flags;
+ uint64 generation;
+ uint32 flags;
/* Exit quickly if there's no work to do. */
if (!ProcSignalBarrierPending)
ProcSignalBarrierPending = false;
/*
- * Read the current barrier generation, and then get the flags that
- * are set for this backend. Note that pg_atomic_exchange_u32 is a full
+ * Read the current barrier generation, and then get the flags that are
+ * set for this backend. Note that pg_atomic_exchange_u32 is a full
* barrier, so we're guaranteed that the read of the barrier generation
* happens before we atomically extract the flags, and that any subsequent
* state changes happen afterward.
* machinery gets committed. Rename PROCSIGNAL_BARRIER_PLACEHOLDER to
* PROCSIGNAL_BARRIER_SOMETHING_ELSE where SOMETHING_ELSE is something
* appropriately descriptive. Get rid of this function and instead have
- * ProcessBarrierSomethingElse. Most likely, that function should live
- * in the file pertaining to that subsystem, rather than here.
+ * ProcessBarrierSomethingElse. Most likely, that function should live in
+ * the file pertaining to that subsystem, rather than here.
*/
}
if (slot != NULL)
{
- uint64 mygen;
- uint64 curgen;
+ uint64 mygen;
+ uint64 curgen;
mygen = pg_atomic_read_u64(&slot->pss_barrierGeneration);
curgen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
}
else
{
- Size allocated_size;
+ Size allocated_size;
/* It isn't in the table yet. allocate and initialize it */
structPtr = ShmemAllocRaw(size, &allocated_size);
MemoryContext oldcontext;
HASH_SEQ_STATUS hstat;
ShmemIndexEnt *ent;
- Size named_allocated = 0;
+ Size named_allocated = 0;
Datum values[PG_GET_SHMEM_SIZES_COLS];
bool nulls[PG_GET_SHMEM_SIZES_COLS];
found_conflict = true;
else
found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
- lock, proclock);
+ lock, proclock);
if (!found_conflict)
{
void
smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nblocks)
{
- int i;
+ int i;
/*
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
/*
* We might as well update the local smgr_fsm_nblocks and
- * smgr_vm_nblocks settings. The smgr cache inval message that
- * this function sent will cause other backends to invalidate
- * their copies of smgr_fsm_nblocks and smgr_vm_nblocks,
- * and these ones too at the next command boundary.
- * But these ensure they aren't outright wrong until then.
+ * smgr_vm_nblocks settings. The smgr cache inval message that this
+ * function sent will cause other backends to invalidate their copies
+ * of smgr_fsm_nblocks and smgr_vm_nblocks, and these ones too at the
+ * next command boundary. But these ensure they aren't outright wrong
+ * until then.
*/
if (forknum[i] == FSM_FORKNUM)
reln->smgr_fsm_nblocks = nblocks[i];
/*
* Surprisingly, ALTER SYSTEM meets all our definitions of
* read-only: it changes nothing that affects the output of
- * pg_dump, it doesn't write WAL or imperil the application
- * of future WAL, and it doesn't depend on any state that needs
+ * pg_dump, it doesn't write WAL or imperil the application of
+ * future WAL, and it doesn't depend on any state that needs
* to be synchronized with parallel workers.
*
* So, despite the fact that it writes to a file, it's read
case T_VariableSetStmt:
{
/*
- * These modify only backend-local state, so they're OK to
- * run in a read-only transaction or on a standby. However,
- * they are disallowed in parallel mode, because they either
- * rely upon or modify backend-local state that might not be
+ * These modify only backend-local state, so they're OK to run
+ * in a read-only transaction or on a standby. However, they
+ * are disallowed in parallel mode, because they either rely
+ * upon or modify backend-local state that might not be
* synchronized among cooperating backends.
*/
return COMMAND_OK_IN_RECOVERY | COMMAND_OK_IN_READ_ONLY_TXN;
case T_VacuumStmt:
{
/*
- * These commands write WAL, so they're not strictly read-only,
- * and running them in parallel workers isn't supported.
+ * These commands write WAL, so they're not strictly
+ * read-only, and running them in parallel workers isn't
+ * supported.
*
* However, they don't change the database state in a way that
* would affect pg_dump output, so it's fine to run them in a
case T_CopyStmt:
{
- CopyStmt *stmt = (CopyStmt *) parsetree;
+ CopyStmt *stmt = (CopyStmt *) parsetree;
/*
- * You might think that COPY FROM is not at all read only,
- * but it's OK to copy into a temporary table, because that
+ * You might think that COPY FROM is not at all read only, but
+ * it's OK to copy into a temporary table, because that
* wouldn't change the output of pg_dump. If the target table
* turns out to be non-temporary, DoCopy itself will call
* PreventCommandIfReadOnly.
case T_VariableShowStmt:
{
/*
- * These commands don't modify any data and are safe to run
- * in a parallel worker.
+ * These commands don't modify any data and are safe to run in
+ * a parallel worker.
*/
return COMMAND_IS_STRICTLY_READ_ONLY;
}
{
/*
* NOTIFY requires an XID assignment, so it can't be permitted
- * on a standby. Perhaps LISTEN could, since without NOTIFY
- * it would be OK to just do nothing, at least until promotion,
+ * on a standby. Perhaps LISTEN could, since without NOTIFY it
+ * would be OK to just do nothing, at least until promotion,
* but we currently prohibit it lest the user get the wrong
* idea.
*
case T_LockStmt:
{
- LockStmt *stmt = (LockStmt *) parsetree;
+ LockStmt *stmt = (LockStmt *) parsetree;
/*
* Only weaker locker modes are allowed during recovery. The
- * restrictions here must match those in LockAcquireExtended().
+ * restrictions here must match those in
+ * LockAcquireExtended().
*/
if (stmt->mode > RowExclusiveLock)
return COMMAND_OK_IN_READ_ONLY_TXN;
TransactionStmt *stmt = (TransactionStmt *) parsetree;
/*
- * PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all
- * write WAL, so they're not read-only in the strict sense;
- * but the first and third do not change pg_dump output, so
- * they're OK in a read-only transactions.
+ * PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all write
+ * WAL, so they're not read-only in the strict sense; but the
+ * first and third do not change pg_dump output, so they're OK
+ * in a read-only transaction.
*
* We also consider COMMIT PREPARED to be OK in a read-only
* transaction environment, by way of exception.
case USE_XSD_DATES:
/* compatible with ISO date formats */
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = '-';
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '-';
}
*str++ = '/';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
break;
case USE_GERMAN_DATES:
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '.';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
break;
case USE_POSTGRES_DATES:
}
*str++ = '-';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
break;
}
case USE_XSD_DATES:
/* Compatible with ISO-8601 date formats */
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = '-';
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '-';
}
*str++ = '/';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = ' ';
str = pg_ultostr_zeropad(str, tm->tm_hour, 2);
*str++ = ':';
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '.';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = ' ';
str = pg_ultostr_zeropad(str, tm->tm_hour, 2);
*str++ = ':';
str = AppendTimestampSeconds(str, tm, fsec);
*str++ = ' ';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
if (print_tz)
{
static int32
int4gcd_internal(int32 arg1, int32 arg2)
{
- int32 swap;
- int32 a1, a2;
+ int32 swap;
+ int32 a1,
+ a2;
/*
* Put the greater absolute value in arg1.
Datum
int4gcd(PG_FUNCTION_ARGS)
{
- int32 arg1 = PG_GETARG_INT32(0);
- int32 arg2 = PG_GETARG_INT32(1);
- int32 result;
+ int32 arg1 = PG_GETARG_INT32(0);
+ int32 arg2 = PG_GETARG_INT32(1);
+ int32 result;
result = int4gcd_internal(arg1, arg2);
Datum
int4lcm(PG_FUNCTION_ARGS)
{
- int32 arg1 = PG_GETARG_INT32(0);
- int32 arg2 = PG_GETARG_INT32(1);
- int32 gcd;
- int32 result;
+ int32 arg1 = PG_GETARG_INT32(0);
+ int32 arg2 = PG_GETARG_INT32(1);
+ int32 gcd;
+ int32 result;
/*
* Handle lcm(x, 0) = lcm(0, x) = 0 as a special case. This prevents a
static int64
int8gcd_internal(int64 arg1, int64 arg2)
{
- int64 swap;
- int64 a1, a2;
+ int64 swap;
+ int64 a1,
+ a2;
/*
* Put the greater absolute value in arg1.
Datum
int8gcd(PG_FUNCTION_ARGS)
{
- int64 arg1 = PG_GETARG_INT64(0);
- int64 arg2 = PG_GETARG_INT64(1);
- int64 result;
+ int64 arg1 = PG_GETARG_INT64(0);
+ int64 arg2 = PG_GETARG_INT64(1);
+ int64 result;
result = int8gcd_internal(arg1, arg2);
Datum
int8lcm(PG_FUNCTION_ARGS)
{
- int64 arg1 = PG_GETARG_INT64(0);
- int64 arg2 = PG_GETARG_INT64(1);
- int64 gcd;
- int64 result;
+ int64 arg1 = PG_GETARG_INT64(0);
+ int64 arg2 = PG_GETARG_INT64(1);
+ int64 gcd;
+ int64 result;
/*
* Handle lcm(x, 0) = lcm(0, x) = 0 as a special case. This prevents a
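/*
 * A brief sketch of the arithmetic behind the gcd/lcm functions above:
 * Euclid's algorithm for the gcd, and lcm(a, b) = |a / gcd(a, b) * b| with
 * the lcm(x, 0) = lcm(0, x) = 0 special case noted in the comment.  This
 * deliberately ignores the INT_MIN/PG_INT64_MIN overflow handling the real
 * functions must provide; the names are illustrative only.
 */
static long long
gcd_sketch(long long a, long long b)
{
	if (a < 0)
		a = -a;
	if (b < 0)
		b = -b;

	while (b != 0)
	{
		long long	r = a % b;

		a = b;
		b = r;
	}

	return a;
}

static long long
lcm_sketch(long long a, long long b)
{
	long long	result;

	/* lcm(x, 0) = lcm(0, x) = 0; also avoids dividing by gcd(0, 0) = 0 */
	if (a == 0 || b == 0)
		return 0;

	/* divide before multiplying to reduce the chance of overflow */
	result = a / gcd_sketch(a, b) * b;
	return (result < 0) ? -result : result;
}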
JsonLexContext *lex;
JsonTokenType tok;
char *type;
- JsonParseErrorType result;
+ JsonParseErrorType result;
json = PG_GETARG_TEXT_PP(0);
lex = makeJsonLexContext(json, false);
void
pg_parse_json_or_ereport(JsonLexContext *lex, JsonSemAction *sem)
{
- JsonParseErrorType result;
+ JsonParseErrorType result;
result = pg_parse_json(lex, sem);
if (result != JSON_SUCCESS)
/* ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); */
/* Jsonb *newval = PG_GETARG_JSONB_P(2); */
/* bool create = PG_GETARG_BOOL(3); */
- text *handle_null;
- char *handle_val;
+ text *handle_null;
+ char *handle_val;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(3))
PG_RETURN_NULL();
errmsg("null_value_treatment must be \"delete_key\", \"return_target\", \"use_json_null\", or \"raise_exception\"")));
/* if the new value isn't an SQL NULL just call jsonb_set */
- if (! PG_ARGISNULL(2))
+ if (!PG_ARGISNULL(2))
return jsonb_set(fcinfo);
handle_null = PG_GETARG_TEXT_P(4);
handle_val = text_to_cstring(handle_null);
- if (strcmp(handle_val,"raise_exception") == 0)
+ if (strcmp(handle_val, "raise_exception") == 0)
{
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
}
else if (strcmp(handle_val, "use_json_null") == 0)
{
- Datum newval;
+ Datum newval;
newval = DirectFunctionCall1(jsonb_in, CStringGetDatum("null"));
else if (strcmp(handle_val, "return_target") == 0)
{
Jsonb *in = PG_GETARG_JSONB_P(0);
+
PG_RETURN_JSONB_P(in);
}
else
decimalLength32(const uint32 v)
{
int t;
- static uint32 PowersOfTen[] =
- {1, 10, 100,
- 1000, 10000, 100000,
- 1000000, 10000000, 100000000,
- 1000000000};
+ static const uint32 PowersOfTen[] = {
+ 1, 10, 100,
+ 1000, 10000, 100000,
+ 1000000, 10000000, 100000000,
+ 1000000000
+ };
+
/*
* Compute base-10 logarithm by dividing the base-2 logarithm by a
* good-enough approximation of the base-2 logarithm of 10
decimalLength64(const uint64 v)
{
int t;
- static uint64 PowersOfTen[] = {
- UINT64CONST(1), UINT64CONST(10),
- UINT64CONST(100), UINT64CONST(1000),
- UINT64CONST(10000), UINT64CONST(100000),
- UINT64CONST(1000000), UINT64CONST(10000000),
- UINT64CONST(100000000), UINT64CONST(1000000000),
- UINT64CONST(10000000000), UINT64CONST(100000000000),
- UINT64CONST(1000000000000), UINT64CONST(10000000000000),
- UINT64CONST(100000000000000), UINT64CONST(1000000000000000),
- UINT64CONST(10000000000000000), UINT64CONST(100000000000000000),
+ static const uint64 PowersOfTen[] = {
+ UINT64CONST(1), UINT64CONST(10),
+ UINT64CONST(100), UINT64CONST(1000),
+ UINT64CONST(10000), UINT64CONST(100000),
+ UINT64CONST(1000000), UINT64CONST(10000000),
+ UINT64CONST(100000000), UINT64CONST(1000000000),
+ UINT64CONST(10000000000), UINT64CONST(100000000000),
+ UINT64CONST(1000000000000), UINT64CONST(10000000000000),
+ UINT64CONST(100000000000000), UINT64CONST(1000000000000000),
+ UINT64CONST(10000000000000000), UINT64CONST(100000000000000000),
UINT64CONST(1000000000000000000), UINT64CONST(10000000000000000000)
};
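/*
 * A sketch of the technique behind decimalLength32/64 above: estimate
 * log10(v) from the position of the highest set bit (1233/4096 is one common
 * fixed-point approximation of log10(2)), then correct the estimate by
 * comparing against a power-of-ten table.  Illustrative only; the real code
 * uses pg_leftmost_one_pos32() (which requires v != 0) and the PowersOfTen
 * arrays shown here.
 */
static int
decimal_length32_sketch(unsigned int v)
{
	static const unsigned int powers_of_ten[] = {
		1, 10, 100, 1000, 10000, 100000,
		1000000, 10000000, 100000000, 1000000000
	};
	int			bits = 0;
	int			t;

	/* position of the highest set bit, i.e. floor(log2(v)); assumes v > 0 */
	for (unsigned int x = v; x > 1; x >>= 1)
		bits++;

	/* log10(v) is roughly log2(v) * log10(2) */
	t = (bits + 1) * 1233 / 4096;

	/* the estimate can be one too small; the table comparison corrects it */
	return t + (v >= powers_of_ten[t]);
}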
pg_stat_get_slru(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_SLRU_COLS 9
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- int i;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ int i;
PgStat_SLRUStats *stats;
/* check to see if caller supports us returning a tuplestore */
/* request SLRU stats from the stat collector */
stats = pgstat_fetch_slru();
- for (i = 0; ; i++)
+ for (i = 0;; i++)
{
/* for each row */
Datum values[PG_STAT_GET_SLRU_COLS];
bool nulls[PG_STAT_GET_SLRU_COLS];
- PgStat_SLRUStats stat = stats[i];
+ PgStat_SLRUStats stat = stats[i];
const char *name;
name = pgstat_slru_name(i);
const RangeType *tst);
static int bound_cmp(const void *a, const void *b, void *arg);
-static int adjacent_inner_consistent(TypeCacheEntry *typcache,
- const RangeBound *arg, const RangeBound *centroid,
- const RangeBound *prev);
-static int adjacent_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *arg,
- const RangeBound *centroid);
+static int adjacent_inner_consistent(TypeCacheEntry *typcache,
+ const RangeBound *arg, const RangeBound *centroid,
+ const RangeBound *prev);
+static int adjacent_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *arg,
+ const RangeBound *centroid);
/*
* SP-GiST 'config' interface function.
char *nspname;
/*
- * Would this collation be found by regcollationin? If not, qualify it.
+ * Would this collation be found by regcollationin? If not,
+ * qualify it.
*/
if (CollationIsVisible(collationid))
nspname = NULL;
initStringInfo(&buf);
get_opclass_name(opclass, InvalidOid, &buf);
- return &buf.data[1]; /* get_opclass_name() prepends space */
+ return &buf.data[1]; /* get_opclass_name() prepends space */
}
/*
char *value;
/*
- * Each array element should have the form name=value. If the "="
- * is missing for some reason, treat it like an empty value.
+ * Each array element should have the form name=value. If the "=" is
+ * missing for some reason, treat it like an empty value.
*/
name = option;
separator = strchr(option, '=');
/*
* In general we need to quote the value; but to avoid unnecessary
- * clutter, do not quote if it is an identifier that would not
- * need quoting. (We could also allow numbers, but that is a bit
- * trickier than it looks --- for example, are leading zeroes
- * significant? We don't want to assume very much here about what
- * custom reloptions might mean.)
+ * clutter, do not quote if it is an identifier that would not need
+ * quoting. (We could also allow numbers, but that is a bit trickier
+ * than it looks --- for example, are leading zeroes significant? We
+ * don't want to assume very much here about what custom reloptions
+ * might mean.)
*/
if (quote_identifier(value) == value)
appendStringInfoString(buf, value);
static bool
checkcondition_bit(void *checkval, QueryOperand *val, ExecPhraseData *data)
{
- void *key = (SignTSVector *) checkval;
+ void *key = (SignTSVector *) checkval;
/*
* we are not able to find a prefix in signature tree
static int
hemdist(SignTSVector *a, SignTSVector *b)
{
- int siglena = GETSIGLEN(a);
- int siglenb = GETSIGLEN(b);
+ int siglena = GETSIGLEN(a);
+ int siglenb = GETSIGLEN(b);
if (ISALLTRUE(a))
{
else
size_alpha = SIGLENBIT(siglen) -
sizebitvec((cache[j].allistrue) ?
- GETSIGN(datum_l) :
- GETSIGN(cache[j].sign),
- siglen);
+ GETSIGN(datum_l) :
+ GETSIGN(cache[j].sign),
+ siglen);
}
else
size_alpha = hemdistsign(cache[j].sign, GETSIGN(datum_l), siglen);
if (isnull)
result = (Datum) 0;
else
- result = datumCopy(attopts, false, -1); /* text[] */
+ result = datumCopy(attopts, false, -1); /* text[] */
ReleaseSysCache(tuple);
bool
get_index_isreplident(Oid index_oid)
{
- HeapTuple tuple;
- Form_pg_index rd_index;
- bool result;
+ HeapTuple tuple;
+ Form_pg_index rd_index;
+ bool result;
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid));
if (!HeapTupleIsValid(tuple))
if (relation->rd_rel->relispartition)
{
/* Add publications that the ancestors are in too. */
- List *ancestors = get_partition_ancestors(RelationGetRelid(relation));
- ListCell *lc;
+ List *ancestors = get_partition_ancestors(RelationGetRelid(relation));
+ ListCell *lc;
foreach(lc, ancestors)
{
- Oid ancestor = lfirst_oid(lc);
+ Oid ancestor = lfirst_oid(lc);
puboids = list_concat_unique_oid(puboids,
GetRelationPublications(ancestor));
* RelationGetIndexAttOptions
* get AM/opclass-specific options for an index parsed into a binary form
*/
-bytea **
+bytea **
RelationGetIndexAttOptions(Relation relation, bool copy)
{
MemoryContext oldcxt;
bytea **opts = relation->rd_opcoptions;
Oid relid = RelationGetRelid(relation);
- int natts = RelationGetNumberOfAttributes(relation); /* XXX IndexRelationGetNumberOfKeyAttributes */
+ int natts = RelationGetNumberOfAttributes(relation); /* XXX
+ * IndexRelationGetNumberOfKeyAttributes */
int i;
/* Try to copy cached options. */
p = backtrace_symbol_list;
for (;;)
{
- if (*p == '\0') /* end of backtrace_symbol_list */
+ if (*p == '\0') /* end of backtrace_symbol_list */
break;
if (strcmp(funcname, p) == 0)
int
errbacktrace(void)
{
- ErrorData *edata = &errordata[errordata_stack_depth];
+ ErrorData *edata = &errordata[errordata_stack_depth];
MemoryContext oldcontext;
recursion_depth++;
int
my_log2(long num)
{
- /* guard against too-large input, which would be invalid for pg_ceil_log2_*() */
+ /*
+ * guard against too-large input, which would be invalid for
+ * pg_ceil_log2_*()
+ */
if (num > LONG_MAX / 2)
num = LONG_MAX / 2;
ProcessingMode Mode = InitProcessing;
-BackendType MyBackendType;
+BackendType MyBackendType;
/* List of lock files to be removed at proc exit */
static List *lock_files = NIL;
else if ((*newval)[i] == ' ' ||
(*newval)[i] == '\n' ||
(*newval)[i] == '\t')
- ; /* ignore these */
+ ; /* ignore these */
else
someval[j++] = (*newval)[i]; /* copy anything else */
}
AllocSet set = (AllocSet) context;
AllocBlock block;
Size keepersize PG_USED_FOR_ASSERTS_ONLY
- = set->keeper->endptr - ((char *) set);
+ = set->keeper->endptr - ((char *) set);
AssertArg(AllocSetIsValid(set));
else
{
/* Normal case, release the block */
- context->mem_allocated -= block->endptr - ((char*) block);
+ context->mem_allocated -= block->endptr - ((char *) block);
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, block->freeptr - ((char *) block));
AllocSet set = (AllocSet) context;
AllocBlock block = set->blocks;
Size keepersize PG_USED_FOR_ASSERTS_ONLY
- = set->keeper->endptr - ((char *) set);
+ = set->keeper->endptr - ((char *) set);
AssertArg(AllocSetIsValid(set));
if (block->next)
block->next->prev = block->prev;
- context->mem_allocated -= block->endptr - ((char*) block);
+ context->mem_allocated -= block->endptr - ((char *) block);
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, block->freeptr - ((char *) block));
/* Do the realloc */
blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
- oldblksize = block->endptr - ((char *)block);
+ oldblksize = block->endptr - ((char *) block);
block = (AllocBlock) realloc(block, blksize);
if (block == NULL)
Size
MemoryContextMemAllocated(MemoryContext context, bool recurse)
{
- Size total = context->mem_allocated;
+ Size total = context->mem_allocated;
AssertArg(MemoryContextIsValid(context));
headerSize = offsetof(SlabContext, freelist) + freelistSize;
#ifdef MEMORY_CONTEXT_CHECKING
+
/*
- * With memory checking, we need to allocate extra space for the bitmap
- * of free chunks. The bitmap is an array of bools, so we don't need to
- * worry about alignment.
+ * With memory checking, we need to allocate extra space for the bitmap of
+ * free chunks. The bitmap is an array of bools, so we don't need to worry
+ * about alignment.
*/
headerSize += chunksPerBlock * sizeof(bool);
#endif
Size freeBlocksLen; /* current allocated length of freeBlocks[] */
/* The array of logical tapes. */
- int nTapes; /* # of logical tapes in set */
- LogicalTape *tapes; /* has nTapes nentries */
+ int nTapes; /* # of logical tapes in set */
+ LogicalTape *tapes; /* has nTapes nentries */
};
static void ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer);
static long
ltsGetFreeBlock(LogicalTapeSet *lts)
{
- long *heap = lts->freeBlocks;
- long blocknum;
- int heapsize;
+ long *heap = lts->freeBlocks;
+ long blocknum;
+ int heapsize;
unsigned long pos;
/* freelist empty; allocate a new block */
heapsize = lts->nFreeBlocks;
while (true)
{
- unsigned long left = left_offset(pos);
+ unsigned long left = left_offset(pos);
unsigned long right = right_offset(pos);
unsigned long min_child;
static void
ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
{
- long *heap;
+ long *heap;
unsigned long pos;
/*
while (pos != 0)
{
unsigned long parent = parent_offset(pos);
+
if (heap[parent] < heap[pos])
break;
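/*
 * A compact sketch of the array-embedded binary min-heap used above for the
 * free-block list: parent/child positions are derived from array offsets, a
 * push sifts the new element up until its parent is no larger (mirroring the
 * loop above), and a pop sifts the last element down from the root.  The
 * helper names are illustrative, not the real ltsGetFreeBlock/ltsReleaseBlock
 * routines.
 */
static inline unsigned long
heap_parent(unsigned long i)
{
	return (i - 1) / 2;
}

static void
heap_push_min(long *heap, int *heapsize, long value)
{
	unsigned long pos = (unsigned long) (*heapsize)++;

	heap[pos] = value;

	/* sift up: stop once the parent is already no larger than the child */
	while (pos != 0 && heap[heap_parent(pos)] > heap[pos])
	{
		long		tmp = heap[heap_parent(pos)];

		heap[heap_parent(pos)] = heap[pos];
		heap[pos] = tmp;
		pos = heap_parent(pos);
	}
}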
static void
ltsInitTape(LogicalTape *lt)
{
- lt->writing = true;
- lt->frozen = false;
- lt->dirty = false;
- lt->firstBlockNumber = -1L;
- lt->curBlockNumber = -1L;
- lt->nextBlockNumber = -1L;
+ lt->writing = true;
+ lt->frozen = false;
+ lt->dirty = false;
+ lt->firstBlockNumber = -1L;
+ lt->curBlockNumber = -1L;
+ lt->nextBlockNumber = -1L;
lt->offsetBlockNumber = 0L;
- lt->buffer = NULL;
- lt->buffer_size = 0;
+ lt->buffer = NULL;
+ lt->buffer_size = 0;
/* palloc() larger than MaxAllocSize would fail */
- lt->max_size = MaxAllocSize;
- lt->pos = 0;
- lt->nbytes = 0;
+ lt->max_size = MaxAllocSize;
+ lt->pos = 0;
+ lt->nbytes = 0;
}
/*
void
LogicalTapeSetExtend(LogicalTapeSet *lts, int nAdditional)
{
- int i;
- int nTapesOrig = lts->nTapes;
+ int i;
+ int nTapesOrig = lts->nTapes;
lts->nTapes += nAdditional;
- lts->tapes = (LogicalTape *) repalloc(
- lts->tapes, lts->nTapes * sizeof(LogicalTape));
+ lts->tapes = (LogicalTape *) repalloc(lts->tapes,
+ lts->nTapes * sizeof(LogicalTape));
for (i = nTapesOrig; i < lts->nTapes; i++)
ltsInitTape(&lts->tapes[i]);
bool isMaxSpaceDisk; /* true when maxSpace is value for on-disk
* space, false when it's value for in-memory
* space */
- TupSortStatus maxSpaceStatus; /* sort status when maxSpace was reached */
- MemoryContext maincontext; /* memory context for tuple sort metadata that
+ TupSortStatus maxSpaceStatus; /* sort status when maxSpace was reached */
+ MemoryContext maincontext; /* memory context for tuple sort metadata that
* persists across multiple batches */
MemoryContext sortcontext; /* memory context holding most sort data */
MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
/*
* Sort evicts data to the disk when it wasn't able to fit that data into
- * main memory. This is why we assume space used on the disk to be
- * more important for tracking resource usage than space used in memory.
- * Note that the amount of space occupied by some tupleset on the disk might
- * be less than amount of space occupied by the same tupleset in
- * memory due to more compact representation.
+ * main memory. This is why we assume space used on the disk to be more
+ * important for tracking resource usage than space used in memory. Note
+ * that the amount of space occupied by some tupleset on the disk might be
+ * less than the amount of space occupied by the same tupleset in memory due
+ * to more compact representation.
*/
if ((isSpaceDisk && !state->isMaxSpaceDisk) ||
(isSpaceDisk == state->isMaxSpaceDisk && spaceUsed > state->maxSpace))
if (superuser_password)
PG_CMD_PRINTF("ALTER USER \"%s\" WITH PASSWORD E'%s';\n\n",
- username, escape_quotes(superuser_password));
+ username, escape_quotes(superuser_password));
}
/*
* that it wins if libc defines a locale named ucs_basic.
*/
PG_CMD_PRINTF("INSERT INTO pg_collation (oid, collname, collnamespace, collowner, collprovider, collisdeterministic, collencoding, collcollate, collctype)"
- "VALUES (pg_nextoid('pg_catalog.pg_collation', 'oid', 'pg_catalog.pg_collation_oid_index'), 'ucs_basic', 'pg_catalog'::regnamespace, %u, '%c', true, %d, 'C', 'C');\n\n",
- BOOTSTRAP_SUPERUSERID, COLLPROVIDER_LIBC, PG_UTF8);
+ "VALUES (pg_nextoid('pg_catalog.pg_collation', 'oid', 'pg_catalog.pg_collation_oid_index'), 'ucs_basic', 'pg_catalog'::regnamespace, %u, '%c', true, %d, 'C', 'C');\n\n",
+ BOOTSTRAP_SUPERUSERID, COLLPROVIDER_LIBC, PG_UTF8);
/* Now import all collations we can find in the operating system */
PG_CMD_PUTS("SELECT pg_import_system_collations('pg_catalog');\n\n");
free(lines);
PG_CMD_PRINTF("UPDATE information_schema.sql_implementation_info "
- " SET character_value = '%s' "
- " WHERE implementation_info_name = 'DBMS VERSION';\n\n",
- infoversion);
+ " SET character_value = '%s' "
+ " WHERE implementation_info_name = 'DBMS VERSION';\n\n",
+ infoversion);
PG_CMD_PRINTF("COPY information_schema.sql_features "
- " (feature_id, feature_name, sub_feature_id, "
- " sub_feature_name, is_supported, comments) "
- " FROM E'%s';\n\n",
- escape_quotes(features_file));
+ " (feature_id, feature_name, sub_feature_id, "
+ " sub_feature_name, is_supported, comments) "
+ " FROM E'%s';\n\n",
+ escape_quotes(features_file));
}
/*
#ifdef HAVE_LIBZ
if (compresslevel != 0)
{
- int fd = dup(fileno(stdout));
+ int fd = dup(fileno(stdout));
+
if (fd < 0)
{
pg_log_error("could not duplicate stdout: %m");
if (strcmp(basedir, "-") == 0 && manifest)
{
char header[512];
- PQExpBufferData buf;
+ PQExpBufferData buf;
initPQExpBuffer(&buf);
ReceiveBackupManifestInMemory(conn, &buf);
# Run base backup.
$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backup", '-X', 'none' ],
'pg_basebackup runs');
-ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
+ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
# Permissions on backup should be default
$node->command_ok(
[
- 'pg_basebackup', '-D', "$tempdir/backup2", '--no-manifest',
- '--waldir', "$tempdir/xlog2"
+ 'pg_basebackup', '-D',
+ "$tempdir/backup2", '--no-manifest',
+ '--waldir', "$tempdir/xlog2"
],
'separate xlog directory');
-ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
-ok(! -f "$tempdir/backup2/backup_manifest", 'manifest was suppressed');
-ok(-d "$tempdir/xlog2/", 'xlog directory was created');
+ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
+ok(!-f "$tempdir/backup2/backup_manifest", 'manifest was suppressed');
+ok(-d "$tempdir/xlog2/", 'xlog directory was created');
rmtree("$tempdir/backup2");
rmtree("$tempdir/xlog2");
}
else if (mode == PG_MODE_ENABLE)
{
- int w;
+ int w;
/* Set checksum in page header */
header->pd_checksum = csum;
/* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */
if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
(foreign_servers_include_oids.head == NULL ||
- !simple_oid_list_member(&foreign_servers_include_oids,
- tbinfo->foreign_server)))
+ !simple_oid_list_member(&foreign_servers_include_oids,
+ tbinfo->foreign_server)))
return;
/* Skip partitioned tables (data in partitions) */
if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
TableInfo *tbinfo = &tblinfo[i];
/*
- * Only regular and partitioned tables can be added to
- * publications.
+ * Only regular and partitioned tables can be added to publications.
*/
if (tbinfo->relkind != RELKIND_RELATION &&
tbinfo->relkind != RELKIND_PARTITIONED_TABLE)
{
if (dobj->depends_on_ext)
{
- char *nm;
+ char *nm;
PGresult *res;
- PQExpBuffer query;
- int ntups;
- int i_extname;
- int i;
+ PQExpBuffer query;
+ int ntups;
+ int i_extname;
+ int i;
/* dodge fmtId() non-reentrancy */
nm = pg_strdup(objname);
indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't');
indxinfo[j].indisreplident = (PQgetvalue(res, j, i_indisreplident)[0] == 't');
indxinfo[j].parentidx = atooid(PQgetvalue(res, j, i_parentidx));
- indxinfo[j].partattaches = (SimplePtrList) { NULL, NULL };
+ indxinfo[j].partattaches = (SimplePtrList)
+ {
+ NULL, NULL
+ };
contype = *(PQgetvalue(res, j, i_contype));
if (contype == 'p' || contype == 'u' || contype == 'x')
for (j = 0; j < ntups; j++)
{
- TableInfo *reftable;
+ TableInfo *reftable;
constrinfo[j].dobj.objType = DO_FK_CONSTRAINT;
constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
delq = createPQExpBuffer();
foreign = tbinfo &&
- tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
+ tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : "";
if (coninfo->contype == 'p' ||
coninfo->contype == 'u' ||
DumpComponents dump; /* bitmask of components to dump */
DumpComponents dump_contains; /* as above, but for contained objects */
bool ext_member; /* true if object is member of extension */
- bool depends_on_ext; /* true if object depends on an extension */
+ bool depends_on_ext; /* true if object depends on an extension */
DumpId *dependencies; /* dumpIds of objects this one depends on */
int nDeps; /* number of valid dependencies */
int allocDeps; /* allocated size of dependencies[] */
bool indisclustered;
bool indisreplident;
Oid parentidx; /* if partitioned, parent index OID */
- SimplePtrList partattaches; /* if partitioned, partition attach objects */
+ SimplePtrList partattaches; /* if partitioned, partition attach objects */
/* if there is an associated constraint object, its dumpId: */
DumpId indexconstraint;
'CREATE COLLATION test0 FROM "C"' => {
create_order => 76,
create_sql => 'CREATE COLLATION test0 FROM "C";',
- regexp =>
+ regexp =>
qr/CREATE COLLATION public.test0 \(provider = libc, locale = 'C'(, version = '[^']*')?\);/m,
collation => 1,
like => { %full_runs, section_pre_data => 1, },
"CREATE DATABASE dump_test2 LOCALE = 'C'" => {
create_order => 47,
- create_sql => "CREATE DATABASE dump_test2 LOCALE = 'C' TEMPLATE = template0;",
- regexp => qr/^
+ create_sql =>
+ "CREATE DATABASE dump_test2 LOCALE = 'C' TEMPLATE = template0;",
+ regexp => qr/^
\QCREATE DATABASE dump_test2 \E.*\QLOCALE = 'C';\E
/xm,
like => { pg_dumpall_dbprivs => 1, },
unlike => { exclude_dump_test_schema => 1, },
},
- # verify that a custom operator/opclass/range type is dumped in right order
+ # verify that a custom operator/opclass/range type is dumped in right order
'CREATE OPERATOR CLASS dump_test.op_class_custom' => {
create_order => 74,
create_sql => 'CREATE OPERATOR dump_test.~~ (
'ALTER STATISTICS extended_stats_options' => {
create_order => 98,
- create_sql => 'ALTER STATISTICS dump_test.test_ext_stats_opts SET STATISTICS 1000',
+ create_sql =>
+ 'ALTER STATISTICS dump_test.test_ext_stats_opts SET STATISTICS 1000',
regexp => qr/^
\QALTER STATISTICS dump_test.test_ext_stats_opts SET STATISTICS 1000;\E
/xms,
# Verify that dumping foreign data includes only foreign tables of
# matching servers
-$node->safe_psql( 'postgres', "CREATE FOREIGN DATA WRAPPER dummy");
-$node->safe_psql( 'postgres', "CREATE SERVER s0 FOREIGN DATA WRAPPER dummy");
-$node->safe_psql( 'postgres', "CREATE SERVER s1 FOREIGN DATA WRAPPER dummy");
-$node->safe_psql( 'postgres', "CREATE SERVER s2 FOREIGN DATA WRAPPER dummy");
-$node->safe_psql( 'postgres', "CREATE FOREIGN TABLE t0 (a int) SERVER s0");
-$node->safe_psql( 'postgres', "CREATE FOREIGN TABLE t1 (a int) SERVER s1");
+$node->safe_psql('postgres', "CREATE FOREIGN DATA WRAPPER dummy");
+$node->safe_psql('postgres', "CREATE SERVER s0 FOREIGN DATA WRAPPER dummy");
+$node->safe_psql('postgres', "CREATE SERVER s1 FOREIGN DATA WRAPPER dummy");
+$node->safe_psql('postgres', "CREATE SERVER s2 FOREIGN DATA WRAPPER dummy");
+$node->safe_psql('postgres', "CREATE FOREIGN TABLE t0 (a int) SERVER s0");
+$node->safe_psql('postgres', "CREATE FOREIGN TABLE t1 (a int) SERVER s1");
my ($cmd, $stdout, $stderr, $result);
command_fails_like(
- [ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ],
+ [ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ],
qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: error: query was:.*t0/,
"correctly fails to dump a foreign table from a dummy FDW");
command_ok(
- [ "pg_dump", '-p', $port, '-a', '--include-foreign-data=s2', 'postgres' ] ,
+ [ "pg_dump", '-p', $port, '-a', '--include-foreign-data=s2', 'postgres' ],
"dump foreign server with no tables");
#include "pg_rewind.h"
#include "port/pg_bswap.h"
-PGconn *conn = NULL;
+PGconn *conn = NULL;
/*
* Files are fetched max CHUNKSIZE bytes at a time.
struct JsonManifestParseContext;
typedef struct JsonManifestParseContext JsonManifestParseContext;
-typedef void (*json_manifest_perfile_callback)(JsonManifestParseContext *,
- char *pathname,
- size_t size, pg_checksum_type checksum_type,
- int checksum_length, uint8 *checksum_payload);
-typedef void (*json_manifest_perwalrange_callback)(JsonManifestParseContext *,
- TimeLineID tli,
- XLogRecPtr start_lsn, XLogRecPtr end_lsn);
-typedef void (*json_manifest_error_callback)(JsonManifestParseContext *,
- const char *fmt, ...) pg_attribute_printf(2, 3)
- pg_attribute_noreturn();
+typedef void (*json_manifest_perfile_callback) (JsonManifestParseContext *,
+ char *pathname,
+ size_t size, pg_checksum_type checksum_type,
+ int checksum_length, uint8 *checksum_payload);
+typedef void (*json_manifest_perwalrange_callback) (JsonManifestParseContext *,
+ TimeLineID tli,
+ XLogRecPtr start_lsn, XLogRecPtr end_lsn);
+typedef void (*json_manifest_error_callback) (JsonManifestParseContext *,
+ const char *fmt,...) pg_attribute_printf(2, 3)
+ pg_attribute_noreturn();
struct JsonManifestParseContext
{
}
/*
- * We don't verify checksums at this stage. We first finish verifying
- * that we have the expected set of files with the expected sizes, and
- * only afterwards verify the checksums. That's because computing
- * checksums may take a while, and we'd like to report more obvious
- * problems quickly.
+ * We don't verify checksums at this stage. We first finish verifying that
+ * we have the expected set of files with the expected sizes, and only
+ * afterwards verify the checksums. That's because computing checksums may
+ * take a while, and we'd like to report more obvious problems quickly.
*/
}
*/
static void
verify_file_checksum(verifier_context *context, manifest_file *m,
- char *fullpath)
+ char *fullpath)
{
pg_checksum_context checksum_ctx;
char *relpath = m->pathname;
program_version_ok('pg_verifybackup');
program_options_handling_ok('pg_verifybackup');
-command_fails_like(['pg_verifybackup'],
- qr/no backup directory specified/,
- 'target directory must be specified');
-command_fails_like(['pg_verifybackup', $tempdir],
- qr/could not open file.*\/backup_manifest\"/,
- 'pg_verifybackup requires a manifest');
-command_fails_like(['pg_verifybackup', $tempdir, $tempdir],
- qr/too many command-line arguments/,
- 'multiple target directories not allowed');
+command_fails_like(
+ ['pg_verifybackup'],
+ qr/no backup directory specified/,
+ 'target directory must be specified');
+command_fails_like(
+ [ 'pg_verifybackup', $tempdir ],
+ qr/could not open file.*\/backup_manifest\"/,
+ 'pg_verifybackup requires a manifest');
+command_fails_like(
+ [ 'pg_verifybackup', $tempdir, $tempdir ],
+ qr/too many command-line arguments/,
+ 'multiple target directories not allowed');
# create fake manifest file
open(my $fh, '>', "$tempdir/backup_manifest") || die "open: $!";
close($fh);
# but then try to use an alternate, nonexisting manifest
-command_fails_like(['pg_verifybackup', '-m', "$tempdir/not_the_manifest",
- $tempdir],
- qr/could not open file.*\/not_the_manifest\"/,
- 'pg_verifybackup respects -m flag');
+command_fails_like(
+ [ 'pg_verifybackup', '-m', "$tempdir/not_the_manifest", $tempdir ],
+ qr/could not open file.*\/not_the_manifest\"/,
+ 'pg_verifybackup respects -m flag');
for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512))
{
my $backup_path = $master->backup_dir . '/' . $algorithm;
- my @backup = ('pg_basebackup', '-D', $backup_path,
- '--manifest-checksums', $algorithm,
- '--no-sync');
+ my @backup = (
+ 'pg_basebackup', '-D', $backup_path,
+ '--manifest-checksums', $algorithm, '--no-sync');
my @verify = ('pg_verifybackup', '-e', $backup_path);
# A backup with a bogus algorithm should fail.
if ($algorithm eq 'bogus')
{
$master->command_fails(\@backup,
- "backup fails with algorithm \"$algorithm\"");
+ "backup fails with algorithm \"$algorithm\"");
next;
}
{
my $manifest = slurp_file("$backup_path/backup_manifest");
my $count_of_algorithm_in_manifest =
- (() = $manifest =~ /$algorithm/mig);
- cmp_ok($count_of_algorithm_in_manifest, '>', 100,
- "$algorithm is mentioned many times in the manifest");
+ (() = $manifest =~ /$algorithm/mig);
+ cmp_ok($count_of_algorithm_in_manifest,
+ '>', 100, "$algorithm is mentioned many times in the manifest");
}
# Make sure that it verifies OK.
$master->command_ok(\@verify,
- "verify backup with algorithm \"$algorithm\"");
+ "verify backup with algorithm \"$algorithm\"");
# Remove backup immediately to save disk space.
rmtree($backup_path);
# Include a user-defined tablespace in the hopes of detecting problems in that
# area.
-my $source_ts_path = TestLib::perl2host(TestLib::tempdir_short());
+my $source_ts_path = TestLib::perl2host(TestLib::tempdir_short());
my $source_ts_prefix = $source_ts_path;
$source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!;
my @scenario = (
{
- 'name' => 'extra_file',
+ 'name' => 'extra_file',
'mutilate' => \&mutilate_extra_file,
'fails_like' =>
- qr/extra_file.*present on disk but not in the manifest/
+ qr/extra_file.*present on disk but not in the manifest/
},
{
- 'name' => 'extra_tablespace_file',
+ 'name' => 'extra_tablespace_file',
'mutilate' => \&mutilate_extra_tablespace_file,
'fails_like' =>
- qr/extra_ts_file.*present on disk but not in the manifest/
+ qr/extra_ts_file.*present on disk but not in the manifest/
},
{
- 'name' => 'missing_file',
+ 'name' => 'missing_file',
'mutilate' => \&mutilate_missing_file,
'fails_like' =>
- qr/pg_xact\/0000.*present in the manifest but not on disk/
+ qr/pg_xact\/0000.*present in the manifest but not on disk/
},
{
- 'name' => 'missing_tablespace',
+ 'name' => 'missing_tablespace',
'mutilate' => \&mutilate_missing_tablespace,
'fails_like' =>
- qr/pg_tblspc.*present in the manifest but not on disk/
+ qr/pg_tblspc.*present in the manifest but not on disk/
},
{
- 'name' => 'append_to_file',
- 'mutilate' => \&mutilate_append_to_file,
- 'fails_like' =>
- qr/has size \d+ on disk but size \d+ in the manifest/
+ 'name' => 'append_to_file',
+ 'mutilate' => \&mutilate_append_to_file,
+ 'fails_like' => qr/has size \d+ on disk but size \d+ in the manifest/
},
{
- 'name' => 'truncate_file',
- 'mutilate' => \&mutilate_truncate_file,
- 'fails_like' =>
- qr/has size 0 on disk but size \d+ in the manifest/
+ 'name' => 'truncate_file',
+ 'mutilate' => \&mutilate_truncate_file,
+ 'fails_like' => qr/has size 0 on disk but size \d+ in the manifest/
},
{
- 'name' => 'replace_file',
- 'mutilate' => \&mutilate_replace_file,
+ 'name' => 'replace_file',
+ 'mutilate' => \&mutilate_replace_file,
'fails_like' => qr/checksum mismatch for file/
},
{
- 'name' => 'bad_manifest',
- 'mutilate' => \&mutilate_bad_manifest,
+ 'name' => 'bad_manifest',
+ 'mutilate' => \&mutilate_bad_manifest,
'fails_like' => qr/manifest checksum mismatch/
},
{
- 'name' => 'open_file_fails',
- 'mutilate' => \&mutilate_open_file_fails,
- 'fails_like' => qr/could not open file/,
+ 'name' => 'open_file_fails',
+ 'mutilate' => \&mutilate_open_file_fails,
+ 'fails_like' => qr/could not open file/,
'skip_on_windows' => 1
},
{
- 'name' => 'open_directory_fails',
- 'mutilate' => \&mutilate_open_directory_fails,
- 'cleanup' => \&cleanup_open_directory_fails,
- 'fails_like' => qr/could not open directory/,
+ 'name' => 'open_directory_fails',
+ 'mutilate' => \&mutilate_open_directory_fails,
+ 'cleanup' => \&cleanup_open_directory_fails,
+ 'fails_like' => qr/could not open directory/,
'skip_on_windows' => 1
},
{
- 'name' => 'search_directory_fails',
- 'mutilate' => \&mutilate_search_directory_fails,
- 'cleanup' => \&cleanup_search_directory_fails,
- 'fails_like' => qr/could not stat file or directory/,
+ 'name' => 'search_directory_fails',
+ 'mutilate' => \&mutilate_search_directory_fails,
+ 'cleanup' => \&cleanup_search_directory_fails,
+ 'fails_like' => qr/could not stat file or directory/,
'skip_on_windows' => 1
- }
-);
+ });
for my $scenario (@scenario)
{
my $name = $scenario->{'name'};
- SKIP:
+ SKIP:
{
skip "unix-style permissions not supported on Windows", 4
- if $scenario->{'skip_on_windows'} && $windows_os;
+ if $scenario->{'skip_on_windows'} && $windows_os;
# Take a backup and check that it verifies OK.
- my $backup_path = $master->backup_dir . '/' . $name;
+ my $backup_path = $master->backup_dir . '/' . $name;
my $backup_ts_path = TestLib::perl2host(TestLib::tempdir_short());
# The tablespace map parameter confuses Msys2, which tries to mangle
# it. Tell it not to.
# See https://www.msys2.org/wiki/Porting/#filesystem-namespaces
local $ENV{MSYS2_ARG_CONV_EXCL} = $source_ts_prefix;
- $master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync',
- '-T', "${source_ts_path}=${backup_ts_path}"],
- "base backup ok");
- command_ok(['pg_verifybackup', $backup_path ],
- "intact backup verified");
+ $master->command_ok(
+ [
+ 'pg_basebackup', '-D', $backup_path, '--no-sync',
+ '-T', "${source_ts_path}=${backup_ts_path}"
+ ],
+ "base backup ok");
+ command_ok([ 'pg_verifybackup', $backup_path ],
+ "intact backup verified");
# Mutilate the backup in some way.
$scenario->{'mutilate'}->($backup_path);
# Now check that the backup no longer verifies.
- command_fails_like(['pg_verifybackup', $backup_path ],
- $scenario->{'fails_like'},
- "corrupt backup fails verification: $name");
+ command_fails_like(
+ [ 'pg_verifybackup', $backup_path ],
+ $scenario->{'fails_like'},
+ "corrupt backup fails verification: $name");
# Run cleanup hook, if provided.
$scenario->{'cleanup'}->($backup_path)
- if exists $scenario->{'cleanup'};
+ if exists $scenario->{'cleanup'};
# Finally, use rmtree to reclaim space.
rmtree($backup_path);
sub mutilate_extra_tablespace_file
{
my ($backup_path) = @_;
- my ($tsoid) = grep { $_ ne '.' && $_ ne '..' }
- slurp_dir("$backup_path/pg_tblspc");
+ my ($tsoid) =
+ grep { $_ ne '.' && $_ ne '..' } slurp_dir("$backup_path/pg_tblspc");
my ($catvdir) = grep { $_ ne '.' && $_ ne '..' }
- slurp_dir("$backup_path/pg_tblspc/$tsoid");
+ slurp_dir("$backup_path/pg_tblspc/$tsoid");
my ($tsdboid) = grep { $_ ne '.' && $_ ne '..' }
- slurp_dir("$backup_path/pg_tblspc/$tsoid/$catvdir");
+ slurp_dir("$backup_path/pg_tblspc/$tsoid/$catvdir");
create_extra_file($backup_path,
- "pg_tblspc/$tsoid/$catvdir/$tsdboid/extra_ts_file");
+ "pg_tblspc/$tsoid/$catvdir/$tsdboid/extra_ts_file");
return;
}
sub mutilate_missing_tablespace
{
my ($backup_path) = @_;
- my ($tsoid) = grep { $_ ne '.' && $_ ne '..' }
- slurp_dir("$backup_path/pg_tblspc");
+ my ($tsoid) =
+ grep { $_ ne '.' && $_ ne '..' } slurp_dir("$backup_path/pg_tblspc");
my $pathname = "$backup_path/pg_tblspc/$tsoid";
if ($windows_os)
{
sub mutilate_replace_file
{
my ($backup_path) = @_;
- my $pathname = "$backup_path/PG_VERSION";
- my $contents = slurp_file($pathname);
+ my $pathname = "$backup_path/PG_VERSION";
+ my $contents = slurp_file($pathname);
open(my $fh, '>', $pathname) || die "open $pathname: $!";
print $fh 'q' x length($contents);
close($fh);
}
# rmtree can't cope with a mode 400 directory, so change back to 700.
-sub cleanup_search_directory_fails
+sub cleanup_search_directory_fails
{
my ($backup_path) = @_;
my $pathname = "$backup_path/base";
$master->init(allows_streaming => 1);
$master->start;
my $backup_path = $master->backup_dir . '/test_options';
-$master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync' ],
- "base backup ok");
+$master->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync' ],
+ "base backup ok");
# Verify that pg_verifybackup -q succeeds and produces no output.
my $stdout;
my $stderr;
-my $result = IPC::Run::run ['pg_verifybackup', '-q', $backup_path ],
- '>', \$stdout, '2>', \$stderr;
+my $result = IPC::Run::run [ 'pg_verifybackup', '-q', $backup_path ],
+ '>', \$stdout, '2>', \$stderr;
ok($result, "-q succeeds: exit code 0");
is($stdout, '', "-q succeeds: no stdout");
is($stderr, '', "-q succeeds: no stderr");
close($fh);
# Verify that pg_verifybackup -q now fails.
-command_fails_like(['pg_verifybackup', '-q', $backup_path ],
- qr/checksum mismatch for file \"PG_VERSION\"/,
- '-q checksum mismatch');
+command_fails_like(
+ [ 'pg_verifybackup', '-q', $backup_path ],
+ qr/checksum mismatch for file \"PG_VERSION\"/,
+ '-q checksum mismatch');
# Since we didn't change the length of the file, verification should succeed
# if we ignore checksums. Check that we get the right message, too.
-command_like(['pg_verifybackup', '-s', $backup_path ],
- qr/backup successfully verified/,
- '-s skips checksumming');
+command_like(
+ [ 'pg_verifybackup', '-s', $backup_path ],
+ qr/backup successfully verified/,
+ '-s skips checksumming');
# Validation should succeed if we ignore the problem file.
-command_like(['pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
- qr/backup successfully verified/,
- '-i ignores problem file');
+command_like(
+ [ 'pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
+ qr/backup successfully verified/,
+ '-i ignores problem file');
# PG_VERSION is already corrupt; let's try also removing all of pg_xact.
rmtree($backup_path . "/pg_xact");
# We're ignoring the problem with PG_VERSION, but not the problem with
# pg_xact, so verification should fail here.
-command_fails_like(['pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
- qr/pg_xact.*is present in the manifest but not on disk/,
- '-i does not ignore all problems');
+command_fails_like(
+ [ 'pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
+ qr/pg_xact.*is present in the manifest but not on disk/,
+ '-i does not ignore all problems');
# If we use -i twice, we should be able to ignore all of the problems.
-command_like(['pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact',
- $backup_path ],
- qr/backup successfully verified/,
- 'multiple -i options work');
+command_like(
+ [ 'pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact', $backup_path ],
+ qr/backup successfully verified/,
+ 'multiple -i options work');
# Verify that when -i is not used, both problems are reported.
-$result = IPC::Run::run ['pg_verifybackup', $backup_path ],
- '>', \$stdout, '2>', \$stderr;
+$result = IPC::Run::run [ 'pg_verifybackup', $backup_path ],
+ '>', \$stdout, '2>', \$stderr;
ok(!$result, "multiple problems: fails");
-like($stderr, qr/pg_xact.*is present in the manifest but not on disk/,
- "multiple problems: missing files reported");
-like($stderr, qr/checksum mismatch for file \"PG_VERSION\"/,
- "multiple problems: checksum mismatch reported");
+like(
+ $stderr,
+ qr/pg_xact.*is present in the manifest but not on disk/,
+ "multiple problems: missing files reported");
+like(
+ $stderr,
+ qr/checksum mismatch for file \"PG_VERSION\"/,
+ "multiple problems: checksum mismatch reported");
# Verify that when -e is used, only the problem detected first is reported.
-$result = IPC::Run::run ['pg_verifybackup', '-e', $backup_path ],
- '>', \$stdout, '2>', \$stderr;
+$result = IPC::Run::run [ 'pg_verifybackup', '-e', $backup_path ],
+ '>', \$stdout, '2>', \$stderr;
ok(!$result, "-e reports 1 error: fails");
-like($stderr, qr/pg_xact.*is present in the manifest but not on disk/,
- "-e reports 1 error: missing files reported");
-unlike($stderr, qr/checksum mismatch for file \"PG_VERSION\"/,
- "-e reports 1 error: checksum mismatch not reported");
+like(
+ $stderr,
+ qr/pg_xact.*is present in the manifest but not on disk/,
+ "-e reports 1 error: missing files reported");
+unlike(
+ $stderr,
+ qr/checksum mismatch for file \"PG_VERSION\"/,
+ "-e reports 1 error: checksum mismatch not reported");
# Test valid manifest with nonexistent backup directory.
-command_fails_like(['pg_verifybackup', '-m', "$backup_path/backup_manifest",
- "$backup_path/fake" ],
- qr/could not open directory/,
- 'nonexistent backup directory');
+command_fails_like(
+ [
+ 'pg_verifybackup', '-m',
+ "$backup_path/backup_manifest", "$backup_path/fake"
+ ],
+ qr/could not open directory/,
+ 'nonexistent backup directory');
my $tempdir = TestLib::tempdir;
-test_bad_manifest('input string ended unexpectedly',
- qr/could not parse backup manifest: The input string ended unexpectedly/,
-	<<EOM);
+test_bad_manifest(
+ 'input string ended unexpectedly',
+ qr/could not parse backup manifest: The input string ended unexpectedly/,
+	<<EOM);
{
EOM
EOM
chomp($manifest_without_newline);
test_parse_error('last line not newline-terminated',
- $manifest_without_newline);
+ $manifest_without_newline);
test_fatal_error('invalid manifest checksum', <<EOM);
{"PostgreSQL-Backup-Manifest-Version": 1, "Files": [],
my ($test_name, $manifest_contents) = @_;
test_bad_manifest($test_name,
- qr/could not parse backup manifest: $test_name/,
- $manifest_contents);
+ qr/could not parse backup manifest: $test_name/,
+ $manifest_contents);
return;
}
{
my ($test_name, $manifest_contents) = @_;
- test_bad_manifest($test_name,
- qr/fatal: $test_name/,
- $manifest_contents);
+ test_bad_manifest($test_name, qr/fatal: $test_name/, $manifest_contents);
return;
}
print $fh $manifest_contents;
close($fh);
- command_fails_like(['pg_verifybackup', $tempdir], $regexp,
- $test_name);
+ command_fails_like([ 'pg_verifybackup', $tempdir ], $regexp, $test_name);
return;
}
$master->init(allows_streaming => 1);
$master->start;
my $backup_path = $master->backup_dir . '/test_encoding';
-$master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync',
- '--manifest-force-encode' ],
- "backup ok with forced hex encoding");
+$master->command_ok(
+ [
+ 'pg_basebackup', '-D',
+ $backup_path, '--no-sync',
+ '--manifest-force-encode'
+ ],
+ "backup ok with forced hex encoding");
my $manifest = slurp_file("$backup_path/backup_manifest");
-my $count_of_encoded_path_in_manifest =
- (() = $manifest =~ /Encoded-Path/mig);
-cmp_ok($count_of_encoded_path_in_manifest, '>', 100,
- "many paths are encoded in the manifest");
+my $count_of_encoded_path_in_manifest = (() = $manifest =~ /Encoded-Path/mig);
+cmp_ok($count_of_encoded_path_in_manifest,
+ '>', 100, "many paths are encoded in the manifest");
-command_like(['pg_verifybackup', '-s', $backup_path ],
- qr/backup successfully verified/,
- 'backup with forced encoding verified');
+command_like(
+ [ 'pg_verifybackup', '-s', $backup_path ],
+ qr/backup successfully verified/,
+ 'backup with forced encoding verified');
$master->init(allows_streaming => 1);
$master->start;
my $backup_path = $master->backup_dir . '/test_wal';
-$master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync' ],
- "base backup ok");
+$master->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync' ],
+ "base backup ok");
# Rename pg_wal.
-my $original_pg_wal = $backup_path . '/pg_wal';
+my $original_pg_wal = $backup_path . '/pg_wal';
my $relocated_pg_wal = $master->backup_dir . '/relocated_pg_wal';
rename($original_pg_wal, $relocated_pg_wal) || die "rename pg_wal: $!";
# WAL verification should fail.
-command_fails_like(['pg_verifybackup', $backup_path ],
- qr/WAL parsing failed for timeline 1/,
- 'missing pg_wal causes failure');
+command_fails_like(
+ [ 'pg_verifybackup', $backup_path ],
+ qr/WAL parsing failed for timeline 1/,
+ 'missing pg_wal causes failure');
# Should work if we skip WAL verification.
-command_ok(['pg_verifybackup', '-n', $backup_path ],
- 'missing pg_wal OK if not verifying WAL');
+command_ok(
+ [ 'pg_verifybackup', '-n', $backup_path ],
+ 'missing pg_wal OK if not verifying WAL');
# Should also work if we specify the correct WAL location.
-command_ok(['pg_verifybackup', '-w', $relocated_pg_wal, $backup_path ],
- '-w can be used to specify WAL directory');
+command_ok([ 'pg_verifybackup', '-w', $relocated_pg_wal, $backup_path ],
+ '-w can be used to specify WAL directory');
# Move directory back to original location.
rename($relocated_pg_wal, $original_pg_wal) || die "rename pg_wal back: $!";
# Replace the contents of one of the files with garbage of equal length.
my $wal_corruption_target = $original_pg_wal . '/' . $walfiles[0];
-my $wal_size = -s $wal_corruption_target;
+my $wal_size = -s $wal_corruption_target;
open(my $fh, '>', $wal_corruption_target)
- || die "open $wal_corruption_target: $!";
+ || die "open $wal_corruption_target: $!";
print $fh 'w' x $wal_size;
close($fh);
# WAL verification should fail.
-command_fails_like(['pg_verifybackup', $backup_path ],
- qr/WAL parsing failed for timeline 1/,
- 'corrupt WAL file causes failure');
+command_fails_like(
+ [ 'pg_verifybackup', $backup_path ],
+ qr/WAL parsing failed for timeline 1/,
+ 'corrupt WAL file causes failure');
PART_NONE, /* no partitioning */
PART_RANGE, /* range partitioning */
PART_HASH /* hash partitioning */
-} partition_method_t;
+} partition_method_t;
static partition_method_t partition_method = PART_NONE;
static const char *PARTITION_METHOD[] = {"none", "range", "hash"};
if (unlikely(__pg_log_level <= PG_LOG_DEBUG))
{
- PQExpBufferData buf;
+ PQExpBufferData buf;
initPQExpBuffer(&buf);
snprintf(sql, sizeof(sql),
"insert into pgbench_accounts(aid,bid,abalance,filler) "
"select aid, (aid - 1) / %d + 1, 0, '' "
- "from generate_series(1, "INT64_FORMAT") as aid",
+ "from generate_series(1, " INT64_FORMAT ") as aid",
naccounts, (int64) naccounts * scale);
executeStatement(con, sql);
{
fprintf(stderr, "%s\n", line);
if (column >= 0)
- fprintf(stderr, "%*c error found here\n", column+1, '^');
+ fprintf(stderr, "%*c error found here\n", column + 1, '^');
}
exit(1);
# the next commands will issue a syntax error if the path contains a "'"
$node->safe_psql('postgres',
- "CREATE TABLESPACE regress_pgbench_tap_1_ts LOCATION '$ets';"
-);
+ "CREATE TABLESPACE regress_pgbench_tap_1_ts LOCATION '$ets';");
# Test concurrent OID generation via pg_enum_oid_index. This indirectly
# exercises LWLock and spinlock concurrency.
'-i', 0,
[qr{^$}],
[
- qr{creating tables}, qr{vacuuming},
- qr{creating primary keys}, qr{done in \d+\.\d\d s }
+ qr{creating tables},
+ qr{vacuuming},
+ qr{creating primary keys},
+ qr{done in \d+\.\d\d s }
],
'pgbench scale 1 initialization',);
qr{vacuuming},
qr{creating primary keys},
qr{creating foreign keys},
- qr{(?!vacuuming)}, # no vacuum
+ qr{(?!vacuuming)}, # no vacuum
qr{done in \d+\.\d\d s }
],
'pgbench scale 1 initialization');
qr{creating primary keys},
qr{generating data \(server-side\)},
qr{creating foreign keys},
- qr{(?!vacuuming)}, # no vacuum
+ qr{(?!vacuuming)}, # no vacuum
qr{done in \d+\.\d\d s }
],
'pgbench --init-steps');
# 1. Logging neither with errors nor with statements
$node->append_conf('postgresql.conf',
- "log_min_duration_statement = 0\n" .
- "log_parameter_max_length = 0\n" .
- "log_parameter_max_length_on_error = 0");
+ "log_min_duration_statement = 0\n"
+ . "log_parameter_max_length = 0\n"
+ . "log_parameter_max_length_on_error = 0");
$node->reload;
pgbench(
- '-n -t1 -c1 -M prepared',
- 2,
- [],
- [
+ '-n -t1 -c1 -M prepared',
+ 2,
+ [],
+ [
qr{ERROR: invalid input syntax for type json},
qr{(?!extended query with parameters)}
- ],
- 'server parameter logging',
- {
- '001_param_1' => q[select '{ invalid ' as value \gset
+ ],
+ 'server parameter logging',
+ {
+ '001_param_1' => q[select '{ invalid ' as value \gset
select $$'Valame Dios!' dijo Sancho; 'no le dije yo a vuestra merced que mirase bien lo que hacia?'$$ as long \gset
select column1::jsonb from (values (:value), (:long)) as q;
]
});
my $log = TestLib::slurp_file($node->logfile);
-unlike($log, qr[DETAIL: parameters: \$1 = '\{ invalid ',], "no parameters logged");
+unlike(
+ $log,
+ qr[DETAIL: parameters: \$1 = '\{ invalid ',],
+ "no parameters logged");
$log = undef;
# 2. Logging truncated parameters on error, full with statements
$node->append_conf('postgresql.conf',
- "log_parameter_max_length = -1\n" .
- "log_parameter_max_length_on_error = 64");
+ "log_parameter_max_length = -1\n"
+ . "log_parameter_max_length_on_error = 64");
$node->reload;
pgbench(
- '-n -t1 -c1 -M prepared',
- 2,
- [],
- [
+ '-n -t1 -c1 -M prepared',
+ 2,
+ [],
+ [
qr{ERROR: division by zero},
qr{CONTEXT: extended query with parameters: \$1 = '1', \$2 = NULL}
- ],
- 'server parameter logging',
- {
- '001_param_2' => q{select '1' as one \gset
+ ],
+ 'server parameter logging',
+ {
+ '001_param_2' => q{select '1' as one \gset
SELECT 1 / (random() / 2)::int, :one::int, :two::int;
}
});
pgbench(
- '-n -t1 -c1 -M prepared',
- 2,
- [],
- [
+ '-n -t1 -c1 -M prepared',
+ 2,
+ [],
+ [
qr{ERROR: invalid input syntax for type json},
qr[CONTEXT: JSON data, line 1: \{ invalid\.\.\.[\r\n]+extended query with parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que \.\.\.']m
- ],
- 'server parameter logging',
- {
- '001_param_3' => q[select '{ invalid ' as value \gset
+ ],
+ 'server parameter logging',
+ {
+ '001_param_3' => q[select '{ invalid ' as value \gset
select $$'Valame Dios!' dijo Sancho; 'no le dije yo a vuestra merced que mirase bien lo que hacia?'$$ as long \gset
select column1::jsonb from (values (:value), (:long)) as q;
]
});
$log = TestLib::slurp_file($node->logfile);
-like($log, qr[DETAIL: parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?'''],
- "parameter report does not truncate");
+like(
+ $log,
+ qr[DETAIL: parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?'''],
+ "parameter report does not truncate");
$log = undef;
# 3. Logging full parameters on error, truncated with statements
$node->append_conf('postgresql.conf',
- "log_min_duration_statement = -1\n" .
- "log_parameter_max_length = 7\n" .
- "log_parameter_max_length_on_error = -1");
+ "log_min_duration_statement = -1\n"
+ . "log_parameter_max_length = 7\n"
+ . "log_parameter_max_length_on_error = -1");
$node->reload;
pgbench(
- '-n -t1 -c1 -M prepared',
- 2,
- [],
- [
+ '-n -t1 -c1 -M prepared',
+ 2,
+ [],
+ [
qr{ERROR: division by zero},
qr{CONTEXT: extended query with parameters: \$1 = '1', \$2 = NULL}
- ],
- 'server parameter logging',
- {
- '001_param_4' => q{select '1' as one \gset
+ ],
+ 'server parameter logging',
+ {
+ '001_param_4' => q{select '1' as one \gset
SELECT 1 / (random() / 2)::int, :one::int, :two::int;
}
});
$node->append_conf('postgresql.conf', "log_min_duration_statement = 0");
$node->reload;
pgbench(
- '-n -t1 -c1 -M prepared',
- 2,
- [],
- [
+ '-n -t1 -c1 -M prepared',
+ 2,
+ [],
+ [
qr{ERROR: invalid input syntax for type json},
qr[CONTEXT: JSON data, line 1: \{ invalid\.\.\.[\r\n]+extended query with parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?']m
- ],
- 'server parameter logging',
- {
- '001_param_5' => q[select '{ invalid ' as value \gset
+ ],
+ 'server parameter logging',
+ {
+ '001_param_5' => q[select '{ invalid ' as value \gset
select $$'Valame Dios!' dijo Sancho; 'no le dije yo a vuestra merced que mirase bien lo que hacia?'$$ as long \gset
select column1::jsonb from (values (:value), (:long)) as q;
]
});
$log = TestLib::slurp_file($node->logfile);
-like($log, qr[DETAIL: parameters: \$1 = '\{ inval\.\.\.', \$2 = '''Valame\.\.\.'],
- "parameter report truncates");
+like(
+ $log,
+ qr[DETAIL: parameters: \$1 = '\{ inval\.\.\.', \$2 = '''Valame\.\.\.'],
+ "parameter report truncates");
$log = undef;
# Restore default logging config
$node->append_conf('postgresql.conf',
- "log_min_duration_statement = -1\n" .
- "log_parameter_max_length_on_error = 0\n" .
- "log_parameter_max_length = -1");
+ "log_min_duration_statement = -1\n"
+ . "log_parameter_max_length_on_error = 0\n"
+ . "log_parameter_max_length = -1");
$node->reload;
# test expressions
[
'invalid init step',
'-i -I dta',
- [ qr{unrecognized initialization step}, qr{Allowed step characters are} ]
+ [
+ qr{unrecognized initialization step},
+ qr{Allowed step characters are}
+ ]
],
[
'bad random seed',
qr{error while setting random seed from --random-seed option}
]
],
- [ 'bad partition method', '-i --partition-method=BAD', [qr{"range"}, qr{"hash"}, qr{"BAD"}] ],
- [ 'bad partition number', '-i --partitions -1', [ qr{invalid number of partitions: "-1"} ] ],
+ [
+ 'bad partition method',
+ '-i --partition-method=BAD',
+ [ qr{"range"}, qr{"hash"}, qr{"BAD"} ]
+ ],
+ [
+ 'bad partition number',
+ '-i --partitions -1',
+ [qr{invalid number of partitions: "-1"}]
+ ],
[
'partition method without partitioning',
'-i --partition-method=hash',
- [ qr{partition-method requires greater than zero --partitions} ]
+ [qr{partition-method requires greater than zero --partitions}]
],
# logging sub-options
'--show-script se',
0,
[qr{^$}],
- [ qr{select-only: }, qr{SELECT abalance FROM pgbench_accounts WHERE},
- qr{(?!UPDATE)}, qr{(?!INSERT)} ],
+ [
+ qr{select-only: }, qr{SELECT abalance FROM pgbench_accounts WHERE},
+ qr{(?!UPDATE)}, qr{(?!INSERT)}
+ ],
'pgbench builtin listing');
my @script_tests = (
static bool
PrintQueryTuples(const PGresult *results)
{
- bool result = true;
+ bool result = true;
/* write output to \g argument, if any */
if (pset.gfname)
/* Visually distinguish inherited triggers */
if (!PQgetisnull(result, i, 4))
appendPQExpBuffer(&buf, ", ON TABLE %s",
- PQgetvalue(result, i, 4));
+ PQgetvalue(result, i, 4));
printTableAddFooter(&cont, buf.data);
}
bool found_q = false;
/*
- * The assistance words, help/exit/quit, must have no
- * whitespace before them, and only whitespace after, with an
- * optional semicolon. This prevents indented use of these
- * words, perhaps as identifiers, from invoking the assistance
- * behavior.
+ * The assistance words, help/exit/quit, must have no whitespace
+ * before them, and only whitespace after, with an optional
+ * semicolon. This prevents indented use of these words, perhaps
+ * as identifiers, from invoking the assistance behavior.
*/
if (pg_strncasecmp(first_word, "help", 4) == 0)
{
/* ALTER INDEX SET|RESET ( */
else if (Matches("ALTER", "INDEX", MatchAny, "RESET", "("))
COMPLETE_WITH("fillfactor",
- "vacuum_cleanup_index_scale_factor", "deduplicate_items", /* BTREE */
+ "vacuum_cleanup_index_scale_factor", "deduplicate_items", /* BTREE */
"fastupdate", "gin_pending_list_limit", /* GIN */
"buffering", /* GiST */
"pages_per_range", "autosummarize" /* BRIN */
);
else if (Matches("ALTER", "INDEX", MatchAny, "SET", "("))
COMPLETE_WITH("fillfactor =",
- "vacuum_cleanup_index_scale_factor =", "deduplicate_items =", /* BTREE */
+ "vacuum_cleanup_index_scale_factor =", "deduplicate_items =", /* BTREE */
"fastupdate =", "gin_pending_list_limit =", /* GIN */
"buffering =", /* GiST */
"pages_per_range =", "autosummarize =" /* BRIN */
while ((c = getopt_long(argc, argv, "h:p:U:g:wWedDsSrRiIlLc:PE",
long_options, &optindex)) != -1)
{
- char *endptr;
+ char *endptr;
switch (c)
{
break;
case 'c':
conn_limit = strtol(optarg, &endptr, 10);
- if (*endptr != '\0' || conn_limit < -1) /* minimum valid value */
+ if (*endptr != '\0' || conn_limit < -1) /* minimum valid value */
{
pg_log_error("invalid value for --connection-limit: %s",
optarg);
qr/statement:\ REINDEX TABLE s1.t1;/,
'parallel reindexdb for schemas does a per-table REINDEX');
$node->command_ok(
- ['reindexdb', '-j', '2', '-S', 's3'],
+ [ 'reindexdb', '-j', '2', '-S', 's3' ],
'parallel reindexdb with empty schema');
$node->command_checks_all(
[ 'reindexdb', '-j', '2', '--concurrently', '-d', 'postgres' ],
$node->command_fails(
[ 'vacuumdb', '--analyze', '--table', 'vactable(c)', 'postgres' ],
'incorrect column name with ANALYZE');
-$node->command_fails(
- [ 'vacuumdb', '-P', -1, 'postgres' ],
+$node->command_fails([ 'vacuumdb', '-P', -1, 'postgres' ],
'negative parallel degree');
$node->issues_sql_like(
[ 'vacuumdb', '--analyze', '--table', 'vactable(a, b)', 'postgres' ],
static inline JsonParseErrorType json_lex_string(JsonLexContext *lex);
static inline JsonParseErrorType json_lex_number(JsonLexContext *lex, char *s,
- bool *num_err, int *total_len);
+ bool *num_err, int *total_len);
static inline JsonParseErrorType parse_scalar(JsonLexContext *lex, JsonSemAction *sem);
static JsonParseErrorType parse_object_field(JsonLexContext *lex, JsonSemAction *sem);
static JsonParseErrorType parse_object(JsonLexContext *lex, JsonSemAction *sem);
pg_parse_json(JsonLexContext *lex, JsonSemAction *sem)
{
JsonTokenType tok;
- JsonParseErrorType result;
+ JsonParseErrorType result;
/* get the initial token */
result = json_lex(lex);
result = parse_array(lex, sem);
break;
default:
- result = parse_scalar(lex, sem); /* json can be a bare scalar */
+ result = parse_scalar(lex, sem); /* json can be a bare scalar */
}
if (result == JSON_SUCCESS)
{
JsonLexContext copylex;
int count;
- JsonParseErrorType result;
+ JsonParseErrorType result;
/*
* It's safe to do this with a shallow copy because the lexical routines
}
}
	result = lex_expect(JSON_PARSE_ARRAY_NEXT, &copylex,
- JSON_TOKEN_ARRAY_END);
+ JSON_TOKEN_ARRAY_END);
if (result != JSON_SUCCESS)
return result;
{
char *s;
int len;
- JsonParseErrorType result;
+ JsonParseErrorType result;
/* Skip leading whitespace. */
s = lex->token_terminator;
static char *
extract_token(JsonLexContext *lex)
{
- int toklen = lex->token_terminator - lex->token_start;
- char *token = palloc(toklen + 1);
+ int toklen = lex->token_terminator - lex->token_start;
+ char *token = palloc(toklen + 1);
memcpy(token, lex->token_start, toklen);
token[toklen] = '\0';
int32
pglz_maximum_compressed_size(int32 rawsize, int32 total_compressed_size)
{
- int32 compressed_size;
+ int32 compressed_size;
/*
* pglz uses one control bit per byte, so we need (rawsize * 9) bits. We
*/
char *
scram_build_secret(const char *salt, int saltlen, int iterations,
- const char *password)
+ const char *password)
{
uint8 salted_password[SCRAM_KEY_LEN];
uint8 stored_key[SCRAM_KEY_LEN];
my $nfkc_utf8 = codepoint_string_to_hex($nfkc);
my $nfkd_utf8 = codepoint_string_to_hex($nfkd);
- print $OUTPUT "\t{ $linenum, { $source_utf8 }, { { $nfc_utf8 }, { $nfd_utf8 }, { $nfkc_utf8 }, { $nfkd_utf8 } } },\n";
+ print $OUTPUT
+ "\t{ $linenum, { $source_utf8 }, { { $nfc_utf8 }, { $nfd_utf8 }, { $nfkc_utf8 }, { $nfkd_utf8 } } },\n";
}
# Output terminator entry
my $prev_codepoint;
my $count = 0;
-print "/* generated by src/common/unicode/generate-unicode_combining_table.pl, do not edit */\n\n";
+print
+ "/* generated by src/common/unicode/generate-unicode_combining_table.pl, do not edit */\n\n";
print "static const struct mbinterval combining[] = {\n";
{
- chomp $line;
- my @fields = split ';', $line;
- $codepoint = hex $fields[0];
-
- next if $codepoint > 0xFFFF;
-
- if ($fields[2] eq 'Me' || $fields[2] eq 'Mn')
- {
- # combining character, save for start of range
- if (!defined($range_start))
- {
- $range_start = $codepoint;
- }
- }
- else
- {
- # not a combining character, print out previous range if any
- if (defined($range_start))
- {
- printf "\t{0x%04X, 0x%04X},\n", $range_start, $prev_codepoint;
- $range_start = undef;
- }
- }
+ chomp $line;
+ my @fields = split ';', $line;
+ $codepoint = hex $fields[0];
+
+ next if $codepoint > 0xFFFF;
+
+ if ($fields[2] eq 'Me' || $fields[2] eq 'Mn')
+ {
+ # combining character, save for start of range
+ if (!defined($range_start))
+ {
+ $range_start = $codepoint;
+ }
+ }
+ else
+ {
+ # not a combining character, print out previous range if any
+ if (defined($range_start))
+ {
+ printf "\t{0x%04X, 0x%04X},\n", $range_start, $prev_codepoint;
+ $range_start = undef;
+ }
+ }
}
continue
{
- $prev_codepoint = $codepoint;
+ $prev_codepoint = $codepoint;
}
print "};\n";
# Decomposition size
# Print size of decomposition
my $decomp_size = scalar(@decomp_elts);
- die if $decomp_size > 0x1F; # to not overrun bitmask
+ die if $decomp_size > 0x1F; # to not overrun bitmask
my $first_decomp = shift @decomp_elts;
if ($decomp_size == 2)
{
# Should this be used for recomposition?
- if ($character_hash{$first_decomp}
+ if ( $character_hash{$first_decomp}
&& $character_hash{$first_decomp}->{class} != 0)
{
$flags .= " | DECOMP_NO_COMPOSE";
my %data;
-print "/* generated by src/common/unicode/generate-unicode_normprops_table.pl, do not edit */\n\n";
+print
+ "/* generated by src/common/unicode/generate-unicode_normprops_table.pl, do not edit */\n\n";
print <
#include "common/unicode_norm.h"
$first = $last = $codepoint;
}
- foreach my $cp (hex($first)..hex($last))
+ foreach my $cp (hex($first) .. hex($last))
{
$data{$prop}{$cp} = $value;
}
next if $prop eq "NFD_QC" || $prop eq "NFKD_QC";
print "\n";
- print "static const pg_unicode_normprops UnicodeNormProps_${prop}[] = {\n";
+ print
+ "static const pg_unicode_normprops UnicodeNormProps_${prop}[] = {\n";
- my %subdata = %{$data{$prop}};
+ my %subdata = %{ $data{$prop} };
foreach my $cp (sort { $a <=> $b } keys %subdata)
{
my $qc;
/*
* Fast path for Hangul characters not stored in tables to save memory as
* decomposition is algorithmic. See
- * https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details on
- * the matter.
+ * https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details
+ * on the matter.
*/
if (code >= SBASE && code < SBASE + SCOUNT)
{
/*
* Fast path for Hangul characters not stored in tables to save memory as
* decomposition is algorithmic. See
- * https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details on
- * the matter.
+ * https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details
+ * on the matter.
*/
if (code >= SBASE && code < SBASE + SCOUNT)
{
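The two hunks above rewrap the comment describing the algorithmic handling of Hangul syllables (per the Unicode standard, see the linked UAX #15 annex). Purely as a hedged illustration of what "decomposition is algorithmic" means here, and not code from this patch, the arithmetic is roughly the following; the constants are the standard Unicode values and hangul_decompose() is a hypothetical helper name:

/*
 * Illustrative sketch of algorithmic Hangul decomposition (Unicode 3.12).
 * Constants are the standard Unicode values; hangul_decompose() is a
 * hypothetical helper, not part of this patch.
 */
#define SBASE  0xAC00
#define LBASE  0x1100
#define VBASE  0x1161
#define TBASE  0x11A7
#define LCOUNT 19
#define VCOUNT 21
#define TCOUNT 28
#define NCOUNT (VCOUNT * TCOUNT)	/* 588 */
#define SCOUNT (LCOUNT * NCOUNT)	/* 11172 */

static int
hangul_decompose(unsigned int code, unsigned int *out)
{
	unsigned int sindex = code - SBASE;	/* caller checks SBASE <= code < SBASE + SCOUNT */
	unsigned int tindex = sindex % TCOUNT;

	out[0] = LBASE + sindex / NCOUNT;	/* leading consonant */
	out[1] = VBASE + (sindex % NCOUNT) / TCOUNT;	/* vowel */
	if (tindex == 0)
		return 2;				/* LV syllable */
	out[2] = TBASE + tindex;	/* trailing consonant */
	return 3;					/* LVT syllable */
}

Because every syllable in that range decomposes by this formula, the tables only need entries for non-Hangul characters, which is the memory saving the comment refers to.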
continue;
/*
- * Per Unicode (https://www.unicode.org/reports/tr15/tr15-18.html) annex 4,
- * a sequence of two adjacent characters in a string is an
+ * Per Unicode (https://www.unicode.org/reports/tr15/tr15-18.html)
+ * annex 4, a sequence of two adjacent characters in a string is an
* exchangeable pair if the combining class (from the Unicode
* Character Database) for the first character is greater than the
* combining class for the second, and the second is not a starter. A
return decomp_chars;
/*
- * The last phase of NFC and NFKC is the recomposition of the reordered Unicode
- * string using combining classes. The recomposed string cannot be longer
- * than the decomposed one, so make the allocation of the output string
- * based on that assumption.
+ * The last phase of NFC and NFKC is the recomposition of the reordered
+ * Unicode string using combining classes. The recomposed string cannot be
+ * longer than the decomposed one, so make the allocation of the output
+ * string based on that assumption.
*/
recomp_chars = (pg_wchar *) ALLOC((decomp_size + 1) * sizeof(pg_wchar));
if (!recomp_chars)
return result;
}
-#endif /* !FRONTEND */
+#endif /* !FRONTEND */
double *tups_recently_dead);
/*
- * React to VACUUM command on the relation. The VACUUM can be
- * triggered by a user or by autovacuum. The specific actions
- * performed by the AM will depend heavily on the individual AM.
+ * React to VACUUM command on the relation. The VACUUM can be triggered by
+ * a user or by autovacuum. The specific actions performed by the AM will
+ * depend heavily on the individual AM.
*
* On entry a transaction is already established, and the relation is
* locked with a ShareUpdateExclusive lock.
* TOAST tables for this AM. If the relation_needs_toast_table callback
* always returns false, this callback is not required.
*/
- Oid (*relation_toast_am) (Relation rel);
+ Oid (*relation_toast_am) (Relation rel);
/*
* This callback is invoked when detoasting a value stored in a toast
extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf);
extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen);
extern BlockNumber visibilitymap_prepare_truncate(Relation rel,
- BlockNumber nheapblocks);
+ BlockNumber nheapblocks);
#endif /* VISIBILITYMAP_H */
PUBLICATION_PART_ROOT,
PUBLICATION_PART_LEAF,
PUBLICATION_PART_ALL,
-} PublicationPartOpt;
+} PublicationPartOpt;
extern List *GetPublicationRelations(Oid pubid, PublicationPartOpt pub_partopt);
extern List *GetAllTablesPublications(void);
Oid stxnamespace; /* OID of statistics object's namespace */
Oid stxowner; /* statistics object's owner */
- int32 stxstattarget BKI_DEFAULT(-1); /* statistics target */
+ int32 stxstattarget BKI_DEFAULT(-1); /* statistics target */
/*
* variable-length fields start here, but we allow direct access to
typedef struct xl_dbase_drop_rec
{
Oid db_id;
- int ntablespaces; /* number of tablespace IDs */
+ int ntablespaces; /* number of tablespace IDs */
Oid tablespace_ids[FLEXIBLE_ARRAY_MEMBER];
} xl_dbase_drop_rec;
#define MinSizeOfDbaseDropRec offsetof(xl_dbase_drop_rec, tablespace_ids)
extern void scram_ServerKey(const uint8 *salted_password, uint8 *result);
extern char *scram_build_secret(const char *salt, int saltlen, int iterations,
- const char *password);
+ const char *password);
#endif /* SCRAM_COMMON_H */
{
unsigned int codepoint:21;
signed int quickcheck:4; /* really UnicodeNormalizationQC */
-} pg_unicode_normprops;
+} pg_unicode_normprops;
static const pg_unicode_normprops UnicodeNormProps_NFC_QC[] = {
{0x0300, UNICODE_NORM_QC_MAYBE},
ExprState *evaltrans; /* evaluation of transition functions */
- /* cached variants of the compiled expression */
- ExprState *evaltrans_cache
- [2] /* 0: outerops; 1: TTSOpsMinimalTuple */
- [2]; /* 0: no NULL check; 1: with NULL check */
+ /*----------
+ * Cached variants of the compiled expression.
+ * first subscript: 0: outerops; 1: TTSOpsMinimalTuple
+ * second subscript: 0: no NULL check; 1: with NULL check
+ *----------
+ */
+ ExprState *evaltrans_cache[2][2];
} AggStatePerPhaseData;
/*
* This is a separate static inline function, so it can be reliably be inlined
* into its wrapper functions even if SH_SCOPE is extern.
*/
-static inline SH_ELEMENT_TYPE *
+static inline SH_ELEMENT_TYPE *
SH_INSERT_HASH_INTERNAL(SH_TYPE * tb, SH_KEY_TYPE key, uint32 hash, bool *found)
{
uint32 startelem;
SH_SCOPE SH_ELEMENT_TYPE *
SH_INSERT(SH_TYPE * tb, SH_KEY_TYPE key, bool *found)
{
- uint32 hash = SH_HASH_KEY(tb, key);
+ uint32 hash = SH_HASH_KEY(tb, key);
return SH_INSERT_HASH_INTERNAL(tb, key, hash, found);
}
* This is a separate static inline function, so it can be reliably be inlined
* into its wrapper functions even if SH_SCOPE is extern.
*/
-static inline SH_ELEMENT_TYPE *
+static inline SH_ELEMENT_TYPE *
SH_LOOKUP_HASH_INTERNAL(SH_TYPE * tb, SH_KEY_TYPE key, uint32 hash)
{
const uint32 startelem = SH_INITIAL_BUCKET(tb, hash);
SH_SCOPE SH_ELEMENT_TYPE *
SH_LOOKUP(SH_TYPE * tb, SH_KEY_TYPE key)
{
- uint32 hash = SH_HASH_KEY(tb, key);
+ uint32 hash = SH_HASH_KEY(tb, key);
return SH_LOOKUP_HASH_INTERNAL(tb, key, hash);
}
}
sh_log("size: " UINT64_FORMAT ", members: %u, filled: %f, total chain: %u, max chain: %u, avg chain: %f, total_collisions: %u, max_collisions: %i, avg_collisions: %f",
- tb->size, tb->members, fillfactor, total_chain_length, max_chain_length, avg_chain_length,
- total_collisions, max_collisions, avg_collisions);
+ tb->size, tb->members, fillfactor, total_chain_length, max_chain_length, avg_chain_length,
+ total_collisions, max_collisions, avg_collisions);
}
#endif /* SH_DEFINE */
/* init hook for SSL, the default sets the password callback if appropriate */
#ifdef USE_OPENSSL
-typedef void(* openssl_tls_init_hook_typ)(SSL_CTX *context, bool isServerStart);
+typedef void (*openssl_tls_init_hook_typ) (SSL_CTX *context, bool isServerStart);
extern PGDLLIMPORT openssl_tls_init_hook_typ openssl_tls_init_hook;
#endif
/* Routines to handle and check SCRAM-SHA-256 secret */
extern char *pg_be_scram_build_secret(const char *password);
extern bool parse_scram_secret(const char *secret, int *iterations, char **salt,
- uint8 *stored_key, uint8 *server_key);
+ uint8 *stored_key, uint8 *server_key);
extern bool scram_verify_plain_password(const char *username,
const char *password, const char *secret);
long totalDiskSpaceUsed;
long maxMemorySpaceUsed;
long totalMemorySpaceUsed;
- bits32 sortMethods; /* bitmask of TuplesortMethod */
+ bits32 sortMethods; /* bitmask of TuplesortMethod */
} IncrementalSortGroupInfo;
typedef struct IncrementalSortInfo
/* these fields are used in AGG_HASHED and AGG_MIXED modes: */
bool table_filled; /* hash table filled yet? */
int num_hashes;
- MemoryContext hash_metacxt; /* memory for hash table itself */
+ MemoryContext hash_metacxt; /* memory for hash table itself */
struct HashTapeInfo *hash_tapeinfo; /* metadata for spill tapes */
- struct HashAggSpill *hash_spills; /* HashAggSpill for each grouping set,
- exists only during first pass */
- TupleTableSlot *hash_spill_slot; /* slot for reading from spill files */
+ struct HashAggSpill *hash_spills; /* HashAggSpill for each grouping set,
+ * exists only during first pass */
+ TupleTableSlot *hash_spill_slot; /* slot for reading from spill files */
List *hash_batches; /* hash batches remaining to be processed */
bool hash_ever_spilled; /* ever spilled during this execution? */
bool hash_spill_mode; /* we hit a limit during the current batch
- and we must not create new groups */
- Size hash_mem_limit; /* limit before spilling hash table */
- uint64 hash_ngroups_limit; /* limit before spilling hash table */
- int hash_planned_partitions; /* number of partitions planned
- for first pass */
+ * and we must not create new groups */
+ Size hash_mem_limit; /* limit before spilling hash table */
+ uint64 hash_ngroups_limit; /* limit before spilling hash table */
+ int hash_planned_partitions; /* number of partitions planned
+ * for first pass */
double hashentrysize; /* estimate revised during execution */
Size hash_mem_peak; /* peak hash table memory usage */
uint64 hash_ngroups_current; /* number of groups currently in
- memory in all hash tables */
+ * memory in all hash tables */
uint64 hash_disk_used; /* kB of disk space used */
int hash_batches_used; /* batches used during entire execution */
void *paramCompileArg;
ParserSetupHook parserSetup; /* parser setup hook */
void *parserSetupArg;
- char *paramValuesStr; /* params as a single string for errors */
+ char *paramValuesStr; /* params as a single string for errors */
int numParams; /* nominal/maximum # of Params represented */
/*
/* type of argument for ParamsErrorCallback */
typedef struct ParamsErrorCbData
{
- const char *portalName;
+ const char *portalName;
ParamListInfo params;
} ParamsErrorCbData;
extern void SerializeParamList(ParamListInfo paramLI, char **start_address);
extern ParamListInfo RestoreParamList(char **start_address);
extern char *BuildParamLogString(ParamListInfo params, char **paramTextValues,
- int valueLen);
+ int valueLen);
extern void ParamsErrorCallback(void *arg);
#endif /* PARAMS_H */
/* used for partitioned relations: */
PartitionScheme part_scheme; /* Partitioning scheme */
- int nparts; /* Number of partitions; -1 if not yet set;
- * in case of a join relation 0 means it's
+ int nparts; /* Number of partitions; -1 if not yet set; in
+ * case of a join relation 0 means it's
* considered unpartitioned */
struct PartitionBoundInfoData *boundinfo; /* Partition bounds */
bool partbounds_merged; /* True if partition bounds were created
typedef struct IncrementalSortPath
{
SortPath spath;
- int nPresortedCols; /* number of presorted columns */
+ int nPresortedCols; /* number of presorted columns */
} IncrementalSortPath;
/*
typedef struct IncrementalSort
{
Sort sort;
- int nPresortedCols; /* number of presorted columns */
+ int nPresortedCols; /* number of presorted columns */
} IncrementalSort;
/* ---------------
#endif
#ifndef HAVE_LINK
-extern int link(const char *src, const char *dst);
+extern int link(const char *src, const char *dst);
#endif
#ifndef HAVE_MKDTEMP
struct sockaddr_un
{
unsigned short sun_family;
- char sun_path[108];
+ char sun_path[108];
};
#define HAVE_STRUCT_SOCKADDR_UN 1
const char *spcoid,
const char *pathname, size_t size,
pg_time_t mtime,
- pg_checksum_context * checksum_ctx);
+ pg_checksum_context *checksum_ctx);
extern void AddWALInfoToBackupManifest(backup_manifest_info *manifest,
XLogRecPtr startptr,
TimeLineID starttli, XLogRecPtr endptr,
extern LogicalRepRelMapEntry *logicalrep_rel_open(LogicalRepRelId remoteid,
LOCKMODE lockmode);
extern LogicalRepRelMapEntry *logicalrep_partition_open(LogicalRepRelMapEntry *root,
- Relation partrel, AttrMap *map);
+ Relation partrel, AttrMap *map);
extern void logicalrep_rel_close(LogicalRepRelMapEntry *rel,
LOCKMODE lockmode);
TimeLineID receiveStartTLI;
/*
- * flushedUpto-1 is the last byte position that has already been
- * received, and receivedTLI is the timeline it came from. At the first
- * startup of walreceiver, these are set to receiveStart and
- * receiveStartTLI. After that, walreceiver updates these whenever it
- * flushes the received WAL to disk.
+ * flushedUpto-1 is the last byte position that has already been received,
+ * and receivedTLI is the timeline it came from. At the first startup of
+ * walreceiver, these are set to receiveStart and receiveStartTLI. After
+ * that, walreceiver updates these whenever it flushes the received WAL to
+ * disk.
*/
XLogRecPtr flushedUpto;
TimeLineID receivedTLI;
{
int nvalues; /* number of deduplicated values */
int nbytes; /* number of bytes (serialized) */
- int nbytes_aligned; /* size of deserialized data with alignment */
+ int nbytes_aligned; /* size of deserialized data with alignment */
int typlen; /* pg_type.typlen */
bool typbyval; /* pg_type.typbyval */
} DimensionInfo;
extern void BuildRelationExtStatistics(Relation onerel, double totalrows,
int numrows, HeapTuple *rows,
int natts, VacAttrStats **vacattrstats);
-extern int ComputeExtStatisticsRows(Relation onerel,
- int natts, VacAttrStats **stats);
+extern int ComputeExtStatisticsRows(Relation onerel,
+ int natts, VacAttrStats **stats);
extern bool statext_is_kind_built(HeapTuple htup, char kind);
extern Selectivity dependencies_clauselist_selectivity(PlannerInfo *root,
List *clauses,
char key[SHMEM_INDEX_KEYSIZE]; /* string name */
void *location; /* location in shared mem */
Size size; /* # bytes requested for the structure */
- Size allocated_size; /* # bytes actually allocated */
+ Size allocated_size; /* # bytes actually allocated */
} ShmemIndexEnt;
/*
extern Oid get_range_subtype(Oid rangeOid);
extern Oid get_range_collation(Oid rangeOid);
extern Oid get_index_column_opclass(Oid index_oid, int attno);
-extern bool get_index_isreplident(Oid index_oid);
+extern bool get_index_isreplident(Oid index_oid);
extern bool get_index_isvalid(Oid index_oid);
extern bool get_index_isclustered(Oid index_oid);
extern void range_set_contain_empty(RangeType *range);
extern RangeType *make_range(TypeCacheEntry *typcache, RangeBound *lower,
RangeBound *upper, bool empty);
-extern int range_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *b1,
- const RangeBound *b2);
-extern int range_cmp_bound_values(TypeCacheEntry *typcache, const RangeBound *b1,
- const RangeBound *b2);
+extern int range_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *b1,
+ const RangeBound *b2);
+extern int range_cmp_bound_values(TypeCacheEntry *typcache, const RangeBound *b1,
+ const RangeBound *b2);
extern bool bounds_adjacent(TypeCacheEntry *typcache, RangeBound bound1,
RangeBound bound2);
extern RangeType *make_empty_range(TypeCacheEntry *typcache);
if (risnull(CSTRINGTYPE, cp))
return 0;
- str = pnstrdup(cp, len); /* decimal_in always converts the complete
- * string */
+ str = pnstrdup(cp, len); /* decimal_in always converts the complete
+ * string */
if (!str)
ret = ECPG_INFORMIX_NUM_UNDERFLOW;
else
}
else
tm->tm_isdst = -1;
-#else /* not (HAVE_STRUCT_TM_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else /* not (HAVE_STRUCT_TM_TM_ZONE ||
+ * HAVE_INT_TIMEZONE) */
if (tzp != NULL)
{
/* default to UTC */
if (tzn != NULL)
*tzn = TZNAME_GLOBAL[(tm->tm_isdst > 0)];
#endif
-#else /* not (HAVE_STRUCT_TM_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else /* not (HAVE_STRUCT_TM_TM_ZONE ||
+ * HAVE_INT_TIMEZONE) */
*tzp = 0;
/* Mark this as *no* time zone available */
tm->tm_isdst = -1;
}
result = scram_build_secret(saltbuf, SCRAM_DEFAULT_SALT_LEN,
- SCRAM_DEFAULT_ITERATIONS, password);
+ SCRAM_DEFAULT_ITERATIONS, password);
if (prep_password)
free(prep_password);
static PostgresPollingStatusType open_client_SSL(PGconn *);
static char *SSLerrmessage(unsigned long ecode);
static void SSLerrfree(char *buf);
-static int PQssl_passwd_cb(char *buf, int size, int rwflag, void *userdata);
+static int PQssl_passwd_cb(char *buf, int size, int rwflag, void *userdata);
static int my_sock_read(BIO *h, char *buf, int size);
static int my_sock_write(BIO *h, const char *buf, int size);
}
/*
- * Delegate the client cert password prompt to the libpq wrapper
- * callback if any is defined.
+ * Delegate the client cert password prompt to the libpq wrapper callback
+ * if any is defined.
*
* If the application hasn't installed its own and the sslpassword
- * parameter is non-null, we install ours now to make sure we
- * supply PGconn->sslpassword to OpenSSL instead of letting it
- * prompt on stdin.
+ * parameter is non-null, we install ours now to make sure we supply
+ * PGconn->sslpassword to OpenSSL instead of letting it prompt on stdin.
*
- * This will replace OpenSSL's default PEM_def_callback (which
- * prompts on stdin), but we're only setting it for this SSL
- * context so it's harmless.
+ * This will replace OpenSSL's default PEM_def_callback (which prompts on
+ * stdin), but we're only setting it for this SSL context so it's
+ * harmless.
*/
if (PQsslKeyPassHook
|| (conn->sslpassword && strlen(conn->sslpassword) > 0))
/*
* We'll try to load the file in DER (binary ASN.1) format, and if
* that fails too, report the original error. This could mask
- * issues where there's something wrong with a DER-format cert, but
- * we'd have to duplicate openssl's format detection to be smarter
- * than this. We can't just probe for a leading -----BEGIN because
- * PEM can have leading non-matching lines and blanks. OpenSSL
- * doesn't expose its get_name(...) and its PEM routines don't
- * differentiate between failure modes in enough detail to let us
- * tell the difference between "not PEM, try DER" and "wrong
- * password".
+ * issues where there's something wrong with a DER-format cert,
+ * but we'd have to duplicate openssl's format detection to be
+ * smarter than this. We can't just probe for a leading -----BEGIN
+ * because PEM can have leading non-matching lines and blanks.
+ * OpenSSL doesn't expose its get_name(...) and its PEM routines
+ * don't differentiate between failure modes in enough detail to
+ * let us tell the difference between "not PEM, try DER" and
+ * "wrong password".
*/
if (SSL_use_PrivateKey_file(conn->ssl, fnbuf, SSL_FILETYPE_ASN1) != 1)
{
if (strlen(conn->sslpassword) + 1 > size)
fprintf(stderr, libpq_gettext("WARNING: sslpassword truncated\n"));
strncpy(buf, conn->sslpassword, size);
- buf[size-1] = '\0';
+ buf[size - 1] = '\0';
return strlen(buf);
}
else
static int
PQssl_passwd_cb(char *buf, int size, int rwflag, void *userdata)
{
- PGconn *conn = userdata;
+ PGconn *conn = userdata;
if (PQsslKeyPassHook)
return PQsslKeyPassHook(buf, size, conn);
/* == in fe-secure-openssl.c === */
/* Support for overriding sslpassword handling with a callback. */
-typedef int (*PQsslKeyPassHook_type)(char *buf, int size, PGconn *conn);
+typedef int (*PQsslKeyPassHook_type) (char *buf, int size, PGconn *conn);
extern PQsslKeyPassHook_type PQgetSSLKeyPassHook(void);
extern void PQsetSSLKeyPassHook(PQsslKeyPassHook_type hook);
-extern int PQdefaultSSLKeyPassHook(char *buf, int size, PGconn *conn);
+extern int PQdefaultSSLKeyPassHook(char *buf, int size, PGconn *conn);
#ifdef __cplusplus
}
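For context, the libpq declarations touched above (PQsslKeyPassHook_type, PQsetSSLKeyPassHook, PQdefaultSSLKeyPassHook) let a client supply the SSL key passphrase programmatically instead of being prompted on stdin. A minimal sketch, assuming only the signatures shown in this patch; the environment variable and the hook function name are invented for illustration:

/* Hypothetical client-side passphrase hook, built only on the
 * declarations shown above. */
#include <stdlib.h>
#include <string.h>
#include "libpq-fe.h"

static int
my_sslkey_passwd_hook(char *buf, int size, PGconn *conn)
{
	const char *pw = getenv("MY_SSLKEY_PASSPHRASE");	/* assumed source */

	if (pw == NULL || size <= 0)
		return PQdefaultSSLKeyPassHook(buf, size, conn);

	strncpy(buf, pw, size);
	buf[size - 1] = '\0';
	return (int) strlen(buf);
}

/* Install before the first connection that needs the key:
 *     PQsetSSLKeyPassHook(my_sslkey_passwd_hook);
 */

Returning the passphrase length (or falling back to the default hook) matches the callback contract visible in the PQssl_passwd_cb hunk earlier in this patch.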
PG_FINALLY();
{
/* Restore static pointer, then clean up the prodesc refcount if any */
- /* (We're being paranoid in case an error is thrown in context deletion) */
+ /*
+ * (We're being paranoid in case an error is thrown in context
+ * deletion)
+ */
pltcl_current_call_state = save_call_state;
if (current_call_state.prodesc != NULL)
{
if (callObjc != qdesc->nargs)
{
Tcl_SetObjResult(interp,
- Tcl_NewStringObj(
- "argument list length doesn't match number of arguments for query"
- ,-1));
+ Tcl_NewStringObj("argument list length doesn't match number of arguments for query",
+ -1));
return TCL_ERROR;
}
}
memset(buf, 0, len);
}
-static void (* volatile bzero_p)(void *, size_t) = bzero2;
+static void (*volatile bzero_p) (void *, size_t) = bzero2;
void
explicit_bzero(void *buf, size_t len)
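The hunk above reformats the volatile function pointer used by explicit_bzero(). The design point is that calling the wiping function through a volatile pointer prevents the compiler from proving the store is dead and optimizing the clearing away, which matters when scrubbing secrets. A generic sketch of the same idiom, not the patch's code (names are made up):

/*
 * Generic illustration of the volatile-function-pointer idiom: because
 * secure_wipe_p is volatile, the compiler must assume it may point
 * anywhere at call time and so cannot elide the call, even though the
 * buffer is never read afterwards.
 */
#include <string.h>

static void *(*volatile secure_wipe_p) (void *, int, size_t) = memset;

static void
secure_wipe(void *buf, size_t len)
{
	secure_wipe_p(buf, 0, len);
}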
use Test::More;
if (!$use_unix_sockets)
{
- plan skip_all => "authentication tests cannot run without Unix-domain sockets";
+ plan skip_all =>
+ "authentication tests cannot run without Unix-domain sockets";
}
else
{
use Test::More;
if (!$use_unix_sockets)
{
- plan skip_all => "authentication tests cannot run without Unix-domain sockets";
+ plan skip_all =>
+ "authentication tests cannot run without Unix-domain sockets";
}
else
{
/* Kind of relation options for dummy index */
relopt_kind di_relopt_kind;
-typedef enum DummyAmEnum {
+typedef enum DummyAmEnum
+{
DUMMY_AM_ENUM_ONE,
DUMMY_AM_ENUM_TWO
-} DummyAmEnum;
+} DummyAmEnum;
/* Dummy index options */
typedef struct DummyIndexOptions
int option_int;
double option_real;
bool option_bool;
- DummyAmEnum option_enum;
+ DummyAmEnum option_enum;
int option_string_val_offset;
int option_string_null_offset;
-} DummyIndexOptions;
+} DummyIndexOptions;
relopt_enum_elt_def dummyAmEnumValues[] =
{
{"one", DUMMY_AM_ENUM_ONE},
{"two", DUMMY_AM_ENUM_TWO},
- {(const char *)NULL} /* list terminator */
+ {(const char *) NULL} /* list terminator */
};
/* Handler for index AM */
static char *ssl_passphrase = NULL;
/* callback function */
-static int rot13_passphrase(char *buf, int size, int rwflag, void *userdata);
+static int rot13_passphrase(char *buf, int size, int rwflag, void *userdata);
+
/* hook function to set the callback */
static void set_rot13(SSL_CTX *context, bool isServerStart);
+
/*
* Module load callback
*/
set_rot13(SSL_CTX *context, bool isServerStart)
{
/* warn if the user has set ssl_passphrase_command */
- if(ssl_passphrase_command[0])
+ if (ssl_passphrase_command[0])
ereport(WARNING,
(errmsg("ssl_passphrase_command setting ignored by ssl_passphrase_func module")));
$params{has_streaming} = 0 unless defined $params{has_streaming};
$params{has_restoring} = 0 unless defined $params{has_restoring};
- $params{standby} = 1 unless defined $params{standby};
+ $params{standby} = 1 unless defined $params{standby};
print
"# Initializing node \"$node_name\" from backup \"$backup_name\" of node \"$root_name\"\n";
"unix_socket_directories = '$host'");
}
$self->enable_streaming($root_node) if $params{has_streaming};
- $self->enable_restoring($root_node, $params{standby}) if $params{has_restoring};
+ $self->enable_restoring($root_node, $params{standby})
+ if $params{has_restoring};
return;
}
+
=pod
=head1 NAME
# Specifies whether to use Unix sockets for test setups. On
# Windows we don't use them by default since it's not universally
# supported, but it can be overridden if desired.
- $use_unix_sockets = (!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS});
+ $use_unix_sockets =
+ (!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS});
}
=pod
'catalog xmin of cascaded slot still null with hs_feedback reset');
note "check change primary_conninfo without restart";
-$node_standby_2->append_conf('postgresql.conf',
- "primary_slot_name = ''");
+$node_standby_2->append_conf('postgresql.conf', "primary_slot_name = ''");
$node_standby_2->enable_streaming($node_master);
$node_standby_2->reload;
$node_standby_1->stop;
my $newval = $node_master->safe_psql('postgres',
-'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val'
+ 'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val'
);
$node_master->wait_for_catchup($node_standby_2, 'replay',
$node_master->lsn('insert'));
my $phys_slot = 'phys_slot';
$node_master->safe_psql('postgres',
"SELECT pg_create_physical_replication_slot('$phys_slot', true);");
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
CREATE TABLE tab_phys_slot (a int);
INSERT INTO tab_phys_slot VALUES (generate_series(1,10));");
-my $current_lsn = $node_master->safe_psql('postgres',
- "SELECT pg_current_wal_lsn();");
+my $current_lsn =
+ $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
chomp($current_lsn);
my $psql_rc = $node_master->psql('postgres',
- "SELECT pg_replication_slot_advance('$phys_slot', '$current_lsn'::pg_lsn);");
+ "SELECT pg_replication_slot_advance('$phys_slot', '$current_lsn'::pg_lsn);"
+);
is($psql_rc, '0', 'slot advancing with physical slot');
my $phys_restart_lsn_pre = $node_master->safe_psql('postgres',
- "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';");
+ "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"
+);
chomp($phys_restart_lsn_pre);
# Slot advance should persist across clean restarts.
$node_master->restart;
my $phys_restart_lsn_post = $node_master->safe_psql('postgres',
- "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';");
+ "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"
+);
chomp($phys_restart_lsn_post);
-ok(($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0,
+ok( ($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0,
"physical slot advance persists across restarts");
# Check behavior when recovery ends before target is reached
$node_standby = get_new_node('standby_8');
-$node_standby->init_from_backup($node_master, 'my_backup',
- has_restoring => 1, standby => 0);
+$node_standby->init_from_backup(
+ $node_master, 'my_backup',
+ has_restoring => 1,
+ standby => 0);
$node_standby->append_conf('postgresql.conf',
- "recovery_target_name = 'does_not_exist'");
+ "recovery_target_name = 'does_not_exist'");
-run_log(['pg_ctl', '-D', $node_standby->data_dir,
- '-l', $node_standby->logfile, 'start']);
+run_log(
+ [
+ 'pg_ctl', '-D', $node_standby->data_dir, '-l',
+ $node_standby->logfile, 'start'
+ ]);
# wait up to 180s for postgres to terminate
-foreach my $i (0..1800)
+foreach my $i (0 .. 1800)
{
- last if ! -f $node_standby->data_dir . '/postmaster.pid';
+ last if !-f $node_standby->data_dir . '/postmaster.pid';
usleep(100_000);
}
$logfile = slurp_file($node_standby->logfile());
-ok($logfile =~ qr/FATAL: recovery ended before configured recovery target was reached/,
+ok( $logfile =~
+ qr/FATAL: recovery ended before configured recovery target was reached/,
'recovery end before target reached is a fatal error');
'include-xids' => '0',
'skip-empty-xacts' => '1');
chomp($stdout_recv);
-is($stdout_recv, '',
- 'pg_recvlogical acknowledged changes');
+is($stdout_recv, '', 'pg_recvlogical acknowledged changes');
$node_master->safe_psql('postgres', 'CREATE DATABASE otherdb');
# Test logical slot advancing and its durability.
my $logical_slot = 'logical_slot';
$node_master->safe_psql('postgres',
- "SELECT pg_create_logical_replication_slot('$logical_slot', 'test_decoding', false);");
-$node_master->psql('postgres', "
+ "SELECT pg_create_logical_replication_slot('$logical_slot', 'test_decoding', false);"
+);
+$node_master->psql(
+ 'postgres', "
CREATE TABLE tab_logical_slot (a int);
INSERT INTO tab_logical_slot VALUES (generate_series(1,10));");
-my $current_lsn = $node_master->safe_psql('postgres',
- "SELECT pg_current_wal_lsn();");
+my $current_lsn =
+ $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
chomp($current_lsn);
my $psql_rc = $node_master->psql('postgres',
- "SELECT pg_replication_slot_advance('$logical_slot', '$current_lsn'::pg_lsn);");
+ "SELECT pg_replication_slot_advance('$logical_slot', '$current_lsn'::pg_lsn);"
+);
is($psql_rc, '0', 'slot advancing with logical slot');
my $logical_restart_lsn_pre = $node_master->safe_psql('postgres',
- "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';");
+ "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';"
+);
chomp($logical_restart_lsn_pre);
# Slot advance should persist across clean restarts.
$node_master->restart;
my $logical_restart_lsn_post = $node_master->safe_psql('postgres',
- "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';");
+ "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';"
+);
chomp($logical_restart_lsn_post);
ok(($logical_restart_lsn_pre cmp $logical_restart_lsn_post) == 0,
"logical slot advance persists across restarts");
# Initialize master node, setting wal-segsize to 1MB
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1, extra => ['--wal-segsize=1']);
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
min_wal_size = 2MB
max_wal_size = 4MB
log_checkpoints = yes
));
$node_master->start;
-$node_master->safe_psql('postgres', "SELECT pg_create_physical_replication_slot('rep1')");
+$node_master->safe_psql('postgres',
+ "SELECT pg_create_physical_replication_slot('rep1')");
# The slot state and remain should be null before the first connection
-my $result = $node_master->safe_psql('postgres', "SELECT restart_lsn IS NULL, wal_status is NULL, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'");
+my $result = $node_master->safe_psql('postgres',
+ "SELECT restart_lsn IS NULL, wal_status is NULL, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
+);
is($result, "t|t|t", 'check the state of non-reserved slot is "unknown"');
# Create a standby linking to it using the replication slot
my $node_standby = get_new_node('standby_1');
-$node_standby->init_from_backup($node_master, $backup_name, has_streaming => 1);
+$node_standby->init_from_backup($node_master, $backup_name,
+ has_streaming => 1);
$node_standby->append_conf('postgresql.conf', "primary_slot_name = 'rep1'");
$node_standby->start;
$node_standby->stop;
# Preparation done, the slot is the state "normal" now
-$result = $node_master->safe_psql('postgres', "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'");
+$result = $node_master->safe_psql('postgres',
+ "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
+);
is($result, "normal|t", 'check the catching-up state');
# Advance WAL by five segments (= 5MB) on master
$node_master->safe_psql('postgres', "CHECKPOINT;");
# The slot is always "safe" when fitting max_wal_size
-$result = $node_master->safe_psql('postgres', "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'");
+$result = $node_master->safe_psql('postgres',
+ "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
+);
is($result, "normal|t", 'check that it is safe if WAL fits in max_wal_size');
advance_wal($node_master, 4);
$node_master->safe_psql('postgres', "CHECKPOINT;");
# The slot is always "safe" when max_slot_wal_keep_size is not set
-$result = $node_master->safe_psql('postgres', "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'");
+$result = $node_master->safe_psql('postgres',
+ "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
+);
is($result, "normal|t", 'check that slot is working');
# The standby can reconnect to master
# Set max_slot_wal_keep_size on master
my $max_slot_wal_keep_size_mb = 6;
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
max_slot_wal_keep_size = ${max_slot_wal_keep_size_mb}MB
));
$node_master->reload;
# be almost (max_slot_wal_keep_size - 1) times as large as the segment
# size
-$result = $node_master->safe_psql('postgres', "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
+$result = $node_master->safe_psql('postgres',
+ "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
is($result, "normal", 'check that max_slot_wal_keep_size is working');
# Advance WAL again then checkpoint, reducing remain by 2 MB.
$node_master->safe_psql('postgres', "CHECKPOINT;");
# The slot is still working
-$result = $node_master->safe_psql('postgres', "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
-is($result, "normal", 'check that min_safe_lsn gets close to the current LSN');
+$result = $node_master->safe_psql('postgres',
+ "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
+is($result, "normal",
+ 'check that min_safe_lsn gets close to the current LSN');
# The standby can reconnect to master
$node_standby->start;
$node_standby->stop;
# wal_keep_segments overrides max_slot_wal_keep_size
-$result = $node_master->safe_psql('postgres', "ALTER SYSTEM SET wal_keep_segments to 8; SELECT pg_reload_conf();");
+$result = $node_master->safe_psql('postgres',
+ "ALTER SYSTEM SET wal_keep_segments to 8; SELECT pg_reload_conf();");
# Advance WAL again then checkpoint, reducing remain by 6 MB.
advance_wal($node_master, 6);
-$result = $node_master->safe_psql('postgres', "SELECT wal_status as remain FROM pg_replication_slots WHERE slot_name = 'rep1'");
-is($result, "normal", 'check that wal_keep_segments overrides max_slot_wal_keep_size');
+$result = $node_master->safe_psql('postgres',
+ "SELECT wal_status as remain FROM pg_replication_slots WHERE slot_name = 'rep1'"
+);
+is($result, "normal",
+ 'check that wal_keep_segments overrides max_slot_wal_keep_size');
# restore wal_keep_segments
-$result = $node_master->safe_psql('postgres', "ALTER SYSTEM SET wal_keep_segments to 0; SELECT pg_reload_conf();");
+$result = $node_master->safe_psql('postgres',
+ "ALTER SYSTEM SET wal_keep_segments to 0; SELECT pg_reload_conf();");
# The standby can reconnect to master
$node_standby->start;
advance_wal($node_master, 6);
# Slot gets into 'reserved' state
-$result = $node_master->safe_psql('postgres', "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
+$result = $node_master->safe_psql('postgres',
+ "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
is($result, "reserved", 'check that the slot state changes to "reserved"');
# do checkpoint so that the next checkpoint runs too early
advance_wal($node_master, 1);
# Slot gets into 'lost' state
-$result = $node_master->safe_psql('postgres', "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'");
+$result = $node_master->safe_psql('postgres',
+ "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
+);
is($result, "lost|t", 'check that the slot state changes to "lost"');
# The standby can still connect to the master before a checkpoint
$node_standby->stop;
-ok(!find_in_log($node_standby,
- "requested WAL segment [0-9A-F]+ has already been removed"),
- 'check that required WAL segments are still available');
+ok( !find_in_log(
+ $node_standby,
+ "requested WAL segment [0-9A-F]+ has already been removed"),
+ 'check that required WAL segments are still available');
# Advance WAL again, the slot loses the oldest segment.
my $logstart = get_log_size($node_master);
$node_master->safe_psql('postgres', "CHECKPOINT;");
# WARNING should be issued
-ok(find_in_log($node_master,
- "invalidating slot \"rep1\" because its restart_lsn [0-9A-F/]+ exceeds max_slot_wal_keep_size",
- $logstart),
- 'check that the warning is logged');
+ok( find_in_log(
+ $node_master,
+ "invalidating slot \"rep1\" because its restart_lsn [0-9A-F/]+ exceeds max_slot_wal_keep_size",
+ $logstart),
+ 'check that the warning is logged');
# This slot should be broken
-$result = $node_master->safe_psql('postgres', "SELECT slot_name, active, restart_lsn IS NULL, wal_status, min_safe_lsn FROM pg_replication_slots WHERE slot_name = 'rep1'");
+$result = $node_master->safe_psql('postgres',
+ "SELECT slot_name, active, restart_lsn IS NULL, wal_status, min_safe_lsn FROM pg_replication_slots WHERE slot_name = 'rep1'"
+);
is($result, "rep1|f|t||", 'check that the slot became inactive');
# The standby can no longer connect to the master
my $failed = 0;
for (my $i = 0; $i < 10000; $i++)
{
- if (find_in_log($node_standby,
- "requested WAL segment [0-9A-F]+ has already been removed",
- $logstart))
+ if (find_in_log(
+ $node_standby,
+ "requested WAL segment [0-9A-F]+ has already been removed",
+ $logstart))
{
$failed = 1;
last;
my ($node, $n) = @_;
# Advance by $n segments (= (16 * $n) MB) on master
- for (my $i = 0 ; $i < $n ; $i++)
+ for (my $i = 0; $i < $n; $i++)
{
- $node->safe_psql('postgres', "CREATE TABLE t (); DROP TABLE t; SELECT pg_switch_wal();");
+ $node->safe_psql('postgres',
+ "CREATE TABLE t (); DROP TABLE t; SELECT pg_switch_wal();");
}
return;
}
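The helper above creates and drops a dummy table before each pg_switch_wal() call because, per the core documentation, pg_switch_wal() does nothing and merely reports the start of the segment already in use when no WAL has been written since the last switch; the dummy DDL guarantees that each loop iteration really consumes a fresh segment. A small illustrative sketch of that no-op behaviour, assuming the $node_master object from this test:

# Sketch only: two back-to-back switches with no intervening WAL activity
# do not consume a second segment; the second call just reports the start
# of the segment currently in use.
my $after_first  = $node_master->safe_psql('postgres', "SELECT pg_switch_wal()");
my $after_second = $node_master->safe_psql('postgres', "SELECT pg_switch_wal()");
note "first switch: $after_first, second (no-op) switch: $after_second";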
#
# This changes ssl/client.key to ssl/client_tmp.key etc for the rest
# of the tests.
-my @keys = ("client", "client-revoked", "client-der", "client-encrypted-pem", "client-encrypted-der");
+my @keys = (
+ "client", "client-revoked",
+ "client-der", "client-encrypted-pem",
+ "client-encrypted-der");
foreach my $key (@keys)
{
- copy("ssl/${key}.key", "ssl/${key}_tmp.key")
- or die "couldn't copy ssl/${key}.key to ssl/${key}_tmp.key for permissions change: $!";
- chmod 0600, "ssl/${key}_tmp.key"
- or die "failed to change permissions on ssl/${key}_tmp.key: $!";
+ copy("ssl/${key}.key", "ssl/${key}_tmp.key")
+ or die
+ "couldn't copy ssl/${key}.key to ssl/${key}_tmp.key for permissions change: $!";
+ chmod 0600, "ssl/${key}_tmp.key"
+ or die "failed to change permissions on ssl/${key}_tmp.key: $!";
}
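The copies are restricted to mode 0600 because libpq refuses an sslkey file that is group- or world-accessible when it is owned by the connecting user, so the connection tests below would fail with the more permissive files shipped in the source tree. A minimal sketch, assuming the copy made just above, of asserting the resulting permissions:

# Sketch only: confirm the chmod above took effect; 07777 masks off the
# file-type bits, leaving just the permission bits.
my $mode = (stat("ssl/client_tmp.key"))[2] & 07777;
die sprintf("unexpected mode %04o on ssl/client_tmp.key", $mode)
  unless $mode == 0600;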
# Also make a copy of that explicitly world-readable. We can't
# Test compatibility of SSL protocols.
# TLSv1.1 is lower than TLSv1.2, so it won't work.
-$node->append_conf('postgresql.conf',
- qq{ssl_min_protocol_version='TLSv1.2'
+$node->append_conf(
+ 'postgresql.conf',
+ qq{ssl_min_protocol_version='TLSv1.2'
ssl_max_protocol_version='TLSv1.1'});
command_fails(
[ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
'restart fails with incorrect SSL protocol bounds');
# Go back to the defaults, this works.
-$node->append_conf('postgresql.conf',
- qq{ssl_min_protocol_version='TLSv1.2'
+$node->append_conf(
+ 'postgresql.conf',
+ qq{ssl_min_protocol_version='TLSv1.2'
ssl_max_protocol_version=''});
command_ok(
[ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
test_connect_ok(
$common_connstr,
"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
- "certificate authorization succeeds with correct client cert in PEM format");
+ "certificate authorization succeeds with correct client cert in PEM format"
+);
# correct client cert in unencrypted DER
test_connect_ok(
$common_connstr,
"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-der_tmp.key",
- "certificate authorization succeeds with correct client cert in DER format");
+ "certificate authorization succeeds with correct client cert in DER format"
+);
# correct client cert in encrypted PEM
test_connect_ok(
$common_connstr,
"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-pem_tmp.key sslpassword='dUmmyP^#+'",
- "certificate authorization succeeds with correct client cert in encrypted PEM format");
+ "certificate authorization succeeds with correct client cert in encrypted PEM format"
+);
# correct client cert in encrypted DER
test_connect_ok(
$common_connstr,
"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-der_tmp.key sslpassword='dUmmyP^#+'",
- "certificate authorization succeeds with correct client cert in encrypted DER format");
+ "certificate authorization succeeds with correct client cert in encrypted DER format"
+);
# correct client cert in encrypted PEM with wrong password
test_connect_fails(
$common_connstr,
"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-pem_tmp.key sslpassword='wrong'",
qr!\Qprivate key file "ssl/client-encrypted-pem_tmp.key": bad decrypt\E!,
- "certificate authorization fails with correct client cert and wrong password in encrypted PEM format");
+ "certificate authorization fails with correct client cert and wrong password in encrypted PEM format"
+);
TODO:
{
$common_connstr,
"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-pem_tmp.key sslpassword=''",
qr!\Qprivate key file "ssl/client-encrypted-pem_tmp.key": processing error\E!,
- "certificate authorization fails with correct client cert and empty password in encrypted PEM format");
+ "certificate authorization fails with correct client cert and empty password in encrypted PEM format"
+ );
# correct client cert in encrypted PEM with no password
test_connect_fails(
$common_connstr,
"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-pem_tmp.key",
qr!\Qprivate key file "ssl/client-encrypted-pem_tmp.key": processing error\E!,
- "certificate authorization fails with correct client cert and no password in encrypted PEM format");
+ "certificate authorization fails with correct client cert and no password in encrypted PEM format"
+ );
}
# clean up
foreach my $key (@keys)
{
- unlink("ssl/${key}_tmp.key");
+ unlink("ssl/${key}_tmp.key");
}
# The trigger should cause the update to be skipped on subscriber
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
-is($result, qq(2|1|2), 'check replica update column trigger applied on subscriber');
+is($result, qq(2|1|2),
+ 'check replica update column trigger applied on subscriber');
# Update on a column not specified in the trigger, but it will trigger
# anyway because logical replication ships all columns in an update.
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(id), max(id) FROM tab_fk_ref;");
-is($result, qq(2|1|2), 'check column trigger applied on even for other column');
+is($result, qq(2|1|2),
+ 'check column trigger applied on even for other column');
$node_subscriber->stop('fast');
$node_publisher->stop('fast');
# progressing.
# (https://www.postgresql.org/message-id/flat/a9139c29-7ddd-973b-aa7f-71fed9c38d75%40minerva.info)
-$node_publisher->safe_psql('postgres',
- "CREATE TABLE test_tab2 (a int)");
+$node_publisher->safe_psql('postgres', "CREATE TABLE test_tab2 (a int)");
-$node_subscriber->safe_psql('postgres',
- "CREATE TABLE test_tab2 (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab2 (a int)");
$node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION");
$node_subscriber->safe_psql('postgres',
"ALTER TABLE test_tab2 ADD COLUMN b serial PRIMARY KEY");
-$node_publisher->safe_psql('postgres',
- "INSERT INTO test_tab2 VALUES (1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO test_tab2 VALUES (1)");
$node_publisher->wait_for_catchup('tap_sub');
-is($node_subscriber->safe_psql('postgres',
- "SELECT count(*), min(a), max(a) FROM test_tab2"),
- qq(1|1|1),
- 'check replicated inserts on subscriber');
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT count(*), min(a), max(a) FROM test_tab2"),
+ qq(1|1|1),
+ 'check replicated inserts on subscriber');
$node_subscriber->stop;
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
# publisher
-$node_publisher->safe_psql('postgres',
- "CREATE PUBLICATION pub1");
+$node_publisher->safe_psql('postgres', "CREATE PUBLICATION pub1");
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION pub_all FOR ALL TABLES");
$node_publisher->safe_psql('postgres',
# subpartitioned. This tests the tuple routing code on the
# subscriber.
$node_subscriber1->safe_psql('postgres',
- "CREATE TABLE tab1 (c text, a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+ "CREATE TABLE tab1 (c text, a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
$node_subscriber1->safe_psql('postgres',
- "CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)");
+ "CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"
+);
$node_subscriber1->safe_psql('postgres',
"ALTER TABLE tab1 ATTACH PARTITION tab1_1 FOR VALUES IN (1, 2, 3)");
$node_subscriber1->safe_psql('postgres',
- "CREATE TABLE tab1_2 PARTITION OF tab1 (c DEFAULT 'sub1_tab1') FOR VALUES IN (4, 5, 6) PARTITION BY LIST (a)");
+ "CREATE TABLE tab1_2 PARTITION OF tab1 (c DEFAULT 'sub1_tab1') FOR VALUES IN (4, 5, 6) PARTITION BY LIST (a)"
+);
$node_subscriber1->safe_psql('postgres',
"CREATE TABLE tab1_2_1 (c text, b text, a int NOT NULL)");
$node_subscriber1->safe_psql('postgres',
$node_subscriber1->safe_psql('postgres',
"CREATE TABLE tab1_2_2 PARTITION OF tab1_2 FOR VALUES IN (4, 6)");
$node_subscriber1->safe_psql('postgres',
- "CREATE TABLE tab1_def PARTITION OF tab1 (c DEFAULT 'sub1_tab1') DEFAULT");
+ "CREATE TABLE tab1_def PARTITION OF tab1 (c DEFAULT 'sub1_tab1') DEFAULT"
+);
$node_subscriber1->safe_psql('postgres',
- "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1");
+ "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1"
+);
# subscriber 2
#
# This does not use partitioning. The tables match the leaf tables on
# the publisher.
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text)");
+ "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text)"
+);
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab1_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_1', b text)");
+ "CREATE TABLE tab1_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_1', b text)"
+);
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab1_2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_2', b text)");
+ "CREATE TABLE tab1_2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_2', b text)"
+);
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab1_def (a int PRIMARY KEY, b text, c text DEFAULT 'sub2_tab1_def')");
+ "CREATE TABLE tab1_def (a int PRIMARY KEY, b text, c text DEFAULT 'sub2_tab1_def')"
+);
$node_subscriber2->safe_psql('postgres',
- "CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr' PUBLICATION pub_all");
+ "CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr' PUBLICATION pub_all"
+);
# Wait for initial sync of all subscriptions
my $synced_query =
# Tests for replication using leaf partition identity and schema
# insert
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab1 VALUES (1)");
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab1_1 (a) VALUES (3)");
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab1_2 VALUES (5)");
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab1 VALUES (0)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1_1 (a) VALUES (3)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1_2 VALUES (5)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (0)");
$node_publisher->wait_for_catchup('sub1');
$node_publisher->wait_for_catchup('sub2');
my $result = $node_subscriber1->safe_psql('postgres',
"SELECT c, a FROM tab1 ORDER BY 1, 2");
-is($result, qq(sub1_tab1|0
+is( $result, qq(sub1_tab1|0
sub1_tab1|1
sub1_tab1|3
sub1_tab1|5), 'inserts into tab1 and its partitions replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab1_1 ORDER BY 1, 2");
-is($result, qq(sub2_tab1_1|1
+is( $result, qq(sub2_tab1_1|1
sub2_tab1_1|3), 'inserts into tab1_1 replicated');
$result = $node_subscriber2->safe_psql('postgres',
is($result, qq(sub2_tab1_def|0), 'inserts into tab1_def replicated');
# update (replicated as update)
-$node_publisher->safe_psql('postgres',
- "UPDATE tab1 SET a = 2 WHERE a = 1");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 2 WHERE a = 1");
# All of the following cause an update to be applied to a partitioned
# table on subscriber1: tab1_2 is leaf partition on publisher, whereas
# it's sub-partitioned on subscriber1.
-$node_publisher->safe_psql('postgres',
- "UPDATE tab1 SET a = 6 WHERE a = 5");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab1 SET a = 4 WHERE a = 6");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab1 SET a = 6 WHERE a = 4");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 5");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 4 WHERE a = 6");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 4");
$node_publisher->wait_for_catchup('sub1');
$node_publisher->wait_for_catchup('sub2');
$result = $node_subscriber1->safe_psql('postgres',
"SELECT c, a FROM tab1 ORDER BY 1, 2");
-is($result, qq(sub1_tab1|0
+is( $result, qq(sub1_tab1|0
sub1_tab1|2
sub1_tab1|3
sub1_tab1|6), 'update of tab1_1, tab1_2 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab1_1 ORDER BY 1, 2");
-is($result, qq(sub2_tab1_1|2
+is( $result, qq(sub2_tab1_1|2
sub2_tab1_1|3), 'update of tab1_1 replicated');
$result = $node_subscriber2->safe_psql('postgres',
is($result, qq(sub2_tab1_def|0), 'tab1_def unchanged');
# update (replicated as delete+insert)
-$node_publisher->safe_psql('postgres',
- "UPDATE tab1 SET a = 1 WHERE a = 0");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab1 SET a = 4 WHERE a = 1");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 1 WHERE a = 0");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 4 WHERE a = 1");
$node_publisher->wait_for_catchup('sub1');
$node_publisher->wait_for_catchup('sub2');
$result = $node_subscriber1->safe_psql('postgres',
"SELECT c, a FROM tab1 ORDER BY 1, 2");
-is($result, qq(sub1_tab1|2
+is( $result, qq(sub1_tab1|2
sub1_tab1|3
sub1_tab1|4
-sub1_tab1|6), 'update of tab1 (delete from tab1_def + insert into tab1_1) replicated');
+sub1_tab1|6),
+ 'update of tab1 (delete from tab1_def + insert into tab1_1) replicated');
$result = $node_subscriber1->safe_psql('postgres',
"SELECT a FROM tab1_2_2 ORDER BY 1");
-is($result, qq(4
+is( $result, qq(4
6), 'updates of tab1 (delete + insert) replicated into tab1_2_2 correctly');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab1_1 ORDER BY 1, 2");
-is($result, qq(sub2_tab1_1|2
+is( $result, qq(sub2_tab1_1|2
sub2_tab1_1|3), 'tab1_1 unchanged');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab1_2 ORDER BY 1, 2");
-is($result, qq(sub2_tab1_2|4
+is( $result, qq(sub2_tab1_2|4
sub2_tab1_2|6), 'insert into tab1_2 replicated');
$result = $node_subscriber2->safe_psql('postgres',
# delete
$node_publisher->safe_psql('postgres',
"DELETE FROM tab1 WHERE a IN (2, 3, 5)");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab1_2");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab1_2");
$node_publisher->wait_for_catchup('sub1');
$node_publisher->wait_for_catchup('sub2');
-$result = $node_subscriber1->safe_psql('postgres',
- "SELECT a FROM tab1");
+$result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1");
is($result, qq(), 'delete from tab1_1, tab1_2 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab1_1");
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1_1");
is($result, qq(), 'delete from tab1_1 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab1_2");
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1_2");
is($result, qq(), 'delete from tab1_2 replicated');
# truncate
$node_subscriber1->safe_psql('postgres',
"INSERT INTO tab1 (a) VALUES (1), (2), (5)");
-$node_subscriber2->safe_psql('postgres',
- "INSERT INTO tab1_2 (a) VALUES (2)");
-$node_publisher->safe_psql('postgres',
- "TRUNCATE tab1_2");
+$node_subscriber2->safe_psql('postgres', "INSERT INTO tab1_2 (a) VALUES (2)");
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1_2");
$node_publisher->wait_for_catchup('sub1');
$node_publisher->wait_for_catchup('sub2');
-$result = $node_subscriber1->safe_psql('postgres',
- "SELECT a FROM tab1 ORDER BY 1");
-is($result, qq(1
+$result =
+ $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1");
+is( $result, qq(1
2), 'truncate of tab1_2 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab1_2 ORDER BY 1");
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1_2 ORDER BY 1");
is($result, qq(), 'truncate of tab1_2 replicated');
-$node_publisher->safe_psql('postgres',
- "TRUNCATE tab1");
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1");
$node_publisher->wait_for_catchup('sub1');
$node_publisher->wait_for_catchup('sub2');
-$result = $node_subscriber1->safe_psql('postgres',
- "SELECT a FROM tab1 ORDER BY 1");
+$result =
+ $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1");
is($result, qq(), 'truncate of tab1_1 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab1 ORDER BY 1");
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1");
is($result, qq(), 'truncate of tab1 replicated');
# Tests for replication using root table identity and schema
# publisher
-$node_publisher->safe_psql('postgres',
- "DROP PUBLICATION pub1");
+$node_publisher->safe_psql('postgres', "DROP PUBLICATION pub1");
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab2 (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
$node_publisher->safe_psql('postgres',
# Note: tab3_1's parent is not in the publication, in which case its
# changes are published using its own identity.
$node_publisher->safe_psql('postgres',
- "CREATE PUBLICATION pub_viaroot FOR TABLE tab2, tab3_1 WITH (publish_via_partition_root = true)");
+ "CREATE PUBLICATION pub_viaroot FOR TABLE tab2, tab3_1 WITH (publish_via_partition_root = true)"
+);
# subscriber 1
+$node_subscriber1->safe_psql('postgres', "DROP SUBSCRIPTION sub1");
$node_subscriber1->safe_psql('postgres',
- "DROP SUBSCRIPTION sub1");
+ "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub1_tab2', b text) PARTITION BY RANGE (a)"
+);
$node_subscriber1->safe_psql('postgres',
- "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub1_tab2', b text) PARTITION BY RANGE (a)");
-$node_subscriber1->safe_psql('postgres',
- "CREATE TABLE tab2_1 (c text DEFAULT 'sub1_tab2', b text, a int NOT NULL)");
+ "CREATE TABLE tab2_1 (c text DEFAULT 'sub1_tab2', b text, a int NOT NULL)"
+);
$node_subscriber1->safe_psql('postgres',
"ALTER TABLE tab2 ATTACH PARTITION tab2_1 FOR VALUES FROM (0) TO (10)");
$node_subscriber1->safe_psql('postgres',
- "CREATE TABLE tab3_1 (c text DEFAULT 'sub1_tab3_1', b text, a int NOT NULL PRIMARY KEY)");
+ "CREATE TABLE tab3_1 (c text DEFAULT 'sub1_tab3_1', b text, a int NOT NULL PRIMARY KEY)"
+);
$node_subscriber1->safe_psql('postgres',
- "CREATE SUBSCRIPTION sub_viaroot CONNECTION '$publisher_connstr' PUBLICATION pub_viaroot");
+ "CREATE SUBSCRIPTION sub_viaroot CONNECTION '$publisher_connstr' PUBLICATION pub_viaroot"
+);
# subscriber 2
+$node_subscriber2->safe_psql('postgres', "DROP TABLE tab1");
$node_subscriber2->safe_psql('postgres',
- "DROP TABLE tab1");
-$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text) PARTITION BY HASH (a)");
+ "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text) PARTITION BY HASH (a)"
+);
# Note: tab1's partitions are named tab1_1 and tab1_2 on the publisher.
$node_subscriber2->safe_psql('postgres',
"CREATE TABLE tab1_part1 (b text, c text, a int NOT NULL)");
$node_subscriber2->safe_psql('postgres',
- "ALTER TABLE tab1 ATTACH PARTITION tab1_part1 FOR VALUES WITH (MODULUS 2, REMAINDER 0)");
+ "ALTER TABLE tab1 ATTACH PARTITION tab1_part1 FOR VALUES WITH (MODULUS 2, REMAINDER 0)"
+);
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab1_part2 PARTITION OF tab1 FOR VALUES WITH (MODULUS 2, REMAINDER 1)");
+ "CREATE TABLE tab1_part2 PARTITION OF tab1 FOR VALUES WITH (MODULUS 2, REMAINDER 1)"
+);
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab2', b text)");
+ "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab2', b text)"
+);
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab3 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3', b text)");
+ "CREATE TABLE tab3 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3', b text)"
+);
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab3_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3_1', b text)");
+ "CREATE TABLE tab3_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3_1', b text)"
+);
# Publication that sub2 points to now publishes via root, so must update
# subscription target relations.
$node_subscriber2->safe_psql('postgres',
or die "Timed out while waiting for subscriber to synchronize data";
# insert
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab1 VALUES (1), (0)");
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab1_1 (a) VALUES (3)");
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab1_2 VALUES (5)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (0)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1_1 (a) VALUES (3)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab1_2 VALUES (5)");
$node_publisher->safe_psql('postgres',
"INSERT INTO tab2 VALUES (1), (0), (3), (5)");
$node_publisher->safe_psql('postgres',
$result = $node_subscriber1->safe_psql('postgres',
"SELECT c, a FROM tab2 ORDER BY 1, 2");
-is($result, qq(sub1_tab2|0
+is( $result, qq(sub1_tab2|0
sub1_tab2|1
sub1_tab2|3
sub1_tab2|5), 'inserts into tab2 replicated');
$result = $node_subscriber1->safe_psql('postgres',
"SELECT c, a FROM tab3_1 ORDER BY 1, 2");
-is($result, qq(sub1_tab3_1|0
+is( $result, qq(sub1_tab3_1|0
sub1_tab3_1|1
sub1_tab3_1|3
sub1_tab3_1|5), 'inserts into tab3_1 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab1 ORDER BY 1, 2");
-is($result, qq(sub2_tab1|0
+is( $result, qq(sub2_tab1|0
sub2_tab1|1
sub2_tab1|3
sub2_tab1|5), 'inserts into tab1 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab2 ORDER BY 1, 2");
-is($result, qq(sub2_tab2|0
+is( $result, qq(sub2_tab2|0
sub2_tab2|1
sub2_tab2|3
sub2_tab2|5), 'inserts into tab2 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab3 ORDER BY 1, 2");
-is($result, qq(sub2_tab3|0
+is( $result, qq(sub2_tab3|0
sub2_tab3|1
sub2_tab3|3
sub2_tab3|5), 'inserts into tab3 replicated');
# update (replicated as update)
-$node_publisher->safe_psql('postgres',
- "UPDATE tab1 SET a = 6 WHERE a = 5");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab2 SET a = 6 WHERE a = 5");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab3 SET a = 6 WHERE a = 5");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 5");
+$node_publisher->safe_psql('postgres', "UPDATE tab2 SET a = 6 WHERE a = 5");
+$node_publisher->safe_psql('postgres', "UPDATE tab3 SET a = 6 WHERE a = 5");
$node_publisher->wait_for_catchup('sub_viaroot');
$node_publisher->wait_for_catchup('sub2');
$result = $node_subscriber1->safe_psql('postgres',
"SELECT c, a FROM tab2 ORDER BY 1, 2");
-is($result, qq(sub1_tab2|0
+is( $result, qq(sub1_tab2|0
sub1_tab2|1
sub1_tab2|3
sub1_tab2|6), 'update of tab2 replicated');
$result = $node_subscriber1->safe_psql('postgres',
"SELECT c, a FROM tab3_1 ORDER BY 1, 2");
-is($result, qq(sub1_tab3_1|0
+is( $result, qq(sub1_tab3_1|0
sub1_tab3_1|1
sub1_tab3_1|3
sub1_tab3_1|6), 'update of tab3_1 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab1 ORDER BY 1, 2");
-is($result, qq(sub2_tab1|0
+is( $result, qq(sub2_tab1|0
sub2_tab1|1
sub2_tab1|3
sub2_tab1|6), 'inserts into tab1 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab2 ORDER BY 1, 2");
-is($result, qq(sub2_tab2|0
+is( $result, qq(sub2_tab2|0
sub2_tab2|1
sub2_tab2|3
sub2_tab2|6), 'inserts into tab2 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab3 ORDER BY 1, 2");
-is($result, qq(sub2_tab3|0
+is( $result, qq(sub2_tab3|0
sub2_tab3|1
sub2_tab3|3
sub2_tab3|6), 'inserts into tab3 replicated');
# update (replicated as delete+insert)
-$node_publisher->safe_psql('postgres',
- "UPDATE tab1 SET a = 2 WHERE a = 6");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab2 SET a = 2 WHERE a = 6");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab3 SET a = 2 WHERE a = 6");
+$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 2 WHERE a = 6");
+$node_publisher->safe_psql('postgres', "UPDATE tab2 SET a = 2 WHERE a = 6");
+$node_publisher->safe_psql('postgres', "UPDATE tab3 SET a = 2 WHERE a = 6");
$node_publisher->wait_for_catchup('sub_viaroot');
$node_publisher->wait_for_catchup('sub2');
$result = $node_subscriber1->safe_psql('postgres',
"SELECT c, a FROM tab2 ORDER BY 1, 2");
-is($result, qq(sub1_tab2|0
+is( $result, qq(sub1_tab2|0
sub1_tab2|1
sub1_tab2|2
sub1_tab2|3), 'update of tab2 replicated');
$result = $node_subscriber1->safe_psql('postgres',
"SELECT c, a FROM tab3_1 ORDER BY 1, 2");
-is($result, qq(sub1_tab3_1|0
+is( $result, qq(sub1_tab3_1|0
sub1_tab3_1|1
sub1_tab3_1|2
sub1_tab3_1|3), 'update of tab3_1 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab1 ORDER BY 1, 2");
-is($result, qq(sub2_tab1|0
+is( $result, qq(sub2_tab1|0
sub2_tab1|1
sub2_tab1|2
sub2_tab1|3), 'update of tab1 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab2 ORDER BY 1, 2");
-is($result, qq(sub2_tab2|0
+is( $result, qq(sub2_tab2|0
sub2_tab2|1
sub2_tab2|2
sub2_tab2|3), 'update of tab2 replicated');
$result = $node_subscriber2->safe_psql('postgres',
"SELECT c, a FROM tab3 ORDER BY 1, 2");
-is($result, qq(sub2_tab3|0
+is( $result, qq(sub2_tab3|0
sub2_tab3|1
sub2_tab3|2
sub2_tab3|3), 'update of tab3 replicated');
# delete
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab1");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab2");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab3");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab1");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab2");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab3");
$node_publisher->wait_for_catchup('sub_viaroot');
$node_publisher->wait_for_catchup('sub2');
-$result = $node_subscriber1->safe_psql('postgres',
- "SELECT a FROM tab2");
+$result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab2");
is($result, qq(), 'delete tab2 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab1");
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1");
is($result, qq(), 'delete from tab1 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab2");
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab2");
is($result, qq(), 'delete from tab2 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab3");
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3");
is($result, qq(), 'delete from tab3 replicated');
# truncate
$node_publisher->safe_psql('postgres',
"INSERT INTO tab2 VALUES (1), (2), (5)");
# these will NOT be replicated
-$node_publisher->safe_psql('postgres',
- "TRUNCATE tab1_2, tab2_1, tab3_1");
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1_2, tab2_1, tab3_1");
$node_publisher->wait_for_catchup('sub_viaroot');
$node_publisher->wait_for_catchup('sub2');
-$result = $node_subscriber1->safe_psql('postgres',
- "SELECT a FROM tab2 ORDER BY 1");
-is($result, qq(1
+$result =
+ $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab2 ORDER BY 1");
+is( $result, qq(1
2
5), 'truncate of tab2_1 NOT replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab1 ORDER BY 1");
-is($result, qq(1
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1");
+is( $result, qq(1
2
5), 'truncate of tab1_2 NOT replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab2 ORDER BY 1");
-is($result, qq(1
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab2 ORDER BY 1");
+is( $result, qq(1
2
5), 'truncate of tab2_1 NOT replicated');
-$node_publisher->safe_psql('postgres',
- "TRUNCATE tab1, tab2, tab3");
+$node_publisher->safe_psql('postgres', "TRUNCATE tab1, tab2, tab3");
$node_publisher->wait_for_catchup('sub_viaroot');
$node_publisher->wait_for_catchup('sub2');
-$result = $node_subscriber1->safe_psql('postgres',
- "SELECT a FROM tab2");
+$result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab2");
is($result, qq(), 'truncate of tab2 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab1");
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1");
is($result, qq(), 'truncate of tab1 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab2");
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab2");
is($result, qq(), 'truncate of tab2 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab3");
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3");
is($result, qq(), 'truncate of tab3 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab3_1");
+$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3_1");
is($result, qq(), 'truncate of tab3_1 replicated');
'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ],
};
my @contrib_excludes = (
- 'bool_plperl',
- 'commit_ts', 'hstore_plperl',
- 'hstore_plpython', 'intagg',
- 'jsonb_plperl', 'jsonb_plpython',
- 'ltree_plpython', 'pgcrypto',
- 'sepgsql', 'brin',
- 'test_extensions', 'test_misc',
- 'test_pg_dump', 'snapshot_too_old',
- 'unsafe_tests');
+ 'bool_plperl', 'commit_ts',
+ 'hstore_plperl', 'hstore_plpython',
+ 'intagg', 'jsonb_plperl',
+ 'jsonb_plpython', 'ltree_plpython',
+ 'pgcrypto', 'sepgsql',
+ 'brin', 'test_extensions',
+ 'test_misc', 'test_pg_dump',
+ 'snapshot_too_old', 'unsafe_tests');
# Set of variables for frontend modules
my $frontend_defines = { 'initdb' => 'FRONTEND' };
our @pgcommonallfiles = qw(
archive.c base64.c checksum_helper.c
- config_info.c controldata_utils.c d2s.c encnames.c exec.c
+ config_info.c controldata_utils.c d2s.c encnames.c exec.c
f2s.c file_perm.c hashfn.c ip.c jsonapi.c
keywords.c kwlookup.c link-canary.c md5.c
pg_lzcompress.c pgfnames.c psprintf.c relpath.c rmtree.c
$libecpgcompat->AddIncludeDir('src/interfaces/ecpg/include');
$libecpgcompat->AddIncludeDir('src/interfaces/libpq');
$libecpgcompat->UseDef('src/interfaces/ecpg/compatlib/compatlib.def');
- $libecpgcompat->AddReference($pgtypes, $libecpg, $libpgport, $libpgcommon);
+ $libecpgcompat->AddReference($pgtypes, $libecpg, $libpgport,
+ $libpgcommon);
my $ecpg = $solution->AddProject('ecpg', 'exe', 'interfaces',
'src/interfaces/ecpg/preproc');
# 'Can't spawn "conftest.exe"'; suppress that.
no warnings;
- no strict 'subs'; ## no critic (ProhibitNoStrict)
+ no strict 'subs'; ## no critic (ProhibitNoStrict)
# Disable error dialog boxes like we do in the postmaster.
# Here, we run code that triggers relevant errors.
- use if ($^O eq "MSWin32"), 'Win32API::File', qw(SetErrorMode :SEM_);
+ use
+ if ($^O eq "MSWin32"), 'Win32API::File',
+ qw(SetErrorMode :SEM_);
my $oldmode = SetErrorMode(
SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
system(".\\$exe");
# Add transform modules dependent on plperl
my $bool_plperl = AddTransformModule(
- 'bool_plperl', 'contrib/bool_plperl',
- 'plperl', 'src/pl/plperl');
+ 'bool_plperl', 'contrib/bool_plperl',
+ 'plperl', 'src/pl/plperl');
my $hstore_plperl = AddTransformModule(
'hstore_plperl', 'contrib/hstore_plperl',
'plperl', 'src/pl/plperl',
# Examine CL help output to determine if we are in 32 or 64-bit mode.
my $output = `cl /? 2>&1`;
$? >> 8 == 0 or die "cl command not found";
- $self->{platform} = ($output =~ /^\/favor:<.+AMD64/m) ? 'x64' : 'Win32';
+ $self->{platform} =
+ ($output =~ /^\/favor:<.+AMD64/m) ? 'x64' : 'Win32';
}
else
{
sub GenerateFiles
{
- my $self = shift;
- my $bits = $self->{platform} eq 'Win32' ? 32 : 64;
+ my $self = shift;
+ my $bits = $self->{platform} eq 'Win32' ? 32 : 64;
my $ac_init_found = 0;
my $package_name;
my $package_version;
|| confess("Could not open configure.in for reading\n");
while (<$c>)
{
- if (/^AC_INIT\(\[([^\]]+)\], \[([^\]]+)\], \[([^\]]+)\], \[([^\]]*)\], \[([^\]]+)\]/)
+ if (/^AC_INIT\(\[([^\]]+)\], \[([^\]]+)\], \[([^\]]+)\], \[([^\]]*)\], \[([^\]]+)\]/
+ )
{
$ac_init_found = 1;
$package_version = $2;
$package_bugreport = $3;
#$package_tarname = $4;
- $package_url = $5;
+ $package_url = $5;
if ($package_version !~ /^(\d+)(?:\.(\d+))?/)
{
inline => '__inline',
pg_restrict => '__restrict',
# not defined, because it'd conflict with __declspec(restrict)
- restrict => undef,
- typeof => undef,);
+ restrict => undef,
+ typeof => undef,);
if ($self->{options}->{uuid})
{
}
}
- $self->GenerateConfigHeader('src/include/pg_config.h', \%define, 1);
+ $self->GenerateConfigHeader('src/include/pg_config.h', \%define, 1);
$self->GenerateConfigHeader('src/include/pg_config_ext.h', \%define, 0);
- $self->GenerateConfigHeader('src/interfaces/ecpg/include/ecpg_config.h', \%define, 0);
+ $self->GenerateConfigHeader('src/interfaces/ecpg/include/ecpg_config.h',
+ \%define, 0);
$self->GenerateDefFile(
"src/interfaces/libpq/libpqdll.def",
my $config_header_in = $config_header . '.in';
- if (IsNewer($config_header, $config_header_in) ||
- IsNewer($config_header, __FILE__))
+ if ( IsNewer($config_header, $config_header_in)
+ || IsNewer($config_header, __FILE__))
{
my %defines_copy = %$defines;
{
if (defined $defines->{$macro})
{
- print $o "#${ws}define $macro ", $defines->{$macro}, "\n";
+ print $o "#${ws}define $macro ", $defines->{$macro},
+ "\n";
}
else
{
}
else
{
- croak "undefined symbol: $macro at $config_header line $.";
+ croak
+ "undefined symbol: $macro at $config_header line $.";
}
}
else
print "\nSetting up new cluster\n\n";
standard_initdb() or exit 1;
print "\nRunning pg_upgrade\n\n";
- @args = (
- 'pg_upgrade', '-d', "$data.old", '-D', $data, '-b',
- $bindir);
+ @args = ('pg_upgrade', '-d', "$data.old", '-D', $data, '-b', $bindir);
system(@args) == 0 or exit 1;
print "\nStarting new cluster\n\n";
@args = ('pg_ctl', '-l', "$logdir/postmaster2.log", 'start');
-ABITVEC
ACCESS_ALLOWED_ACE
ACL
ACL_SIZE_INFORMATION
AclMode
AclResult
AcquireSampleRowsFunc
+ActionList
ActiveSnapshotElt
AddForeignUpdateTargets_function
AffixNode
AlterRoleSetStmt
AlterRoleStmt
AlterSeqStmt
+AlterStatsStmt
AlterSubscriptionStmt
AlterSubscriptionType
AlterSystemStmt
AlterTableSpaceOptionsStmt
AlterTableStmt
AlterTableType
+AlterTableUtilityContext
+AlterTypeRecurseParams
+AlterTypeStmt
AlterUserMappingStmt
AlteredTableInfo
AlternativeSubPlan
BIGNUM
BIO
BIO_METHOD
-BITVEC
BITVECP
BMS_Comparison
BMS_Membership
BTArrayKeyInfo
BTBuildState
BTCycleId
+BTDedupInterval
+BTDedupState
+BTDedupStateData
BTIndexStat
BTInsertState
BTInsertStateData
BTStackData
BTVacInfo
BTVacState
+BTVacuumPosting
+BTVacuumPostingData
BTWriteState
BYTE
Backend
CState
CTEMaterialize
CV
-C_block
CachedExpression
CachedPlan
CachedPlanSource
Command
CommandDest
CommandId
+CommandTag
+CommandTagBehavior
CommentItem
CommentStmt
CommitTimestampEntry
DropTableSpaceStmt
DropUserMappingStmt
DropdbStmt
-DummyAmEnum
-DummyIndexOptions
DumpComponents
DumpId
DumpOptions
ExplainOneQuery_hook_type
ExplainState
ExplainStmt
+ExplainWorkersState
ExportedSnapshot
Expr
ExprContext
FdwRoutine
FetchDirection
FetchStmt
-FieldNot
FieldSelect
FieldStore
File
FmgrBuiltin
FmgrHookEventType
FmgrInfo
+ForBothCellState
+ForBothState
+ForEachState
+ForFiveState
+ForFourState
+ForThreeState
ForeignDataWrapper
ForeignKeyCacheInfo
ForeignKeyOptInfo
FormData_pg_operator
FormData_pg_opfamily
FormData_pg_partitioned_table
-FormData_pg_pltemplate
FormData_pg_policy
FormData_pg_proc
FormData_pg_publication
Form_pg_operator
Form_pg_opfamily
Form_pg_partitioned_table
-Form_pg_pltemplate
Form_pg_policy
Form_pg_proc
Form_pg_publication
GENERAL_NAME
GISTBuildBuffers
GISTBuildState
+GISTDeletedPageContents
GISTENTRY
GISTInsertStack
GISTInsertState
GinTupleCollector
GinVacuumState
GistBufferingMode
-GistBulkDeleteResult
GistEntryVector
GistHstoreOptions
GistInetKey
HTSV_Result
HV
Hash
+HashAggBatch
+HashAggSpill
HashAllocFunc
HashBuildState
HashCompareFunc
HashScanPosItem
HashSkewBucket
HashState
+HashTapeInfo
HashValueFunc
HbaLine
HbaToken
IncludeWal
InclusionOpaque
IncrementVarSublevelsUp_context
+IncrementalSort
+IncrementalSortExecutionStatus
+IncrementalSortGroupInfo
+IncrementalSortInfo
+IncrementalSortPath
+IncrementalSortState
Index
IndexAMProperty
IndexAmRoutine
JsonIterateStringValuesAction
JsonLexContext
JsonLikeRegexContext
+JsonManifestFileField
+JsonManifestParseContext
+JsonManifestParseState
+JsonManifestSemanticState
+JsonManifestWALRangeField
JsonParseContext
+JsonParseErrorType
JsonPath
JsonPathBool
JsonPathExecContext
LPSECURITY_ATTRIBUTES
LPSERVICE_STATUS
LPSTR
-LPTHREAD_START_ROUTINE
LPTSTR
LPVOID
LPWSTR
LexizeData
LibraryInfo
Limit
+LimitOption
LimitPath
LimitState
LimitStateCond
LogicalRepBeginData
LogicalRepCommitData
LogicalRepCtxStruct
+LogicalRepPartMapEntry
LogicalRepRelId
LogicalRepRelMapEntry
LogicalRepRelation
MultiXactOffset
MultiXactStateData
MultiXactStatus
-MyData
NDBOX
NODE
NUMCacheEntry
NodeTag
NonEmptyRange
Notification
+NotificationHash
+NotificationList
NotifyStmt
Nsrt
NullIfExpr
OSInfo
OSSLCipher
OSSLDigest
-OSVERSIONINFO
OVERLAPPED
ObjectAccessDrop
ObjectAccessNamespaceSearch
OldSnapshotControlData
OldToNewMapping
OldToNewMappingData
-OldTriggerInfo
OnCommitAction
OnCommitItem
OnConflictAction
PGresult_data
PHANDLE
PLAINTREE
-PLTemplate
PLUID_AND_ATTRIBUTES
PLcword
PLpgSQL_arrayelem
PQnoticeProcessor
PQnoticeReceiver
PQprintOpt
+PQsslKeyPassHook_type
PREDICATELOCK
PREDICATELOCKTAG
PREDICATELOCKTARGET
ParamListInfo
ParamPathInfo
ParamRef
+ParamsErrorCbData
ParentMapEntry
ParseCallbackState
ParseExprKind
+ParseNamespaceColumn
ParseNamespaceItem
ParseParamRefHook
ParseState
PartitionHashBound
PartitionKey
PartitionListValue
+PartitionMap
PartitionPruneCombineOp
PartitionPruneContext
PartitionPruneInfo
Pattern_Type
PendingFsyncEntry
PendingRelDelete
+PendingRelSync
PendingUnlinkEntry
PendingWriteback
PerlInterpreter
PgStat_MsgResetcounter
PgStat_MsgResetsharedcounter
PgStat_MsgResetsinglecounter
+PgStat_MsgResetslrucounter
+PgStat_MsgSLRU
PgStat_MsgTabpurge
PgStat_MsgTabstat
PgStat_MsgTempFile
PgStat_MsgVacuum
+PgStat_SLRUStats
PgStat_Shared_Reset_Target
PgStat_Single_Reset_Type
PgStat_StatDBEntry
PopulateArrayContext
PopulateArrayState
PopulateRecordCache
-PopulateRecordsetCache
PopulateRecordsetState
Port
Portal
PredXactListElement
PredicateLockData
PredicateLockTargetType
+PrefetchBufferResult
PrepParallelRestorePtrType
PrepareStmt
PreparedParamsData
PreparedStatement
+PresortedKeyData
PrewarmType
PrintExtraTocPtrType
PrintTocDataPtrType
PrivateRefCountEntry
ProcArrayStruct
ProcLangInfo
+ProcSignalBarrierType
+ProcSignalHeader
ProcSignalReason
ProcSignalSlot
ProcState
Publication
PublicationActions
PublicationInfo
+PublicationPartOpt
PublicationRelInfo
PullFilter
PullFilterOps
QualCost
QualItem
Query
+QueryCompletion
QueryDesc
QueryEnvironment
QueryInfo
SIZE_T
SMgrRelation
SMgrRelationData
+SMgrSortArray
SOCKADDR
SOCKET
SPELL
ScanKeywordList
ScanState
ScanTypeControl
+ScannerCallbackState
SchemaQuery
SecBuffer
SecBufferDesc
SecLabelItem
SecLabelStmt
SeenRelsEntry
+SelectLimit
SelectStmt
Selectivity
SemTPadded
SharedExecutorInstrumentation
SharedFileSet
SharedHashInfo
+SharedIncrementalSortInfo
SharedInvalCatalogMsg
SharedInvalCatcacheMsg
SharedInvalRelcacheMsg
SimpleEcontextStackEntry
SimpleOidList
SimpleOidListCell
+SimplePtrList
+SimplePtrListCell
SimpleStats
SimpleStringList
SimpleStringListCell
SlabChunk
SlabContext
SlabSlot
+SlotAcquireBehavior
SlotErrCallbackArg
SlotNumber
SlruCtl
Syn
SyncOps
SyncRepConfigData
+SyncRepStandbyData
SyncRequestType
SysScanDesc
SyscacheCallbackFunction
TSQuerySign
TSReadPointer
TSTemplateInfo
+TSTernaryValue
TSTokenTypeStorage
TSVector
TSVectorBuildState
TSVectorStat
TState
TStoreState
+TXNEntryFile
TYPCATEGORY
T_Action
T_WorkerStatus
Tcl_NotifierProcs
Tcl_Obj
Tcl_Time
+TempNamespaceStatus
TestDecodingData
TestSpec
TextFreq
TwoPhaseRecordOnDisk
TwoPhaseRmgrId
TwoPhaseStateData
-TxidEpoch
-TxidSnapshot
Type
TypeCacheEntry
TypeCacheEnumData
ULONG_PTR
UV
UVersionInfo
+UnicodeNormalizationForm
+UnicodeNormalizationQC
Unique
UniquePath
UniquePathMethod
UniqueState
UnlistenStmt
+UnpackTarState
UnresolvedTup
UnresolvedTupData
UpdateStmt
VirtualTransactionId
VirtualTupleTableSlot
Vsrt
-WAITORTIMERCALLBACK
WAIT_ORDER
+WALAvailability
WALInsertLock
WALInsertLockPadded
+WALOpenSegment
+WALReadError
+WALSegmentCloseCB
+WALSegmentContext
+WALSegmentOpenCB
WCHAR
WCOKind
WFW_WaitOption
Working_State
WriteBufPtrType
WriteBytePtrType
+WriteDataCallback
WriteDataPtrType
WriteExtraTocPtrType
WriteFunc
+WriteManifestState
+WriteTarState
WritebackContext
X509
X509_EXTENSION
XLogPageHeaderData
XLogPageReadCB
XLogPageReadPrivate
+XLogReaderRoutine
XLogReaderState
XLogRecData
XLogRecPtr
__CreateRestrictedToken
__IsProcessInJob
__QueryInformationJobObject
-__RegisterWaitForSingleObject
__SetInformationJobObject
_resultmap
_stringlist
avl_tree
avw_dbase
backslashResult
+backup_manifest_info
+backup_manifest_option
base_yy_extra_type
basebackup_options
bgworker_main_type
destructor
dev_t
digit
-directory_fctx
disassembledLeaf
dlist_head
dlist_iter
ec_member_matches_arg
emit_log_hook_type
eval_const_expressions_context
-event_trigger_command_tag_check_result
-event_trigger_support_data
exec_thread_arg
execution_state
explain_get_index_name_hook_type
jmp_buf
join_search_hook_type
json_aelem_action
+json_manifest_error_callback
+json_manifest_perfile_callback
+json_manifest_perwalrange_callback
json_ofield_action
json_scalar_action
json_struct_action
leaf_item
line_t
lineno_t
-list_qsort_comparator
+list_sort_comparator
local_relopt
local_relopts
locale_t
macaddr
macaddr8
macaddr_sortsupport_state
+manifest_file
+manifest_files_hash
+manifest_files_iterator
+manifest_wal_range
map_variable_attnos_context
max_parallel_hazard_context
mb2wchar_with_len_converter
mxact
mxtruncinfo
needs_fmgr_hook_type
+network_sortsupport_state
nodeitem
normal_rand_fctx
ntile_context
oidvector
on_dsm_detach_callback
on_exit_nicely_callback
+openssl_tls_init_hook_typ
ossl_EVP_cipher_func
other
output_type
pairingheap_node
parallel_worker_main_type
parse_error_callback_arg
+parser_context
+partition_method_t
pendingPosition
pgParameterStatus
pg_atomic_flag
pg_atomic_uint32
pg_atomic_uint64
+pg_checksum_context
+pg_checksum_raw_context
+pg_checksum_type
pg_conn_host
pg_conn_host_type
pg_conv_map
pg_tz_cache
pg_tzenum
pg_unicode_decomposition
+pg_unicode_normprops
pg_utf_to_local_combined
pg_uuid_t
pg_wc_probefunc
pltcl_proc_ptr
pltcl_query_desc
pointer
+polymorphic_actuals
pos_trgm
post_parse_analyze_hook_type
pqbool
pullup_replace_vars_context
pushdown_safety_info
qsort_arg_comparator
+qsort_comparator
query_pathkeys_callback
radius_attribute
radius_packet
rm_detail_t
role_auth_extra
row_security_policy_hook_type
+rsv_callback
save_buffer
scram_HMAC_ctx
scram_state
trgm_mb_char
trivalue
tsKEY
-ts_db_fctx
ts_parserstate
ts_tokenizer
ts_tokentype
tsearch_readline_state
tuplehash_hash
tuplehash_iterator
-txid
type
tzEntry
u1byte
varattrib_1b_e
varattrib_4b
vbits
+verifier_context
walrcv_check_conninfo_fn
walrcv_connect_fn
walrcv_create_slot_fn
walrcv_disconnect_fn
walrcv_endstreaming_fn
walrcv_exec_fn
+walrcv_get_backend_pid_fn
walrcv_get_conninfo_fn
walrcv_get_senderinfo_fn
walrcv_identify_system_fn
xl_brin_revmap_extend
xl_brin_samepage_update
xl_brin_update
+xl_btree_dedup
xl_btree_delete
xl_btree_insert
xl_btree_mark_page_halfdead
xl_btree_reuse_page
xl_btree_split
xl_btree_unlink_page
+xl_btree_update
xl_btree_vacuum
xl_clog_truncate
xl_commit_ts_set
xl_xact_parsed_abort
xl_xact_parsed_commit
xl_xact_parsed_prepare
+xl_xact_prepare
xl_xact_relfilenodes
xl_xact_subxacts
xl_xact_twophase
if ($minor =~ m/^\d+$/)
{
- $dotneeded = 1;
+ $dotneeded = 1;
}
elsif ($minor eq "devel")
{
- $dotneeded = 0;
+ $dotneeded = 0;
}
elsif ($minor =~ m/^alpha\d+$/)
{
- $dotneeded = 0;
+ $dotneeded = 0;
}
elsif ($minor =~ m/^beta\d+$/)
{
- $dotneeded = 0;
+ $dotneeded = 0;
}
elsif ($minor =~ m/^rc\d+$/)
{
- $dotneeded = 0;
+ $dotneeded = 0;
}
else
{