Also "make reformat-dat-files".
The only change worthy of note is that pgindent messed up the formatting
of launcher.c's struct LogicalRepWorkerId, which led me to notice that
that struct wasn't used at all anymore, so I just took it out.
#
fresh_test_table('test');
$node->safe_psql('postgres', q(VACUUM (FREEZE, DISABLE_PAGE_SKIPPING) test));
-detects_no_corruption(
- "verify_heapam('test')",
+detects_no_corruption("verify_heapam('test')",
"all-frozen not corrupted table");
corrupt_first_page('test');
detects_heap_corruption("verify_heapam('test')",
return false;
case XID_COMMITTED:
+
/*
* The tuple is dead, because the xvac transaction moved
- * it off and committed. It's checkable, but also prunable.
+ * it off and committed. It's checkable, but also
+ * prunable.
*/
return true;
case XID_ABORTED:
+
/*
* The original xmin must have committed, because the xvac
* transaction tried to move it later. Since xvac is
return false;
case XID_COMMITTED:
+
/*
* The original xmin must have committed, because the xvac
* transaction moved it later. Whether it's still alive
break;
case XID_ABORTED:
+
/*
* The tuple is dead, because the xvac transaction moved
- * it off and committed. It's checkable, but also prunable.
+ * it off and committed. It's checkable, but also
+ * prunable.
*/
return true;
}
{
/*
* Inserting transaction is not in progress, and not committed, so
- * it might have changed the TupleDesc in ways we don't know about.
- * Thus, don't try to check the tuple structure.
+ * it might have changed the TupleDesc in ways we don't know
+ * about. Thus, don't try to check the tuple structure.
*
* If xmin_status happens to be XID_IS_CURRENT_XID, then in theory
- * any such DDL changes ought to be visible to us, so perhaps
- * we could check anyway in that case. But, for now, let's be
+ * any such DDL changes ought to be visible to us, so perhaps we
+ * could check anyway in that case. But, for now, let's be
* conservative and treat this like any other uncommitted insert.
*/
return false;
{
/*
* xmax is a multixact, so sanity-check the MXID. Note that we do this
- * prior to checking for HEAP_XMAX_INVALID or HEAP_XMAX_IS_LOCKED_ONLY.
- * This might therefore complain about things that wouldn't actually
- * be a problem during a normal scan, but eventually we're going to
- * have to freeze, and that process will ignore hint bits.
+ * prior to checking for HEAP_XMAX_INVALID or
+ * HEAP_XMAX_IS_LOCKED_ONLY. This might therefore complain about
+ * things that wouldn't actually be a problem during a normal scan,
+ * but eventually we're going to have to freeze, and that process will
+ * ignore hint bits.
*
* Even if the MXID is out of range, we still know that the original
* insert committed, so we can check the tuple itself. However, we
* can't rule out the possibility that this tuple is dead, so don't
* clear ctx->tuple_could_be_pruned. Possibly we should go ahead and
* clear that flag anyway if HEAP_XMAX_INVALID is set or if
- * HEAP_XMAX_IS_LOCKED_ONLY is true, but for now we err on the side
- * of avoiding possibly-bogus complaints about missing TOAST entries.
+ * HEAP_XMAX_IS_LOCKED_ONLY is true, but for now we err on the side of
+ * avoiding possibly-bogus complaints about missing TOAST entries.
*/
xmax = HeapTupleHeaderGetRawXmax(tuphdr);
switch (check_mxid_valid_in_rel(xmax, ctx))
* away depends on how old the deleting transaction is.
*/
ctx->tuple_could_be_pruned = TransactionIdPrecedes(xmax,
- ctx->safe_xmin);
+ ctx->safe_xmin);
break;
case XID_ABORTED:
+
/*
* The delete aborted or crashed. The tuple is still live.
*/
break;
case XID_COMMITTED:
+
/*
* The delete committed. Whether the toast can be vacuumed away
* depends on how old the deleting transaction is.
*/
ctx->tuple_could_be_pruned = TransactionIdPrecedes(xmax,
- ctx->safe_xmin);
+ ctx->safe_xmin);
break;
case XID_ABORTED:
+
/*
* The delete aborted or crashed. The tuple is still live.
*/
ta->toast_pointer.va_valueid,
chunk_seq, chunksize, expected_size));
}
+
/*
* Check the current attribute as tracked in ctx, recording any corruption
* found in ctx->tupstore.
ereport(DEBUG1,
(errcode(ERRCODE_NO_DATA),
errmsg_internal("harmless fast root mismatch in index \"%s\"",
- RelationGetRelationName(rel)),
+ RelationGetRelationName(rel)),
errdetail_internal("Fast root block %u (level %u) differs from true root block %u (level %u).",
metad->btm_fastroot, metad->btm_fastlevel,
metad->btm_root, metad->btm_level)));
ereport(DEBUG1,
(errcode(ERRCODE_NO_DATA),
errmsg_internal("block %u of index \"%s\" concurrently deleted",
- current, RelationGetRelationName(state->rel))));
+ current, RelationGetRelationName(state->rel))));
goto nextpage;
}
else if (nextleveldown.leftmost == InvalidBlockNumber)
Buffer newtargetbuf;
Page page;
BTPageOpaque opaque;
- BlockNumber newtargetblock;
+ BlockNumber newtargetblock;
/* Couple locks in the usual order for nbtree: Left to right */
lbuf = ReadBufferExtended(state->rel, MAIN_FORKNUM, leftcurrent,
ereport(DEBUG1,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg_internal("harmless concurrent page split detected in index \"%s\"",
- RelationGetRelationName(state->rel)),
+ RelationGetRelationName(state->rel)),
errdetail_internal("Block=%u new right sibling=%u original right sibling=%u.",
leftcurrent, newtargetblock,
state->targetblock)));
ereport(DEBUG2,
(errcode(ERRCODE_NO_DATA),
errmsg_internal("level %u sibling page in block %u of index \"%s\" was found deleted or half dead",
- opaque->btpo_level, targetnext, RelationGetRelationName(state->rel)),
+ opaque->btpo_level, targetnext, RelationGetRelationName(state->rel)),
errdetail_internal("Deleted page found when building scankey from right sibling.")));
targetnext = opaque->btpo_next;
ereport(DEBUG2,
(errcode(ERRCODE_NO_DATA),
errmsg_internal("%s block %u of index \"%s\" has no first data item",
- P_ISLEAF(opaque) ? "leaf" : "internal", targetnext,
- RelationGetRelationName(state->rel))));
+ P_ISLEAF(opaque) ? "leaf" : "internal", targetnext,
+ RelationGetRelationName(state->rel))));
return NULL;
}
ereport(DEBUG1,
(errcode(ERRCODE_NO_DATA),
errmsg_internal("harmless interrupted page split detected in index \"%s\"",
- RelationGetRelationName(state->rel)),
+ RelationGetRelationName(state->rel)),
errdetail_internal("Block=%u level=%u left sibling=%u page lsn=%X/%X.",
blkno, opaque->btpo_level,
opaque->btpo_prev,
*/
typedef struct
{
- int current_index;
- int head_offset;
- TimestampTz head_timestamp;
- int count_used;
- TransactionId xid_by_minute[FLEXIBLE_ARRAY_MEMBER];
+ int current_index;
+ int head_offset;
+ TimestampTz head_timestamp;
+ int count_used;
+ TransactionId xid_by_minute[FLEXIBLE_ARRAY_MEMBER];
} OldSnapshotTimeMapping;
#define NUM_TIME_MAPPING_COLUMNS 3
if (SRF_IS_FIRSTCALL())
{
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
static HeapTuple
MakeOldSnapshotTimeMappingTuple(TupleDesc tupdesc, OldSnapshotTimeMapping *mapping)
{
- Datum values[NUM_TIME_MAPPING_COLUMNS];
- bool nulls[NUM_TIME_MAPPING_COLUMNS];
- int array_position;
- TimestampTz timestamp;
+ Datum values[NUM_TIME_MAPPING_COLUMNS];
+ bool nulls[NUM_TIME_MAPPING_COLUMNS];
+ int array_position;
+ TimestampTz timestamp;
/*
* Figure out the array position corresponding to the current index.
*
* Index 0 means the oldest entry in the mapping, which is stored at
- * mapping->head_offset. Index 1 means the next-oldest entry, which is at the
- * following index, and so on. We wrap around when we reach the end of the array.
+ * mapping->head_offset. Index 1 means the next-oldest entry, which is at
+ * the following index, and so on. We wrap around when we reach the end of
+ * the array.
*/
array_position = (mapping->head_offset + mapping->current_index)
% OLD_SNAPSHOT_TIME_MAP_ENTRIES;
/*
- * No explicit timestamp is stored for any entry other than the oldest one,
- * but each entry corresponds to a 1-minute period, so we can just add.
+ * No explicit timestamp is stored for any entry other than the oldest
+ * one, but each entry corresponds to a 1-minute period, so we can just add.
*/
timestamp = TimestampTzPlusMilliseconds(mapping->head_timestamp,
mapping->current_index * 60000);
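
The wraparound arithmetic the reflowed comment describes is easy to check in
isolation. A minimal standalone sketch (the entry count and field values here
are invented; the real ones come from the contrib module's shared state):

    #include <stdio.h>

    #define ENTRIES        100    /* stand-in for OLD_SNAPSHOT_TIME_MAP_ENTRIES */
    #define MS_PER_MINUTE  60000

    int
    main(void)
    {
        int         head_offset = 97;    /* slot holding the oldest entry */
        long long   head_timestamp = 0;  /* its timestamp, in milliseconds */
        int         current_index = 5;   /* logical index, 0 = oldest */

        /* wrap around when the logical index runs past the end of the array */
        int         array_position = (head_offset + current_index) % ENTRIES;

        /* entry i's timestamp is i minutes after the oldest one's */
        long long   timestamp = head_timestamp +
            (long long) current_index * MS_PER_MINUTE;

        printf("slot %d, timestamp %lld ms\n", array_position, timestamp);
        return 0;
    }

With head_offset = 97 and logical index 5 this lands in slot 2, showing the
wraparound.
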
* Force utility statements to get queryId zero. We do this even in cases
* where the statement contains an optimizable statement for which a
* queryId could be derived (such as EXPLAIN or DECLARE CURSOR). For such
- * cases, runtime control will first go through ProcessUtility and then the
- * executor, and we don't want the executor hooks to do anything, since we
- * are already measuring the statement's costs at the utility level.
+ * cases, runtime control will first go through ProcessUtility and then
+ * the executor, and we don't want the executor hooks to do anything,
+ * since we are already measuring the statement's costs at the utility
+ * level.
*
* Note that this is only done if pg_stat_statements is enabled and
* configured to track utility statements, in the unlikely possibility
int values_end_len, int num_cols,
int num_rows)
{
- int i, j;
+ int i,
+ j;
int pindex;
bool first;
appendBinaryStringInfo(buf, orig_query, values_end_len);
/*
- * Add records to VALUES clause (we already have parameters for the
- * first row, so start at the right offset).
+ * Add records to VALUES clause (we already have parameters for the first
+ * row, so start at the right offset).
*/
pindex = num_cols + 1;
for (i = 0; i < num_rows; i++)
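
Since the first row's parameters are already part of the deparsed query, the
appended rows continue the $n numbering at num_cols + 1. A toy illustration of
just that numbering (the real code appends to a StringInfo; printf stands in
here, and the row/column counts are invented):

    #include <stdio.h>

    int
    main(void)
    {
        int num_cols = 3;
        int extra_rows = 2;         /* rows beyond the one already deparsed */
        int pindex = num_cols + 1;  /* $1..$3 are taken by the first row */

        for (int i = 0; i < extra_rows; i++)
        {
            printf(", (");
            for (int j = 0; j < num_cols; j++)
                printf("%s$%d", j > 0 ? ", " : "", pindex++);
            printf(")");
        }
        printf("\n");               /* prints: , ($4, $5, $6), ($7, $8, $9) */
        return 0;
    }
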
/* for remote query execution */
PGconn *conn; /* connection for the scan */
- PgFdwConnState *conn_state; /* extra per-connection state */
+ PgFdwConnState *conn_state; /* extra per-connection state */
unsigned int cursor_number; /* quasi-unique ID for my cursor */
bool cursor_exists; /* have we created the cursor? */
int numParams; /* number of parameters passed to query */
bool eof_reached; /* true if last fetch reached EOF */
/* for asynchronous execution */
- bool async_capable; /* engage asynchronous-capable logic? */
+ bool async_capable; /* engage asynchronous-capable logic? */
/* working memory contexts */
MemoryContext batch_cxt; /* context holding current batch of tuples */
/* for remote query execution */
PGconn *conn; /* connection for the scan */
- PgFdwConnState *conn_state; /* extra per-connection state */
+ PgFdwConnState *conn_state; /* extra per-connection state */
char *p_name; /* name of prepared statement, if created */
/* extracted fdw_private data */
/* for remote query execution */
PGconn *conn; /* connection for the update */
- PgFdwConnState *conn_state; /* extra per-connection state */
+ PgFdwConnState *conn_state; /* extra per-connection state */
int numParams; /* number of parameters passed to query */
FmgrInfo *param_flinfo; /* output conversion functions for them */
List *param_exprs; /* executable expressions for param values */
TupleTableSlot *slot,
TupleTableSlot *planSlot);
static TupleTableSlot **postgresExecForeignBatchInsert(EState *estate,
- ResultRelInfo *resultRelInfo,
- TupleTableSlot **slots,
- TupleTableSlot **planSlots,
- int *numSlots);
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int *numSlots);
static int postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo);
static TupleTableSlot *postgresExecForeignUpdate(EState *estate,
ResultRelInfo *resultRelInfo,
bool has_returning,
List *retrieved_attrs);
static TupleTableSlot **execute_foreign_modify(EState *estate,
- ResultRelInfo *resultRelInfo,
- CmdType operation,
- TupleTableSlot **slots,
- TupleTableSlot **planSlots,
- int *numSlots);
+ ResultRelInfo *resultRelInfo,
+ CmdType operation,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int *numSlots);
static void prepare_foreign_modify(PgFdwModifyState *fmstate);
static const char **convert_prep_stmt_params(PgFdwModifyState *fmstate,
ItemPointer tupleid,
static void merge_fdw_options(PgFdwRelationInfo *fpinfo,
const PgFdwRelationInfo *fpinfo_o,
const PgFdwRelationInfo *fpinfo_i);
-static int get_batch_size_option(Relation rel);
+static int get_batch_size_option(Relation rel);
/*
target_attrs = (List *) list_nth(fdw_private,
FdwModifyPrivateTargetAttnums);
values_end_len = intVal(list_nth(fdw_private,
- FdwModifyPrivateLen));
+ FdwModifyPrivateLen));
has_returning = intVal(list_nth(fdw_private,
FdwModifyPrivateHasReturning));
retrieved_attrs = (List *) list_nth(fdw_private,
{
PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
TupleTableSlot **rslot;
- int numSlots = 1;
+ int numSlots = 1;
/*
* If the fmstate has aux_fmstate set, use the aux_fmstate (see
*/
static TupleTableSlot **
postgresExecForeignBatchInsert(EState *estate,
- ResultRelInfo *resultRelInfo,
- TupleTableSlot **slots,
- TupleTableSlot **planSlots,
- int *numSlots)
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int *numSlots)
{
PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
TupleTableSlot **rslot;
static int
postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
{
- int batch_size;
+ int batch_size;
PgFdwModifyState *fmstate = resultRelInfo->ri_FdwState ?
- (PgFdwModifyState *) resultRelInfo->ri_FdwState :
- NULL;
+ (PgFdwModifyState *) resultRelInfo->ri_FdwState :
+ NULL;
/* should be called only once */
Assert(resultRelInfo->ri_BatchSize == 0);
/*
- * Should never get called when the insert is being performed as part of
- * a row movement operation.
+ * Should never get called when the insert is being performed as part of a
+ * row movement operation.
*/
Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
TupleTableSlot *planSlot)
{
TupleTableSlot **rslot;
- int numSlots = 1;
+ int numSlots = 1;
rslot = execute_foreign_modify(estate, resultRelInfo, CMD_UPDATE,
- &slot, &planSlot, &numSlots);
+ &slot, &planSlot, &numSlots);
return rslot ? rslot[0] : NULL;
}
TupleTableSlot *planSlot)
{
TupleTableSlot **rslot;
- int numSlots = 1;
+ int numSlots = 1;
rslot = execute_foreign_modify(estate, resultRelInfo, CMD_DELETE,
- &slot, &planSlot, &numSlots);
+ &slot, &planSlot, &numSlots);
return rslot ? rslot[0] : NULL;
}
/*
* If the foreign table is a partition that doesn't have a corresponding
- * RTE entry, we need to create a new RTE
- * describing the foreign table for use by deparseInsertSql and
- * create_foreign_modify() below, after first copying the parent's RTE and
- * modifying some fields to describe the foreign partition to work on.
- * However, if this is invoked by UPDATE, the existing RTE may already
- * correspond to this partition if it is one of the UPDATE subplan target
- * rels; in that case, we can just use the existing RTE as-is.
+ * RTE entry, we need to create a new RTE describing the foreign table for
+ * use by deparseInsertSql and create_foreign_modify() below, after first
+ * copying the parent's RTE and modifying some fields to describe the
+ * foreign partition to work on. However, if this is invoked by UPDATE,
+ * the existing RTE may already correspond to this partition if it is one
+ * of the UPDATE subplan target rels; in that case, we can just use the
+ * existing RTE as-is.
*/
if (resultRelInfo->ri_RangeTableIndex == 0)
{
ExplainPropertyText("Remote SQL", sql, es);
/*
- * For INSERT we should always have batch size >= 1, but UPDATE
- * and DELETE don't support batching so don't show the property.
+ * For INSERT we should always have batch size >= 1, but UPDATE and
+ * DELETE don't support batching so don't show the property.
*/
if (rinfo->ri_BatchSize > 0)
ExplainPropertyInteger("Batch Size", NULL, rinfo->ri_BatchSize, es);
static int
get_batch_size_option(Relation rel)
{
- Oid foreigntableid = RelationGetRelid(rel);
+ Oid foreigntableid = RelationGetRelid(rel);
ForeignTable *table;
ForeignServer *server;
List *options;
ListCell *lc;
/* we use 1 by default, which means "no batching" */
- int batch_size = 1;
+ int batch_size = 1;
/*
- * Load options for table and server. We append server options after
- * table options, because table options take precedence.
+ * Load options for table and server. We append server options after table
+ * options, because table options take precedence.
*/
table = GetForeignTable(foreigntableid);
server = GetForeignServer(table->serverid);
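
The precedence here comes purely from scan order: whichever list segment is
scanned first wins, so appending server options after table options makes the
table-level setting shadow the server-level one. First-match-wins lookup in
miniature (plain arrays standing in for the DefElem lists):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { const char *name; const char *value; } Option;

    static int
    lookup_batch_size(const Option *opts, int n, int fallback)
    {
        for (int i = 0; i < n; i++)
            if (strcmp(opts[i].name, "batch_size") == 0)
                return atoi(opts[i].value);     /* first match wins */
        return fallback;
    }

    int
    main(void)
    {
        /* table options first, server options appended after them */
        Option opts[] = {
            {"batch_size", "10"},       /* table-level setting */
            {"batch_size", "100"},      /* server-level, shadowed */
        };

        printf("%d\n", lookup_batch_size(opts, 2, 1));  /* prints 10 */
        return 0;
    }
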
* range values; if so, have the pages in the range added
* to the output bitmap.
*
- * The opclass may or may not support processing of multiple
- * scan keys. We can determine that based on the number of
- * arguments - functions with an extra parameter (number of scan
- * keys) do support this, otherwise we have to simply pass the
- * scan keys one by one.
+ * The opclass may or may not support processing of
+ * multiple scan keys. We can determine that based on the
+ * number of arguments - functions with an extra parameter
+ * (number of scan keys) do support this, otherwise we
+ * have to simply pass the scan keys one by one.
*/
if (consistentFn[attno - 1].fn_nargs >= 4)
{
/*
* Check keys one by one
*
- * When there are multiple scan keys, failure to meet the
- * criteria for a single one of them is enough to discard
- * the range as a whole, so break out of the loop as soon
- * as a false return value is obtained.
+ * When there are multiple scan keys, failure to meet
+ * the criteria for a single one of them is enough to
+ * discard the range as a whole, so break out of the
+ * loop as soon as a false return value is obtained.
*/
int keyno;
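
The arity test decides between one call that sees all the scan keys and a
per-key loop. A schematic version of that dispatch, with plain structs
standing in for BrinDesc/ScanKey and an invented integer-range opclass:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { int lo, hi; } Range;   /* stand-in for a BRIN summary */
    typedef struct { int value; } Key;      /* stand-in for a ScanKey */

    /* newer style: handles all scan keys in one call */
    static bool
    consistent_multi(const Range *r, const Key *keys, int nkeys)
    {
        for (int i = 0; i < nkeys; i++)
            if (keys[i].value < r->lo || keys[i].value > r->hi)
                return false;   /* one failing key discards the range */
        return true;
    }

    /* older style: one key per call, so the caller loops */
    static bool
    consistent_single(const Range *r, const Key *key)
    {
        return key->value >= r->lo && key->value <= r->hi;
    }

    int
    main(void)
    {
        Range   r = {10, 20};
        Key     keys[] = {{12}, {25}};
        int     fn_nargs = 3;   /* as if read from the FmgrInfo */
        bool    matches;

        if (fn_nargs >= 4)      /* opclass handles multiple keys itself */
            matches = consistent_multi(&r, keys, 2);
        else
        {
            matches = true;
            for (int k = 0; k < 2 && matches; k++)
                matches = consistent_single(&r, &keys[k]);
        }
        printf("range %s\n", matches ? "kept" : "discarded");
        return 0;
    }
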
/* data of the bloom filter */
char data[FLEXIBLE_ARRAY_MEMBER];
-} BloomFilter;
+} BloomFilter;
/*
* Add value to the bloom filter.
*/
static BloomFilter *
-bloom_add_value(BloomFilter * filter, uint32 value, bool *updated)
+bloom_add_value(BloomFilter *filter, uint32 value, bool *updated)
{
int i;
uint64 h1,
* Check if the bloom filter contains a particular value.
*/
static bool
-bloom_contains_value(BloomFilter * filter, uint32 value)
+bloom_contains_value(BloomFilter *filter, uint32 value)
{
int i;
uint64 h1,
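
The h1/h2 pair is the usual double-hashing construction, where probe i sets
or tests bit (h1 + i*h2) mod nbits. A self-contained toy filter along those
lines (sizes, hash mix, and constants invented; the real opclass derives its
parameters from reloptions and hashes with PostgreSQL's hash functions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NBITS 1024
    #define NHASH 7

    static uint8_t filter[NBITS / 8];

    static void
    hashes(uint32_t value, uint64_t *h1, uint64_t *h2)
    {
        uint64_t h = (uint64_t) value * 0x9E3779B97F4A7C15ULL;  /* cheap mix */

        *h1 = h >> 32;
        *h2 = h | 1;    /* odd stride, coprime with power-of-two NBITS */
    }

    static void
    bloom_add(uint32_t value)
    {
        uint64_t h1, h2;

        hashes(value, &h1, &h2);
        for (int i = 0; i < NHASH; i++)
        {
            uint64_t bit = (h1 + i * h2) % NBITS;

            filter[bit / 8] |= 1 << (bit % 8);
        }
    }

    static bool
    bloom_contains(uint32_t value)
    {
        uint64_t h1, h2;

        hashes(value, &h1, &h2);
        for (int i = 0; i < NHASH; i++)
        {
            uint64_t bit = (h1 + i * h2) % NBITS;

            if (!(filter[bit / 8] & (1 << (bit % 8))))
                return false;   /* definitely absent */
        }
        return true;            /* present, or a false positive */
    }

    int
    main(void)
    {
        bloom_add(42);
        printf("42:%d 43:%d\n", bloom_contains(42), bloom_contains(43));
        return 0;
    }
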
*/
FmgrInfo extra_procinfos[BLOOM_MAX_PROCNUMS];
bool extra_proc_missing[BLOOM_MAX_PROCNUMS];
-} BloomOpaque;
+} BloomOpaque;
static FmgrInfo *bloom_get_procinfo(BrinDesc *bdesc, uint16 attno,
uint16 procnum);
bool extra_proc_missing[MINMAX_MAX_PROCNUMS];
Oid cached_subtype;
FmgrInfo strategy_procinfos[BTMaxStrategyNumber];
-} MinmaxMultiOpaque;
+} MinmaxMultiOpaque;
/*
* Storage type for BRIN's minmax reloptions
{
FmgrInfo *cmpFn;
Oid colloid;
-} compare_context;
+} compare_context;
static int compare_values(const void *a, const void *b, void *arg);
/*
* For values passed by value, we need to copy just the
* significant bytes - we can't use memcpy directly, as that
- * assumes little endian behavior. store_att_byval does
- * almost what we need, but it requires a properly aligned
- * buffer - the output buffer does not guarantee that. So we
- * simply use a local Datum variable (which guarantees proper
- * alignment), and then copy the value from it.
+ * assumes little endian behavior. store_att_byval does almost
+ * what we need, but it requires a properly aligned buffer - the
+ * output buffer does not guarantee that. So we simply use a local
+ * Datum variable (which guarantees proper alignment), and then
+ * copy the value from it.
*/
store_att_byval(&tmp, range->values[i], typlen);
dataptr = NULL;
for (i = 0; (i < nvalues) && (!typbyval); i++)
{
- if (typlen > 0) /* fixed-length by-ref types */
+ if (typlen > 0) /* fixed-length by-ref types */
datalen += MAXALIGN(typlen);
else if (typlen == -1) /* varlena */
{
}
else if (typlen == -2) /* cstring */
{
- Size slen = strlen(ptr) + 1;
+ Size slen = strlen(ptr) + 1;
+
range->values[i] = PointerGetDatum(dataptr);
memcpy(dataptr, ptr, slen);
/*
* Delta is the (fractional) number of days between the intervals. Assume
- * months have 30 days for consistency with interval_cmp_internal.
- * We don't need to be exact; in the worst case we'll build a bit less
+ * months have 30 days for consistency with interval_cmp_internal. We
+ * don't need to be exact; in the worst case we'll build a bit less
* efficient ranges. But we should not contradict interval_cmp.
*/
dayfraction = result->time % USECS_PER_DAY;
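
The 30-days-per-month assumption plus the USECS_PER_DAY remainder give a
simple fractional-day distance. A cut-down version of that computation (the
struct mirrors Interval's months/days/time split; the value is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define USECS_PER_DAY  INT64_C(86400000000)
    #define DAYS_PER_MONTH 30   /* same assumption as interval_cmp_internal */

    typedef struct { int32_t month; int32_t day; int64_t time; } Interval;

    /* fractional number of days represented by an (already subtracted) delta */
    static double
    interval_days(const Interval *delta)
    {
        int64_t dayfraction = delta->time % USECS_PER_DAY;
        int64_t days = delta->time / USECS_PER_DAY;

        days += (int64_t) delta->month * DAYS_PER_MONTH;
        days += delta->day;

        return (double) days + (double) dayfraction / (double) USECS_PER_DAY;
    }

    int
    main(void)
    {
        Interval d = {1, 2, INT64_C(43200000000)};  /* 1 mon 2 days 12:00 */

        printf("%.2f days\n", interval_days(&d));   /* prints 32.50 */
        return 0;
    }
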
/*
* The length is calculated from the mask length, because we sort the
- * addresses by first address in the range, so A.B.C.D/24 < A.B.C.1
- * (the first range starts at A.B.C.0, which is before A.B.C.1). We
- * don't want to produce negative delta in this case, so we just cut
- * the extra bytes.
+ * addresses by first address in the range, so A.B.C.D/24 < A.B.C.1 (the
+ * first range starts at A.B.C.0, which is before A.B.C.1). We don't want
+ * to produce negative delta in this case, so we just cut the extra bytes.
*
- * XXX Maybe this should be a bit more careful and cut the bits, not
- * just whole bytes.
+ * XXX Maybe this should be a bit more careful and cut the bits, not just
+ * whole bytes.
*/
lena = ip_bits(ipa);
lenb = ip_bits(ipb);
/* apply the network mask to both addresses */
for (i = 0; i < len; i++)
{
- unsigned char mask;
- int nbits;
+ unsigned char mask;
+ int nbits;
nbits = lena - (i * 8);
if (nbits < 8)
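
Cutting whole bytes means each byte keeps all, some, or none of its bits,
depending on how much of the mask length remains at that byte. Spelled out on
a dummy IPv4 address:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned char addr[4] = {192, 168, 1, 200};
        int lena = 20;              /* shorter of the two mask lengths */

        for (int i = 0; i < 4; i++)
        {
            int nbits = lena - i * 8;   /* mask bits left for this byte */
            unsigned char mask;

            if (nbits >= 8)
                mask = 0xFF;            /* byte fully inside the mask */
            else if (nbits > 0)
                mask = (unsigned char) (0xFF << (8 - nbits));   /* partial */
            else
                mask = 0;               /* byte entirely past the mask */

            addr[i] &= mask;
        }

        printf("%u.%u.%u.%u\n", addr[0], addr[1], addr[2], addr[3]);
        return 0;                       /* prints 192.168.0.0 */
    }
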
regBuf = ReadBuffer(idxrel, ItemPointerGetBlockNumber(iptr));
LockBuffer(regBuf, BUFFER_LOCK_EXCLUSIVE);
regPg = BufferGetPage(regBuf);
+
/*
* We're only removing data, not reading it, so there's no need to
* TestForOldSnapshot here.
datumno < brdesc->bd_info[keyno]->oi_nstored;
datumno++)
{
- Datum value = tuple->bt_columns[keyno].bv_values[datumno];
+ Datum value = tuple->bt_columns[keyno].bv_values[datumno];
#ifdef TOAST_INDEX_HACK
/* We must look at the stored type, not at the index descriptor. */
- TypeCacheEntry *atttype = brdesc->bd_info[keyno]->oi_typcache[datumno];
+ TypeCacheEntry *atttype = brdesc->bd_info[keyno]->oi_typcache[datumno];
/* Do we need to free the value at the end? */
- bool free_value = false;
+ bool free_value = false;
/* For non-varlena types we don't need to do anything special */
if (atttype->typlen != -1)
* If value is stored EXTERNAL, must fetch it so we are not
* depending on outside storage.
*
- * XXX Is this actually true? Could it be that the summary is
- * NULL even for range with non-NULL data? E.g. degenerate bloom
- * filter may be thrown away, etc.
+ * XXX Is this actually true? Could it be that the summary is NULL
+ * even for range with non-NULL data? E.g. degenerate bloom filter
+ * may be thrown away, etc.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(value)))
{
}
/*
- * If value is above size target, and is of a compressible datatype,
- * try to compress it in-line.
+ * If value is above size target, and is of a compressible
+ * datatype, try to compress it in-line.
*/
if (!VARATT_IS_EXTENDED(DatumGetPointer(value)) &&
VARSIZE(DatumGetPointer(value)) > TOAST_INDEX_TARGET &&
(atttype->typstorage == TYPSTORAGE_EXTENDED ||
atttype->typstorage == TYPSTORAGE_MAIN))
{
- Datum cvalue;
- char compression;
+ Datum cvalue;
+ char compression;
Form_pg_attribute att = TupleDescAttr(brdesc->bd_tupdesc,
keyno);
(att->attstorage == TYPSTORAGE_EXTENDED ||
att->attstorage == TYPSTORAGE_MAIN))
{
- Datum cvalue;
- char compression = att->attcompression;
+ Datum cvalue;
+ char compression = att->attcompression;
/*
* If the compression method is not valid, use the default. We
* don't expect this to happen for regular index columns, which
- * inherit the setting from the corresponding table column, but
- * we do expect it to happen whenever an expression is indexed.
+ * inherit the setting from the corresponding table column, but we
+ * do expect it to happen whenever an expression is indexed.
*/
if (!CompressionMethodIsValid(compression))
compression = GetDefaultToastCompression();
#include "utils/builtins.h"
/* GUC */
-int default_toast_compression = TOAST_PGLZ_COMPRESSION;
+int default_toast_compression = TOAST_PGLZ_COMPRESSION;
#define NO_LZ4_SUPPORT() \
ereport(ERROR, \
*/
struct varlena *
pglz_decompress_datum_slice(const struct varlena *value,
- int32 slicelength)
+ int32 slicelength)
{
struct varlena *result;
int32 rawsize;
ToastCompressionId
toast_get_compression_id(struct varlena *attr)
{
- ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;
+ ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;
/*
- * If it is stored externally then fetch the compression method id from the
- * external toast pointer. If compressed inline, fetch it from the toast
- * compression header.
+ * If it is stored externally then fetch the compression method id from
+ * the external toast pointer. If compressed inline, fetch it from the
+ * toast compression header.
*/
if (VARATT_IS_EXTERNAL_ONDISK(attr))
{
{
struct varlena *tmp = NULL;
int32 valsize;
- ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;
+ ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;
Assert(!VARATT_IS_EXTERNAL(DatumGetPointer(value)));
Assert(!VARATT_IS_COMPRESSED(DatumGetPointer(value)));
Bitmapset *
execute_attr_map_cols(AttrMap *attrMap, Bitmapset *in_cols)
{
- Bitmapset *out_cols;
+ Bitmapset *out_cols;
int out_attnum;
/* fast path for the common trivial case */
static uint64 point_zorder_internal(float4 x, float4 y);
static uint64 part_bits32_by2(uint32 x);
static uint32 ieee_float32_to_uint32(float f);
-static int gist_bbox_zorder_cmp(Datum a, Datum b, SortSupport ssup);
+static int gist_bbox_zorder_cmp(Datum a, Datum b, SortSupport ssup);
static Datum gist_bbox_zorder_abbrev_convert(Datum original, SortSupport ssup);
-static int gist_bbox_zorder_cmp_abbrev(Datum z1, Datum z2, SortSupport ssup);
+static int gist_bbox_zorder_cmp_abbrev(Datum z1, Datum z2, SortSupport ssup);
static bool gist_bbox_zorder_abbrev_abort(int memtupcount, SortSupport ssup);
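
Z-order comparisons of this kind rest on bit interleaving: spread each
coordinate's bits to the even positions, shift one copy, and OR them
together. A sketch of the Morton-code spreading a helper like part_bits32_by2
typically performs (the real code also maps float4 coordinates to sortable
uint32 first, via something like ieee_float32_to_uint32, omitted here):

    #include <stdint.h>
    #include <stdio.h>

    /* spread the 32 input bits to the even bit positions of a uint64 */
    static uint64_t
    part_bits32_by2(uint32_t x)
    {
        uint64_t n = x;

        n = (n | (n << 16)) & UINT64_C(0x0000FFFF0000FFFF);
        n = (n | (n << 8))  & UINT64_C(0x00FF00FF00FF00FF);
        n = (n | (n << 4))  & UINT64_C(0x0F0F0F0F0F0F0F0F);
        n = (n | (n << 2))  & UINT64_C(0x3333333333333333);
        n = (n | (n << 1))  & UINT64_C(0x5555555555555555);
        return n;
    }

    static uint64_t
    zorder(uint32_t x, uint32_t y)
    {
        /* x occupies the even bit positions, y the odd ones */
        return part_bits32_by2(x) | (part_bits32_by2(y) << 1);
    }

    int
    main(void)
    {
        /* x=3 (0b011), y=5 (0b101) interleave to 0b100111 = 39 */
        printf("%llu\n", (unsigned long long) zorder(3, 5));
        return 0;
    }
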
continue; /* got it */
if (i == GIST_DISTANCE_PROC || i == GIST_FETCH_PROC ||
i == GIST_COMPRESS_PROC || i == GIST_DECOMPRESS_PROC ||
- i == GIST_OPTIONS_PROC || i == GIST_SORTSUPPORT_PROC)
+ i == GIST_OPTIONS_PROC || i == GIST_SORTSUPPORT_PROC)
continue; /* optional methods */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
* transactions on the primary might still be invisible to a read-only
* transaction in the standby. We partly handle this problem by tracking
* the minimum xmin of visible tuples as the cut-off XID while marking a
- * page all-visible on the primary and WAL log that along with the visibility
- * map SET operation. In hot standby, we wait for (or abort) all
- * transactions that potentially may not see one or more tuples on the
- * page. That's how index-only scans work fine in hot standby. A crucial
- * difference between index-only scans and heap scans is that the
+ * page all-visible on the primary and WAL log that along with the
+ * visibility map SET operation. In hot standby, we wait for (or abort)
+ * all transactions that potentially may not see one or more tuples on
+ * the page. That's how index-only scans work fine in hot standby. A
+ * crucial difference between index-only scans and heap scans is that the
* index-only scan completely relies on the visibility map whereas heap
* scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
* the page-level flag can be trusted in the same way, because it might
/*
- * If we're inserting a frozen entry into an empty page,
- * set visibility map bits and PageAllVisible() hint.
+ * If we're inserting a frozen entry into an empty page, set visibility map
+ * bits and PageAllVisible() hint.
*
+ * If we're inserting a frozen entry into an already all_frozen page,
- * preserve this state.
+ * If we're inserting frozen entry into already all_frozen page, preserve
+ * this state.
*/
if (options & HEAP_INSERT_FROZEN)
{
if (visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer))
vmstatus = visibilitymap_get_status(relation,
- BufferGetBlockNumber(buffer), &vmbuffer);
+ BufferGetBlockNumber(buffer), &vmbuffer);
if ((starting_with_empty_page || vmstatus & VISIBILITYMAP_ALL_FROZEN))
all_frozen_set = true;
(options & HEAP_INSERT_SPECULATIVE) != 0);
/*
- * If the page is all visible, need to clear that, unless we're only
- * going to add further frozen rows to it.
+ * If the page is all visible, need to clear that, unless we're only going
+ * to add further frozen rows to it.
*
* If we're only adding already frozen rows to a page that was empty or
* marked as all visible, mark it as all-visible.
END_CRIT_SECTION();
/*
- * If we've frozen everything on the page, update the visibilitymap.
- * We're already holding pin on the vmbuffer.
+ * If we've frozen everything on the page, update the visibilitymap. We're
+ * already holding pin on the vmbuffer.
*
- * No need to update the visibilitymap if it had all_frozen bit set
- * before this insertion.
+ * No need to update the visibilitymap if it had all_frozen bit set before
+ * this insertion.
*/
if (all_frozen_set && ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0))
{
Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer));
/*
- * It's fine to use InvalidTransactionId here - this is only used
- * when HEAP_INSERT_FROZEN is specified, which intentionally
- * violates visibility rules.
+ * It's fine to use InvalidTransactionId here - this is only used when
+ * HEAP_INSERT_FROZEN is specified, which intentionally violates
+ * visibility rules.
*/
visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
- InvalidXLogRecPtr, vmbuffer,
- InvalidTransactionId,
- VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
+ InvalidXLogRecPtr, vmbuffer,
+ InvalidTransactionId,
+ VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
}
UnlockReleaseBuffer(buffer);
tupledata = scratchptr;
/* check that the mutually exclusive flags are not both set */
- Assert (!(all_visible_cleared && all_frozen_set));
+ Assert(!(all_visible_cleared && all_frozen_set));
xlrec->flags = 0;
if (all_visible_cleared)
xl_heap_header xlhdr;
XLogRecPtr recptr;
- /* For logical decode we need combo CIDs to properly decode the catalog */
+ /*
+ * For logical decode we need combo CIDs to properly decode the
+ * catalog
+ */
if (RelationIsAccessibleInLogicalDecoding(relation))
log_heap_new_cid(relation, &tp);
* TIDs as each other. The goal is to ignore relatively small differences
* in the total number of promising entries, so that the whole process can
* give a little weight to heapam factors (like heap block locality)
- * instead. This isn't a trade-off, really -- we have nothing to lose.
- * It would be foolish to interpret small differences in npromisingtids
+ * instead. This isn't a trade-off, really -- we have nothing to lose. It
+ * would be foolish to interpret small differences in npromisingtids
* values as anything more than noise.
*
* We tiebreak on nhtids when sorting block group subsets that have the
* same npromisingtids, but this has the same issues as npromisingtids,
- * and so nhtids is subject to the same power-of-two bucketing scheme.
- * The only reason that we don't fix nhtids in the same way here too is
- * that we'll need accurate nhtids values after the sort. We handle
- * nhtids bucketization dynamically instead (in the sort comparator).
+ * and so nhtids is subject to the same power-of-two bucketing scheme. The
+ * only reason that we don't fix nhtids in the same way here too is that
+ * we'll need accurate nhtids values after the sort. We handle nhtids
+ * bucketization dynamically instead (in the sort comparator).
*
* See bottomup_nblocksfavorable() for a full explanation of when and how
* heap locality/favorable blocks can significantly influence when and how
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
/* check that the mutually exclusive flags are not both set */
- Assert (!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
- (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
+ Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
+ (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
/*
* The visibility map may need to be fixed even if the heap page is
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
/* check that the mutually exclusive flags are not both set */
- Assert (!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
- (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
+ Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
+ (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
/*
* The visibility map may need to be fixed even if the heap page is
offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
/*
- * If a HOT tuple points to a root that we don't know
- * about, obtain root items afresh. If that still fails,
- * report it as corruption.
+ * If a HOT tuple points to a root that we don't know about,
+ * obtain root items afresh. If that still fails, report it as
+ * corruption.
*/
if (root_offsets[offnum - 1] == InvalidOffsetNumber)
{
- Page page = BufferGetPage(hscan->rs_cbuf);
+ Page page = BufferGetPage(hscan->rs_cbuf);
LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
heap_get_root_tuples(page, root_offsets);
else if (!isnull[i] && TupleDescAttr(newTupDesc, i)->attlen == -1)
{
struct varlena *new_value;
- ToastCompressionId cmid;
- char cmethod;
+ ToastCompressionId cmid;
+ char cmethod;
new_value = (struct varlena *) DatumGetPointer(values[i]);
cmid = toast_get_compression_id(new_value);
/*
* another transaction might have (tried to) delete this tuple or
- * cmin/cmax was stored in a combo CID. So we need to look up the actual
- * values externally.
+ * cmin/cmax was stored in a combo CID. So we need to look up the
+ * actual values externally.
*/
resolved = ResolveCminCmaxDuringDecoding(HistoricSnapshotGetTupleCids(), snapshot,
htup, buffer,
* elog inside ResolveCminCmaxDuringDecoding.
*
* XXX For the streaming case, we can track the largest combo CID
- * assigned, and error out based on this (when unable to resolve
- * combo CID below that observed maximum value).
+ * assigned, and error out based on this (when unable to resolve combo
+ * CID below that observed maximum value).
*/
if (!resolved)
return false;
* elog inside ResolveCminCmaxDuringDecoding.
*
* XXX For the streaming case, we can track the largest combo CID
- * assigned, and error out based on this (when unable to resolve
- * combo CID below that observed maximum value).
+ * assigned, and error out based on this (when unable to resolve combo
+ * CID below that observed maximum value).
*/
if (!resolved || cmax == InvalidCommandId)
return true;
}
/*
- * If the FSM knows nothing of the rel, try the last page before we
- * give up and extend. This avoids one-tuple-per-page syndrome during
+ * If the FSM knows nothing of the rel, try the last page before we give
+ * up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
/*
* We can't write WAL in recovery mode, so there's no point trying to
- * clean the page. The primary will likely issue a cleaning WAL record soon
- * anyway, so this is no particular loss.
+ * clean the page. The primary will likely issue a cleaning WAL record
+ * soon anyway, so this is no particular loss.
*/
if (RecoveryInProgress())
return;
*
* Deliberately avoid telling the stats collector about LP_DEAD items that
* remain in the table due to VACUUM bypassing index and heap vacuuming.
- * ANALYZE will consider the remaining LP_DEAD items to be dead tuples.
- * It seems like a good idea to err on the side of not vacuuming again too
+ * ANALYZE will consider the remaining LP_DEAD items to be dead tuples. It
+ * seems like a good idea to err on the side of not vacuuming again too
* soon in cases where the failsafe prevented significant amounts of heap
* vacuuming.
*/
lazy_vacuum_heap_rel(LVRelState *vacrel)
{
int tupindex;
- BlockNumber vacuumed_pages;
+ BlockNumber vacuumed_pages;
PGRUsage ru0;
Buffer vmbuffer = InvalidBuffer;
LVSavedErrInfo saved_err_info;
UnregisterSnapshot(sysscan->snapshot);
/*
- * Reset the bsysscan flag at the end of the systable scan. See
- * detailed comments in xact.c where these variables are declared.
+ * Reset the bsysscan flag at the end of the systable scan. See detailed
+ * comments in xact.c where these variables are declared.
*/
if (TransactionIdIsValid(CheckXidAlive))
bsysscan = false;
LockBuffer(buf, access);
/*
- * It doesn't matter that _bt_unlockbuf() won't get called in the
- * event of an nbtree error (e.g. a unique violation error). That
- * won't cause Valgrind false positives.
+ * It doesn't matter that _bt_unlockbuf() won't get called in the event of
+ * an nbtree error (e.g. a unique violation error). That won't cause
+ * Valgrind false positives.
*
- * The nbtree client requests are superimposed on top of the
- * bufmgr.c buffer pin client requests. In the event of an nbtree
- * error the buffer will certainly get marked as defined when the
- * backend once again acquires its first pin on the buffer. (Of
- * course, if the backend never touches the buffer again then it
- * doesn't matter that it remains non-accessible to Valgrind.)
+ * The nbtree client requests are superimposed on top of the bufmgr.c
+ * buffer pin client requests. In the event of an nbtree error the buffer
+ * will certainly get marked as defined when the backend once again
+ * acquires its first pin on the buffer. (Of course, if the backend never
+ * touches the buffer again then it doesn't matter that it remains
+ * non-accessible to Valgrind.)
*
- * Note: When an IndexTuple C pointer gets computed using an
- * ItemId read from a page while a lock was held, the C pointer
- * becomes unsafe to dereference forever as soon as the lock is
- * released. Valgrind can only detect cases where the pointer
- * gets dereferenced with no _current_ lock/pin held, though.
+ * Note: When an IndexTuple C pointer gets computed using an ItemId read
+ * from a page while a lock was held, the C pointer becomes unsafe to
+ * dereference forever as soon as the lock is released. Valgrind can only
+ * detect cases where the pointer gets dereferenced with no _current_
+ * lock/pin held, though.
*/
if (!RelationUsesLocalBuffers(rel))
VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
while (P_ISDELETED(opaque) || opaque->btpo_next != target)
{
- bool leftsibvalid = true;
+ bool leftsibvalid = true;
/*
* Before we follow the link from the page that was the left
* top parent link when deleting leafbuf because it's the last page
* we'll delete in the subtree undergoing deletion.
*/
- Buffer leafbuf;
- IndexTupleData trunctuple;
+ Buffer leafbuf;
+ IndexTupleData trunctuple;
Assert(!isleaf);
/* Log the info */
ereport(DEBUG1,
(errmsg_internal("MultiXactId wrap limit is %u, limited by database with OID %u",
- multiWrapLimit, oldest_datoid)));
+ multiWrapLimit, oldest_datoid)));
/*
* Computing the actual limits is only possible once the data directory is
if (oldestOffsetKnown)
ereport(DEBUG1,
(errmsg_internal("oldest MultiXactId member is at offset %u",
- oldestOffset)));
+ oldestOffset)));
else
ereport(LOG,
(errmsg("MultiXact member wraparound protections are disabled because oldest checkpointed MultiXact %u does not exist on disk",
ereport(DEBUG1,
(errmsg_internal("MultiXact member stop limit is now %u based on MultiXact %u",
- offsetStopLimit, oldestMultiXactId)));
+ offsetStopLimit, oldestMultiXactId)));
}
else if (prevOldestOffsetKnown)
{
xlrec->moff + xlrec->nmembers);
/*
- * Make sure nextXid is beyond any XID mentioned in the record.
- * This should be unnecessary, since any XID found here ought to have
- * other evidence in the XLOG, but let's be safe.
+ * Make sure nextXid is beyond any XID mentioned in the record. This
+ * should be unnecessary, since any XID found here ought to have other
+ * evidence in the XLOG, but let's be safe.
*/
max_xid = XLogRecGetXid(record);
for (i = 0; i < xlrec->nmembers; i++)
gxact->prepare_start_lsn = ProcLastRecPtr;
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks
- * MyProc as not running our XID (which it will do immediately after
- * this function returns), others can commit/rollback the xact.
+ * Mark the prepared transaction as valid. As soon as xact.c marks MyProc
+ * as not running our XID (which it will do immediately after this
+ * function returns), others can commit/rollback the xact.
*
* NB: a side effect of this is to make a dummy ProcArray entry for the
* prepared XID. This must happen before we clear the XID from MyProc /
ExtendSUBTRANS(xid);
/*
- * Now advance the nextXid counter. This must not happen until after
- * we have successfully completed ExtendCLOG() --- if that routine fails,
- * we want the next incoming transaction to try it again. We cannot
- * assign more XIDs until there is CLOG space for them.
+ * Now advance the nextXid counter. This must not happen until after we
+ * have successfully completed ExtendCLOG() --- if that routine fails, we
+ * want the next incoming transaction to try it again. We cannot assign
+ * more XIDs until there is CLOG space for them.
*/
FullTransactionIdAdvance(&ShmemVariableCache->nextXid);
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
- * Note that readers of ProcGlobal->xids/PGPROC->xid should be careful
- * to fetch the value for each proc only once, rather than assume they can
+ * Note that readers of ProcGlobal->xids/PGPROC->xid should be careful to
+ * fetch the value for each proc only once, rather than assume they can
* read a value multiple times and get the same answer each time. Note we
* are assuming that TransactionId and int fetch/store are atomic.
*
uint32 epoch;
/*
- * It is safe to read nextXid without a lock, because this is only
- * called from the startup process or single-process mode, meaning that no
- * other process can modify it.
+ * It is safe to read nextXid without a lock, because this is only called
+ * from the startup process or single-process mode, meaning that no other
+ * process can modify it.
*/
Assert(AmStartupProcess() || !IsUnderPostmaster);
/* Log the info */
ereport(DEBUG1,
(errmsg_internal("transaction ID wrap limit is %u, limited by database with OID %u",
- xidWrapLimit, oldest_datoid)));
+ xidWrapLimit, oldest_datoid)));
/*
* If past the autovacuum force point, immediately signal an autovac
* We can't acquire XidGenLock, as this may be called with XidGenLock
* already held (or with other locks that don't allow XidGenLock to be
* nested). That's ok for our purposes though, since we already rely on
- * 32bit reads to be atomic. While nextXid is 64 bit, we only look at
- * the lower 32bit, so a skewed read doesn't hurt.
+ * 32bit reads to be atomic. While nextXid is 64 bit, we only look at the
+ * lower 32bit, so a skewed read doesn't hurt.
*
* There's no increased danger of falling outside [oldest, next] by
* accessing them without a lock. xid needs to have been created with
*/
TimestampTz currentChunkStartTime;
/* Recovery pause state */
- RecoveryPauseState recoveryPauseState;
+ RecoveryPauseState recoveryPauseState;
ConditionVariable recoveryNotPausedCV;
/*
ereport(DEBUG2,
(errmsg_internal("updated min recovery point to %X/%X on timeline %u",
- LSN_FORMAT_ARGS(minRecoveryPoint),
- newMinRecoveryPointTLI)));
+ LSN_FORMAT_ARGS(minRecoveryPoint),
+ newMinRecoveryPointTLI)));
}
}
LWLockRelease(ControlFileLock);
blocks = wal_segment_size / XLOG_BLCKSZ;
for (int i = 0; i < blocks;)
{
- int iovcnt = Min(blocks - i, lengthof(iov));
+ int iovcnt = Min(blocks - i, lengthof(iov));
off_t offset = i * XLOG_BLCKSZ;
if (pg_pwritev_with_retry(fd, iov, iovcnt, offset) < 0)
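
The loop zero-fills a segment a few blocks at a time through a reused iovec
array. The same pattern with plain pwritev (no retry or partial-write
handling, which is what the pg_pwritev_with_retry wrapper adds; block and
segment sizes here are toy values):

    #define _GNU_SOURCE             /* for pwritev on glibc */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    #define BLCKSZ  8192
    #define NBLOCKS 32              /* a toy "segment" of 32 blocks */

    static char zbuffer[BLCKSZ];    /* one shared all-zeros block */

    int
    main(void)
    {
        struct iovec iov[8];
        int fd = open("segment.tmp", O_CREAT | O_TRUNC | O_WRONLY, 0600);

        if (fd < 0)
            return 1;

        /* every iovec entry points at the same zeroed block */
        for (int i = 0; i < 8; i++)
        {
            iov[i].iov_base = zbuffer;
            iov[i].iov_len = BLCKSZ;
        }

        for (int i = 0; i < NBLOCKS;)
        {
            int   iovcnt = NBLOCKS - i < 8 ? NBLOCKS - i : 8;
            off_t offset = (off_t) i * BLCKSZ;

            if (pwritev(fd, iov, iovcnt, offset) < 0)
            {
                perror("pwritev");
                return 1;
            }
            i += iovcnt;
        }
        return close(fd) == 0 ? 0 : 1;
    }
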
* however, unless we actually find a valid segment. That way if there is
* neither a timeline history file nor a WAL segment in the archive, and
* streaming replication is set up, we'll read the timeline history file
- * streamed from the primary when we start streaming, instead of recovering
- * with a dummy history generated here.
+ * streamed from the primary when we start streaming, instead of
+ * recovering with a dummy history generated here.
*/
if (expectedTLEs)
tles = expectedTLEs;
{
ereport(DEBUG2,
(errmsg_internal("recycled write-ahead log file \"%s\"",
- segname)));
+ segname)));
CheckpointStats.ckpt_segs_recycled++;
/* Needn't recheck that slot on future iterations */
(*endlogSegNo)++;
ereport(DEBUG2,
(errmsg_internal("removing write-ahead log file \"%s\"",
- segname)));
+ segname)));
#ifdef WIN32
RecoveryPauseState
GetRecoveryPauseState(void)
{
- RecoveryPauseState state;
+ RecoveryPauseState state;
SpinLockAcquire(&XLogCtl->info_lck);
state = XLogCtl->recoveryPauseState;
ereport(WARNING,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("promotion is not possible because of insufficient parameter settings"),
- /* Repeat the detail from above so it's easy to find in the log. */
+
+ /*
+ * Repeat the detail from above so it's easy to find
+ * in the log.
+ */
errdetail("%s = %d is a lower setting than on the primary server, where its value was %d.",
param_name,
currValue,
}
/*
- * If recovery pause is requested then set it paused. While we
- * are in the loop, the user might resume and pause again, so set
- * this every time.
+ * If recovery pause is requested then set it paused. While
+ * we are in the loop, the user might resume and pause again, so
+ * set this every time.
*/
ConfirmRecoveryPaused();
/*
- * We wait on a condition variable that will wake us as soon as
- * the pause ends, but we use a timeout so we can check the
+ * We wait on a condition variable that will wake us as soon
+ * as the pause ends, but we use a timeout so we can check the
* above conditions periodically too.
*/
ConditionVariableTimedSleep(&XLogCtl->recoveryNotPausedCV, 1000,
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("recovery aborted because of insufficient parameter settings"),
- /* Repeat the detail from above so it's easy to find in the log. */
+ /* Repeat the detail from above so it's easy to find in the log. */
errdetail("%s = %d is a lower setting than on the primary server, where its value was %d.",
param_name,
currValue,
StartupReorderBuffer();
/*
- * Startup CLOG. This must be done after ShmemVariableCache->nextXid
- * has been initialized and before we accept connections or begin WAL
- * replay.
+ * Startup CLOG. This must be done after ShmemVariableCache->nextXid has
+ * been initialized and before we accept connections or begin WAL replay.
*/
StartupCLOG();
* ourselves - the history file of the recovery target timeline covers all
* the previous timelines in the history too - a cascading standby server
* might be interested in them. Or, if you archive the WAL from this
- * server to a different archive than the primary, it'd be good for all the
- * history files to get archived there after failover, so that you can use
- * one of the old timelines as a PITR target. Timeline history files are
- * small, so it's better to copy them unnecessarily than not copy them and
- * regret later.
+ * server to a different archive than the primary, it'd be good for all
+ * the history files to get archived there after failover, so that you can
+ * use one of the old timelines as a PITR target. Timeline history files
+ * are small, so it's better to copy them unnecessarily than not copy them
+ * and regret later.
*/
restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
ProcArrayInitRecovery(XidFromFullTransactionId(ShmemVariableCache->nextXid));
/*
- * Startup subtrans only. CLOG, MultiXact and commit
- * timestamp have already been started up and other SLRUs are not
- * maintained during recovery and need not be started yet.
+ * Startup subtrans only. CLOG, MultiXact and commit timestamp
+ * have already been started up and other SLRUs are not maintained
+ * during recovery and need not be started yet.
*/
StartupSUBTRANS(oldestActiveXID);
error_context_stack = &errcallback;
/*
- * ShmemVariableCache->nextXid must be beyond record's
- * xid.
+ * ShmemVariableCache->nextXid must be beyond record's xid.
*/
AdvanceNextFullTransactionIdPastXid(record->xl_xid);
WalSndWakeup();
/*
- * If this was a promotion, request an (online) checkpoint now. This
- * isn't required for consistency, but the last restartpoint might be far
- * back, and in case of a crash, recovering from it might take longer
- * than is appropriate now that we're not in standby mode anymore.
+ * If this was a promotion, request an (online) checkpoint now. This isn't
+ * required for consistency, but the last restartpoint might be far back,
+ * and in case of a crash, recovering from it might take longer than is
+ * appropriate now that we're not in standby mode anymore.
*/
if (promoted)
RequestCheckpoint(CHECKPOINT_FORCE);
{
if (restartpoint)
ereport(LOG,
- /* translator: the placeholders show checkpoint options */
+ /* translator: the placeholders show checkpoint options */
(errmsg("restartpoint starting:%s%s%s%s%s%s%s%s",
(flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "",
(flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "",
(flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "")));
else
ereport(LOG,
- /* translator: the placeholders show checkpoint options */
+ /* translator: the placeholders show checkpoint options */
(errmsg("checkpoint starting:%s%s%s%s%s%s%s%s",
(flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "",
(flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "",
if (fscanf(lfp, "START TIME: %127[^\n]\n", backuptime) == 1)
ereport(DEBUG1,
(errmsg_internal("backup time %s in file \"%s\"",
- backuptime, BACKUP_LABEL_FILE)));
+ backuptime, BACKUP_LABEL_FILE)));
if (fscanf(lfp, "LABEL: %1023[^\n]\n", backuplabel) == 1)
ereport(DEBUG1,
(errmsg_internal("backup label %s in file \"%s\"",
- backuplabel, BACKUP_LABEL_FILE)));
+ backuplabel, BACKUP_LABEL_FILE)));
/*
* START TIMELINE is new as of 11. Its parsing is not mandatory, still use
ereport(DEBUG1,
(errmsg_internal("backup timeline %u in file \"%s\"",
- tli_from_file, BACKUP_LABEL_FILE)));
+ tli_from_file, BACKUP_LABEL_FILE)));
}
if (ferror(lfp) || FreeFile(lfp))
Assert(readFile != -1);
/*
- * If the current segment is being streamed from the primary, calculate how
- * much of the current page we have received already. We know the
+ * If the current segment is being streamed from the primary, calculate
+ * how much of the current page we have received already. We know the
* requested record has been received, but this is for the benefit of
* future calls, to allow quick exit at the top of this function.
*/
* and replay reaches a record that's split across two WAL segments. The
* first page is only available locally, in pg_wal, because it's already
* been recycled on the primary. The second page, however, is not present
- * in pg_wal, and we should stream it from the primary. There is a recycled
- * WAL segment present in pg_wal, with garbage contents, however. We would
- * read the first page from the local WAL segment, but when reading the
- * second page, we would read the bogus, recycled, WAL segment. If we
- * didn't catch that case here, we would never recover, because
- * ReadRecord() would retry reading the whole record from the beginning.
+ * in pg_wal, and we should stream it from the primary. There is a
+ * recycled WAL segment present in pg_wal, with garbage contents, however.
+ * We would read the first page from the local WAL segment, but when
+ * reading the second page, we would read the bogus, recycled, WAL
+ * segment. If we didn't catch that case here, we would never recover,
+ * because ReadRecord() would retry reading the whole record from the
+ * beginning.
*
* Of course, this only catches errors in the page header, which is what
* happens in the case of a recycled WAL segment. Other kinds of errors or
* Failure while streaming. Most likely, we got here
* because streaming replication was terminated, or
* promotion was triggered. But we also get here if we
- * find an invalid record in the WAL streamed from the primary,
- * in which case something is seriously wrong. There's
- * little chance that the problem will just go away, but
- * PANIC is not good for availability either, especially
- * in hot standby mode. So, we treat that the same as
- * disconnection, and retry from archive/pg_wal again. The
- * WAL in the archive should be identical to what was
- * streamed, so it's unlikely that it helps, but one can
- * hope...
+ * find an invalid record in the WAL streamed from the
+ * primary, in which case something is seriously wrong.
+ * There's little chance that the problem will just go
+ * away, but PANIC is not good for availability either,
+ * especially in hot standby mode. So, we treat that the
+ * same as disconnection, and retry from archive/pg_wal
+ * again. The WAL in the archive should be identical to
+ * what was streamed, so it's unlikely that it helps, but
+ * one can hope...
*/
/*
Datum
pg_get_wal_replay_pause_state(PG_FUNCTION_ARGS)
{
- char *statestr = NULL;
+ char *statestr = NULL;
if (!RecoveryInProgress())
ereport(ERROR,
errhint("Recovery control functions can only be executed during recovery.")));
/* get the recovery pause state */
- switch(GetRecoveryPauseState())
+ switch (GetRecoveryPauseState())
{
case RECOVERY_NOT_PAUSED:
statestr = "not paused";
for (j = batch_start; j < i; j++)
{
/*
- * The page may be uninitialized. If so, we can't set the LSN because that
- * would corrupt the page.
+ * The page may be uninitialized. If so, we can't set the LSN
+ * because that would corrupt the page.
*/
if (!PageIsNew(pages[j]))
{
FormData_pg_type am_typ;
};
-static List *Typ = NIL; /* List of struct typmap* */
+static List *Typ = NIL; /* List of struct typmap* */
static struct typmap *Ap = NULL;
static Datum values[MAXATTR]; /* current row's attribute values */
{
if (Typ != NIL)
{
- ListCell *lc;
+ ListCell *lc;
- foreach (lc, Typ)
+ foreach(lc, Typ)
{
struct typmap *app = lfirst(lc);
+
if (strncmp(NameStr(app->am_typ.typname), type, NAMEDATALEN) == 0)
{
Ap = app;
populate_typ_list();
/*
- * Calling gettype would result in infinite recursion for types missing
- * in pg_type, so just repeat the lookup.
+ * Calling gettype would result in infinite recursion for types
+ * missing in pg_type, so just repeat the lookup.
*/
- foreach (lc, Typ)
+ foreach(lc, Typ)
{
struct typmap *app = lfirst(lc);
+
if (strncmp(NameStr(app->am_typ.typname), type, NAMEDATALEN) == 0)
{
Ap = app;
{
/* We have the boot-time contents of pg_type, so use it */
struct typmap *ap = NULL;
- ListCell *lc;
+ ListCell *lc;
- foreach (lc, Typ)
+ foreach(lc, Typ)
{
ap = lfirst(lc);
if (ap->am_oid == typid)
push @{ $catalog{toasting} },
{ parent_table => $1, toast_oid => $2, toast_index_oid => $3 };
}
- elsif (/^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(.+)\)/)
+ elsif (
+ /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(.+)\)/)
{
push @{ $catalog{indexing} },
- {
+ {
is_unique => $1 ? 1 : 0,
- is_pkey => $2 ? 1 : 0,
+ is_pkey => $2 ? 1 : 0,
index_name => $3,
index_oid => $4,
index_decl => $5
};
}
- elsif (/^DECLARE_(ARRAY_)?FOREIGN_KEY(_OPT)?\(\s*\(([^)]+)\),\s*(\w+),\s*\(([^)]+)\)\)/)
+ elsif (
+ /^DECLARE_(ARRAY_)?FOREIGN_KEY(_OPT)?\(\s*\(([^)]+)\),\s*(\w+),\s*\(([^)]+)\)\)/
+ )
{
push @{ $catalog{foreign_keys} },
{
ReleaseSysCache(tuple);
/*
- * Check if ACL_SELECT is being checked and, if so, and not set already
- * as part of the result, then check if the user is a member of the
+ * Check if ACL_SELECT is being checked and, if so, and not set already as
+ * part of the result, then check if the user is a member of the
* pg_read_all_data role, which allows read access to all relations.
*/
if (mask & ACL_SELECT && !(result & ACL_SELECT) &&
result |= ACL_SELECT;
/*
- * Check if ACL_INSERT, ACL_UPDATE, or ACL_DELETE is being checked
- * and, if so, and not set already as part of the result, then check
- * if the user is a member of the pg_write_all_data role, which
- * allows INSERT/UPDATE/DELETE access to all relations (except
- * system catalogs, which requires superuser, see above).
+ * Check if ACL_INSERT, ACL_UPDATE, or ACL_DELETE is being checked and, if
+ * so, and not set already as part of the result, then check if the user
+ * is a member of the pg_write_all_data role, which allows
+ * INSERT/UPDATE/DELETE access to all relations (except system catalogs,
+ * which requires superuser, see above).
*/
if (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE) &&
- !(result & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) &&
+ !(result & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) &&
has_privs_of_role(roleid, ROLE_PG_WRITE_ALL_DATA))
result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));
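
The role check only fires when some requested write bit is still missing from
the result, and it grants exactly the requested subset. The bit arithmetic in
isolation, with invented mask values:

    #include <stdio.h>

    typedef unsigned int AclMode;

    #define ACL_INSERT (1 << 0)
    #define ACL_SELECT (1 << 1)
    #define ACL_UPDATE (1 << 2)
    #define ACL_DELETE (1 << 3)

    int
    main(void)
    {
        AclMode mask = ACL_SELECT | ACL_UPDATE; /* privileges being checked */
        AclMode result = ACL_SELECT;            /* already granted directly */
        int     member_of_write_all = 1;        /* as if from a role lookup */

        /* only consult the role if a requested write bit is still missing */
        if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) &&
            !(result & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) &&
            member_of_write_all)
            result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));

        printf("result = 0x%x\n", result);      /* 0x6 = SELECT|UPDATE */
        return 0;
    }
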
ReleaseSysCache(tuple);
/*
- * Check if ACL_USAGE is being checked and, if so, and not set already
- * as part of the result, then check if the user is a member of the
- * pg_read_all_data or pg_write_all_data roles, which allow usage
- * access to all schemas.
+ * Check if ACL_USAGE is being checked and, if so, and not set already as
+ * part of the result, then check if the user is a member of the
+ * pg_read_all_data or pg_write_all_data roles, which allow usage access
+ * to all schemas.
*/
if (mask & ACL_USAGE && !(result & ACL_USAGE) &&
(has_privs_of_role(roleid, ROLE_PG_READ_ALL_DATA) ||
*/
AclResult
pg_attribute_aclcheck_ext(Oid table_oid, AttrNumber attnum,
- Oid roleid, AclMode mode, bool *is_missing)
+ Oid roleid, AclMode mode, bool *is_missing)
{
if (pg_attribute_aclmask_ext(table_oid, attnum, roleid, mode,
ACLMASK_ANY, is_missing) != 0)
*/
ereport(DEBUG2,
(errmsg_internal("drop auto-cascades to %s",
- objDesc)));
+ objDesc)));
}
else if (behavior == DROP_RESTRICT)
{
close $constraints;
# Finally, rename the completed files into place.
-Catalog::RenameTempFile($bkifile, $tmpext);
-Catalog::RenameTempFile($schemafile, $tmpext);
-Catalog::RenameTempFile($fk_info_file, $tmpext);
+Catalog::RenameTempFile($bkifile, $tmpext);
+Catalog::RenameTempFile($schemafile, $tmpext);
+Catalog::RenameTempFile($fk_info_file, $tmpext);
Catalog::RenameTempFile($constraints_file, $tmpext);
exit 0;
* For expression columns, set attcompression invalid, since
* there's no table column from which to copy the value. Whenever
* we actually need to compress a value, we'll use whatever the
- * current value of default_compression_method is at that point
- * in time.
+ * current value of default_compression_method is at that point in
+ * time.
*/
to->attcompression = InvalidCompressionMethod;
if (indexInfo->ii_ParallelWorkers == 0)
ereport(DEBUG1,
(errmsg_internal("building index \"%s\" on table \"%s\" serially",
- RelationGetRelationName(indexRelation),
- RelationGetRelationName(heapRelation))));
+ RelationGetRelationName(indexRelation),
+ RelationGetRelationName(heapRelation))));
else
ereport(DEBUG1,
(errmsg_internal("building index \"%s\" on table \"%s\" with request for %d parallel workers",
- RelationGetRelationName(indexRelation),
- RelationGetRelationName(heapRelation),
- indexInfo->ii_ParallelWorkers)));
+ RelationGetRelationName(indexRelation),
+ RelationGetRelationName(heapRelation),
+ indexInfo->ii_ParallelWorkers)));
/*
* Switch to the table owner's userid, so that any index functions are run
SetRelationTableSpace(iRel, params->tablespaceOid, InvalidOid);
/*
- * Schedule unlinking of the old index storage at transaction
- * commit.
+ * Schedule unlinking of the old index storage at transaction commit.
*/
RelationDropStorage(iRel);
RelationAssumeNewRelfilenode(iRel);
*/
typedef struct
{
- const char *class_descr; /* string describing the catalog, for internal error messages */
+ const char *class_descr; /* string describing the catalog, for internal
+ * error messages */
Oid class_oid; /* oid of catalog */
Oid oid_index_oid; /* oid of index on system oid column */
int oid_catcache_id; /* id of catcache on system oid column */
char *attname = get_attname(object->objectId,
object->objectSubId,
missing_ok);
+
if (!attname)
break;
bits16 flags = FORMAT_PROC_INVALID_AS_NULL;
char *proname = format_procedure_extended(object->objectId,
flags);
+
if (proname == NULL)
break;
bits16 flags = FORMAT_TYPE_INVALID_AS_NULL;
char *typname = format_type_extended(object->objectId, -1,
flags);
+
if (typname == NULL)
break;
{
char *pubname = get_publication_name(object->objectId,
missing_ok);
+
if (pubname)
appendStringInfo(&buffer, _("publication %s"), pubname);
break;
{
char *subname = get_subscription_name(object->objectId,
missing_ok);
+
if (subname)
appendStringInfo(&buffer, _("subscription %s"), subname);
break;
bits16 flags = FORMAT_PROC_FORCE_QUALIFY | FORMAT_PROC_INVALID_AS_NULL;
char *proname = format_procedure_extended(object->objectId,
flags);
+
if (proname == NULL)
break;
bits16 flags = FORMAT_OPERATOR_FORCE_QUALIFY | FORMAT_OPERATOR_INVALID_AS_NULL;
char *oprname = format_operator_extended(object->objectId,
flags);
+
if (oprname == NULL)
break;
parent = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent;
if (!OidIsValid(inhparent) || parent == inhparent)
{
- bool detach_pending;
+ bool detach_pending;
detach_pending =
((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhdetachpending;
while (HeapTupleIsValid(inheritsTuple = systable_getnext(scan)))
{
- bool detached;
+ bool detached;
detached =
((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhdetachpending;
else
{
/*
- * We can't do full prechecking of the function definition if there
- * are any polymorphic input types, because actual datatypes of
- * expression results will be unresolvable. The check will be done at
- * runtime instead.
+ * We can't do full prechecking of the function definition if
+ * there are any polymorphic input types, because actual datatypes
+ * of expression results will be unresolvable. The check will be
+ * done at runtime instead.
*
* We can run the text through the raw parser though; this will at
* least catch silly syntactic errors.
if (!haspolyarg)
{
/*
- * OK to do full precheck: analyze and rewrite the queries, then
- * verify the result type.
+ * OK to do full precheck: analyze and rewrite the queries,
+ * then verify the result type.
*/
SQLFunctionParseInfoPtr pinfo;
recordDependencyOnTablespace(Oid classId, Oid objectId, Oid tablespace)
{
ObjectAddress myself,
- referenced;
+ referenced;
ObjectAddressSet(myself, classId, objectId);
ObjectAddressSet(referenced, TableSpaceRelationId, tablespace);
get_subscription_name(subrel->srsubid, false)),
errdetail("Table synchronization for relation \"%s\" is in progress and is in state \"%c\".",
get_rel_name(relid), subrel->srsubstate),
+
/*
* translator: first %s is a SQL ALTER command and second %s is a
* SQL DROP command
table_close(class_rel, RowExclusiveLock);
/*
- * Register dependency from the toast table to the main, so that the
- * toast table will be deleted if the main is. Skip this in bootstrap
- * mode.
+ * Register dependency from the toast table to the main, so that the toast
+ * table will be deleted if the main is. Skip this in bootstrap mode.
*/
if (!IsBootstrapProcessingMode())
{
/*
* Ignore attempts to create toast tables on catalog tables after initdb.
- * Which catalogs get toast tables is explicitly chosen in
- * catalog/pg_*.h. (We could get here via some ALTER TABLE command if
- * the catalog doesn't have a toast table.)
+ * Which catalogs get toast tables is explicitly chosen in catalog/pg_*.h.
+ * (We could get here via some ALTER TABLE command if the catalog doesn't
+ * have a toast table.)
*/
if (IsCatalogRelation(rel) && !IsBootstrapProcessingMode())
return false;
*
* We assume that VACUUM hasn't set pg_class.reltuples already, even
* during a VACUUM ANALYZE. Although VACUUM often updates pg_class,
- * exceptions exist. A "VACUUM (ANALYZE, INDEX_CLEANUP OFF)" command
- * will never update pg_class entries for index relations. It's also
- * possible that an individual index's pg_class entry won't be updated
- * during VACUUM if the index AM returns NULL from its amvacuumcleanup()
- * routine.
+ * exceptions exist. A "VACUUM (ANALYZE, INDEX_CLEANUP OFF)" command will
+ * never update pg_class entries for index relations. It's also possible
+ * that an individual index's pg_class entry won't be updated during
+ * VACUUM if the index AM returns NULL from its amvacuumcleanup() routine.
*/
if (!inh)
{
else if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
/*
- * Partitioned tables don't have storage, so we don't set any fields in
- * their pg_class entries except for reltuples, which is necessary for
- * auto-analyze to work properly.
+ * Partitioned tables don't have storage, so we don't set any fields
+ * in their pg_class entries except for reltuples, which is necessary
+ * for auto-analyze to work properly.
*/
vac_update_relstats(onerel, -1, totalrows,
0, false, InvalidTransactionId,
typedef struct
{
DestReceiver pub; /* publicly-known function pointers */
- CopyToState cstate; /* CopyToStateData for the command */
+ CopyToState cstate; /* CopyToStateData for the command */
uint64 processed; /* # of tuples processed */
} DR_copy;
List *attnamelist,
List *options)
{
- CopyToState cstate;
+ CopyToState cstate;
bool pipe = (filename == NULL);
TupleDesc tupDesc;
int num_phys_attrs;
oldcontext = MemoryContextSwitchTo(cstate->copycontext);
/* Extract options from the statement node tree */
- ProcessCopyOptions(pstate, &cstate->opts, false /* is_from */, options);
+ ProcessCopyOptions(pstate, &cstate->opts, false /* is_from */ , options);
/* Process the source/target relation or query */
if (rel)
else
tupDesc = cstate->queryDesc->tupDesc;
num_phys_attrs = tupDesc->natts;
- cstate->opts.null_print_client = cstate->opts.null_print; /* default */
+ cstate->opts.null_print_client = cstate->opts.null_print; /* default */
/* We use fe_msgbuf as a per-row buffer regardless of copy_dest */
cstate->fe_msgbuf = makeStringInfo();
*/
if (cstate->need_transcoding)
cstate->opts.null_print_client = pg_server_to_any(cstate->opts.null_print,
- cstate->opts.null_print_len,
- cstate->file_encoding);
+ cstate->opts.null_print_len,
+ cstate->file_encoding);
/* if a header has been requested send the line */
if (cstate->opts.header_line)
copy_dest_receive(TupleTableSlot *slot, DestReceiver *self)
{
DR_copy *myState = (DR_copy *) self;
- CopyToState cstate = myState->cstate;
+ CopyToState cstate = myState->cstate;
/* Send the data */
CopyOneRowTo(cstate, slot);
ExplainState *es = NewExplainState();
TupOutputState *tstate;
JumbleState *jstate = NULL;
- Query *query;
+ Query *query;
List *rewritten;
ListCell *lc;
bool timing_set = false;
else if (ctas->objtype == OBJECT_MATVIEW)
ExplainDummyGroup("CREATE MATERIALIZED VIEW", NULL, es);
else
- elog(ERROR, "unexpected object type: %d",
+ elog(ERROR, "unexpected object type: %d",
(int) ctas->objtype);
return;
}
if (es->verbose && plannedstmt->queryId != UINT64CONST(0))
{
- char buf[MAXINT8LEN+1];
+ char buf[MAXINT8LEN + 1];
pg_lltoa(plannedstmt->queryId, buf);
ExplainPropertyText("Query Identifier", buf, es);
if (aggstate->hash_batches_used > 1)
{
appendStringInfo(es->str, " Disk Usage: " UINT64_FORMAT "kB",
- aggstate->hash_disk_used);
+ aggstate->hash_disk_used);
}
}
case OBJECT_SUBSCRIPTION:
case OBJECT_TABLESPACE:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cannot add an object of this type to an extension")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("cannot add an object of this type to an extension")));
break;
default:
/* OK */
ereport(DEBUG1,
(errmsg_internal("%s %s will create implicit index \"%s\" for table \"%s\"",
- is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
- constraint_type,
- indexRelationName, RelationGetRelationName(rel))));
+ is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
+ constraint_type,
+ indexRelationName, RelationGetRelationName(rel))));
}
/*
InvalidXLogRecPtr);
ereport(DEBUG1,
(errmsg_internal("table \"%s.%s\" added to subscription \"%s\"",
- rv->schemaname, rv->relname, sub->name)));
+ rv->schemaname, rv->relname, sub->name)));
}
}
ereport(DEBUG1,
(errmsg_internal("table \"%s.%s\" removed from subscription \"%s\"",
- get_namespace_name(get_rel_namespace(relid)),
- get_rel_name(relid),
- sub->name)));
+ get_namespace_name(get_rel_namespace(relid)),
+ get_rel_name(relid),
+ sub->name)));
}
}
static void ATExecSetRowSecurity(Relation rel, bool rls);
static void ATExecForceNoForceRowSecurity(Relation rel, bool force_rls);
static ObjectAddress ATExecSetCompression(AlteredTableInfo *tab, Relation rel,
- const char *column, Node *newValue, LOCKMODE lockmode);
+ const char *column, Node *newValue, LOCKMODE lockmode);
static void index_copy_data(Relation rel, RelFileNode newrnode);
static const char *storage_name(char c);
if (CompressionMethodIsValid(attribute->attcompression))
{
const char *compression =
- GetCompressionMethodName(attribute->attcompression);
+ GetCompressionMethodName(attribute->attcompression);
if (def->compression == NULL)
def->compression = pstrdup(compression);
def->location = -1;
if (CompressionMethodIsValid(attribute->attcompression))
def->compression = pstrdup(GetCompressionMethodName(
- attribute->attcompression));
+ attribute->attcompression));
else
def->compression = NULL;
inhSchema = lappend(inhSchema, def);
/* No command-specific prep needed */
pass = AT_PASS_MISC;
break;
- case AT_SetCompression: /* ALTER COLUMN SET COMPRESSION */
+ case AT_SetCompression: /* ALTER COLUMN SET COMPRESSION */
ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW);
/* This command never recurses */
/* No command-specific prep needed */
if (newrel)
ereport(DEBUG1,
(errmsg_internal("rewriting table \"%s\"",
- RelationGetRelationName(oldrel))));
+ RelationGetRelationName(oldrel))));
else
ereport(DEBUG1,
(errmsg_internal("verifying table \"%s\"",
- RelationGetRelationName(oldrel))));
+ RelationGetRelationName(oldrel))));
if (newrel)
{
{
ereport(DEBUG1,
(errmsg_internal("existing constraints on column \"%s.%s\" are sufficient to prove that it does not contain nulls",
- RelationGetRelationName(rel), NameStr(attr->attname))));
+ RelationGetRelationName(rel), NameStr(attr->attname))));
return true;
}
}
else if (IsA(stm, CreateStatsStmt))
{
- CreateStatsStmt *stmt = (CreateStatsStmt *) stm;
+ CreateStatsStmt *stmt = (CreateStatsStmt *) stm;
AlterTableCmd *newcmd;
/* keep the statistics object's comment */
if (strcmp(child_expr, parent_expr) != 0)
ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" in child table has a conflicting generation expression",
- attributeName)));
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("column \"%s\" in child table has a conflicting generation expression",
+ attributeName)));
}
/*
MarkInheritDetached(Relation child_rel, Relation parent_rel)
{
Relation catalogRelation;
- SysScanDesc scan;
+ SysScanDesc scan;
ScanKeyData key;
HeapTuple inheritsTuple;
bool found = false;
if (!IsStorageCompressible(typstorage))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("column data type %s does not support compression",
+ errmsg("column data type %s does not support compression",
format_type_be(atttableform->atttypid))));
/* get the attribute compression method. */
if (!validate_default)
ereport(DEBUG1,
(errmsg_internal("partition constraint for table \"%s\" is implied by existing constraints",
- RelationGetRelationName(scanrel))));
+ RelationGetRelationName(scanrel))));
else
ereport(DEBUG1,
(errmsg_internal("updated partition constraint for default partition \"%s\" is implied by existing constraints",
- RelationGetRelationName(scanrel))));
+ RelationGetRelationName(scanrel))));
return;
}
AccessExclusiveLock);
/*
- * Check inheritance conditions and either delete the pg_inherits row
- * (in non-concurrent mode) or just set the inhdetachpending flag.
+ * Check inheritance conditions and either delete the pg_inherits row (in
+ * non-concurrent mode) or just set the inhdetachpending flag.
*/
if (!concurrent)
RemoveInheritance(partRel, rel, false);
*/
if (concurrent)
{
- Oid partrelid,
- parentrelid;
+ Oid partrelid,
+ parentrelid;
LOCKTAG tag;
- char *parentrelname;
- char *partrelname;
+ char *parentrelname;
+ char *partrelname;
/*
* Add a new constraint to the partition being detached, which
StartTransactionCommand();
/*
- * Now wait. This ensures that all queries that were planned including
- * the partition are finished before we remove the rest of catalog
- * entries. We don't need or indeed want to acquire this lock, though
- * -- that would block later queries.
+ * Now wait. This ensures that all queries that were planned
+ * including the partition are finished before we remove the rest of
+ * catalog entries. We don't need or indeed want to acquire this
+ * lock, though -- that would block later queries.
*
* We don't need to concern ourselves with waiting for a lock on the
* partition itself, since we will acquire AccessExclusiveLock below.
static ObjectAddress
ATExecDetachPartitionFinalize(Relation rel, RangeVar *name)
{
- Relation partRel;
+ Relation partRel;
ObjectAddress address;
Snapshot snap = GetActiveSnapshot();
/* Create it if not already done. */
if (!table->storeslot)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
/*
* We only need this slot only until AfterTriggerEndQuery, but making
/* Create the multirange that goes with it */
if (multirangeTypeName)
{
- Oid old_typoid;
+ Oid old_typoid;
/*
* Look to see if multirange type already exists.
ObjectIdGetDatum(multirangeNamespace));
/*
- * If it's not a shell, see if it's an autogenerated array type, and if so
- * rename it out of the way.
+ * If it's not a shell, see if it's an autogenerated array type, and
+ * if so rename it out of the way.
*/
if (OidIsValid(old_typoid) && get_typisdefined(old_typoid))
{
mltrngaddress =
TypeCreate(multirangeOid, /* force assignment of this type OID */
multirangeTypeName, /* type name */
- multirangeNamespace, /* namespace */
+ multirangeNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
GetUserId(), /* owner's ID */
TypeCreate(multirangeArrayOid, /* force assignment of this type OID */
multirangeArrayName, /* type name */
- multirangeNamespace, /* namespace */
+ multirangeNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
GetUserId(), /* owner's ID */
PROKIND_FUNCTION,
false, /* security_definer */
false, /* leakproof */
- true, /* isStrict */
+ true, /* isStrict */
PROVOLATILE_IMMUTABLE, /* volatility */
PROPARALLEL_SAFE, /* parallel safety */
argtypes, /* parameterTypes */
PROKIND_FUNCTION,
false, /* security_definer */
false, /* leakproof */
- true, /* isStrict */
+ true, /* isStrict */
PROVOLATILE_IMMUTABLE, /* volatility */
PROPARALLEL_SAFE, /* parallel safety */
argtypes, /* parameterTypes */
/*
* Similar to above, determine the index skipping age to use for
- * multixact. In any case no less than autovacuum_multixact_freeze_max_age
- * * 1.05.
+ * multixact. In any case no less than autovacuum_multixact_freeze_max_age *
+ * 1.05.
*/
skip_index_vacuum = Max(vacuum_multixact_failsafe_age,
autovacuum_multixact_freeze_max_age * 1.05);
{
case T_IndexScan:
case T_IndexOnlyScan:
+
/*
* Not all index types support mark/restore.
*/
ExecAsyncRequest(AsyncRequest *areq)
{
if (areq->requestee->chgParam != NULL) /* something changed? */
- ExecReScan(areq->requestee); /* let ReScan handle this */
+ ExecReScan(areq->requestee); /* let ReScan handle this */
/* must provide our own instrumentation support */
if (areq->requestee->instrument)
default:
/* If the node doesn't support async, caller messed up. */
elog(ERROR, "unrecognized node type: %d",
- (int) nodeTag(areq->requestor));
+ (int) nodeTag(areq->requestor));
}
}
/*
* In some cases (e.g. an EXECUTE statement) a query execution will skip
* parse analysis, which means that the query_id won't be reported. Note
- * that it's harmless to report the query_id multiple time, as the call will
- * be ignored if the top level query_id has already been reported.
+ * that it's harmless to report the query_id multiple time, as the call
+ * will be ignored if the top level query_id has already been reported.
*/
pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);
partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
/*
- * Determine if the FDW supports batch insert and determine the batch
- * size (a FDW may support batching, but it may be disabled for the
+ * Determine if the FDW supports batch insert and determine the batch size
+ * (a FDW may support batching, but it may be disabled for the
* server/table or for this particular query).
*
* If the FDW does not support batching, we set the batch size to 1.
int64 *ntuples; /* number of tuples in each partition */
uint32 mask; /* mask to find partition from hash value */
int shift; /* after masking, shift by this amount */
- hyperLogLogState *hll_card; /* cardinality estimate for contents */
+ hyperLogLogState *hll_card; /* cardinality estimate for contents */
} HashAggSpill;
/*
/* used to find referenced colnos */
typedef struct FindColsContext
{
- bool is_aggref; /* is under an aggref */
- Bitmapset *aggregated; /* column references under an aggref */
- Bitmapset *unaggregated; /* other column references */
+ bool is_aggref; /* is under an aggref */
+ Bitmapset *aggregated; /* column references under an aggref */
+ Bitmapset *unaggregated; /* other column references */
} FindColsContext;
static void select_current_set(AggState *aggstate, int setno, bool is_hash);
static void
find_cols(AggState *aggstate, Bitmapset **aggregated, Bitmapset **unaggregated)
{
- Agg *agg = (Agg *) aggstate->ss.ps.plan;
+ Agg *agg = (Agg *) aggstate->ss.ps.plan;
FindColsContext context;
context.is_aggref = false;
for (int i = 0; i < scanDesc->natts; i++)
{
- int colno = i + 1;
+ int colno = i + 1;
+
if (bms_is_member(colno, aggstate->colnos_needed))
aggstate->max_colno_needed = colno;
else
for (i = 0; i < spill->npartitions; i++)
{
- LogicalTapeSet *tapeset = aggstate->hash_tapeinfo->tapeset;
- int tapenum = spill->partitions[i];
- HashAggBatch *new_batch;
- double cardinality;
+ LogicalTapeSet *tapeset = aggstate->hash_tapeinfo->tapeset;
+ int tapenum = spill->partitions[i];
+ HashAggBatch *new_batch;
+ double cardinality;
/* if the partition is empty, don't create a new batch of work */
if (spill->ntuples[i] == 0)
/*
* If first call then have the bms member function choose the first valid
- * sync subplan by initializing whichplan to -1. If there happen to be
- * no valid sync subplans then the bms member function will handle that
- * by returning a negative number which will allow us to exit returning a
+ * sync subplan by initializing whichplan to -1. If there happen to be no
+ * valid sync subplans then the bms member function will handle that by
+ * returning a negative number which will allow us to exit returning a
* false value.
*/
if (whichplan == INVALID_SUBPLAN_INDEX)
/*
* If all sync subplans are complete, we're totally done scanning the
- * given node. Otherwise, we're done with the asynchronous stuff but
- * must continue scanning the sync subplans.
+ * given node. Otherwise, we're done with the asynchronous stuff but must
+ * continue scanning the sync subplans.
*/
if (node->as_syncdone)
{
{
int nevents = node->as_nasyncplans + 1;
long timeout = node->as_syncdone ? -1 : 0;
- WaitEvent occurred_event[EVENT_BUFFER_SIZE];
+ WaitEvent occurred_event[EVENT_BUFFER_SIZE];
int noccurred;
int i;
/*
* Mark it as no longer needing a callback. We must do this
- * before dispatching the callback in case the callback resets
- * the flag.
+ * before dispatching the callback in case the callback resets the
+ * flag.
*/
Assert(areq->callback_pending);
areq->callback_pending = false;
PlanState *outerPlan = outerPlanState(gatherstate);
TupleTableSlot *outerTupleSlot;
TupleTableSlot *fslot = gatherstate->funnel_slot;
- MinimalTuple tup;
+ MinimalTuple tup;
while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
{
if (HeapTupleIsValid(tup))
{
- ExecStoreMinimalTuple(tup, /* tuple to store */
+ ExecStoreMinimalTuple(tup, /* tuple to store */
fslot, /* slot to store the tuple */
false); /* don't pfree tuple */
return fslot;
Assert(tup);
/* Build the TupleTableSlot for the given tuple */
- ExecStoreMinimalTuple(tup, /* tuple to store */
- gm_state->gm_slots[reader], /* slot in which to store
- * the tuple */
+ ExecStoreMinimalTuple(tup, /* tuple to store */
+ gm_state->gm_slots[reader], /* slot in which to
+ * store the tuple */
true); /* pfree tuple when done with it */
return true;
}
/*
- * If chgParam of subnode is not null, then the plan will be re-scanned
- * by the first ExecProcNode.
+ * If chgParam of subnode is not null, then the plan will be re-scanned by
+ * the first ExecProcNode.
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
} MTTargetRelLookup;
static void ExecBatchInsert(ModifyTableState *mtstate,
- ResultRelInfo *resultRelInfo,
- TupleTableSlot **slots,
- TupleTableSlot **planSlots,
- int numSlots,
- EState *estate,
- bool canSetTag);
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int numSlots,
+ EState *estate,
+ bool canSetTag);
static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
ResultRelInfo *resultRelInfo,
ItemPointer conflictTid,
if (resultRelInfo->ri_BatchSize > 1)
{
/*
- * If a certain number of tuples have already been accumulated,
- * or a tuple has come for a different relation than that for
- * the accumulated tuples, perform the batch insert
+ * If a certain number of tuples have already been accumulated, or
+ * a tuple has come for a different relation than that for the
+ * accumulated tuples, perform the batch insert
*/
if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
{
ExecBatchInsert(mtstate, resultRelInfo,
- resultRelInfo->ri_Slots,
- resultRelInfo->ri_PlanSlots,
- resultRelInfo->ri_NumSlots,
- estate, canSetTag);
+ resultRelInfo->ri_Slots,
+ resultRelInfo->ri_PlanSlots,
+ resultRelInfo->ri_NumSlots,
+ estate, canSetTag);
resultRelInfo->ri_NumSlots = 0;
}
if (resultRelInfo->ri_Slots == NULL)
{
resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
- resultRelInfo->ri_BatchSize);
+ resultRelInfo->ri_BatchSize);
resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
- resultRelInfo->ri_BatchSize);
+ resultRelInfo->ri_BatchSize);
}
resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
*/
static void
ExecBatchInsert(ModifyTableState *mtstate,
- ResultRelInfo *resultRelInfo,
- TupleTableSlot **slots,
- TupleTableSlot **planSlots,
- int numSlots,
- EState *estate,
- bool canSetTag)
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int numSlots,
+ EState *estate,
+ bool canSetTag)
{
int i;
int numInserted = numSlots;
* insert into foreign table: let the FDW do it
*/
rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
- resultRelInfo,
- slots,
- planSlots,
- &numInserted);
+ resultRelInfo,
+ slots,
+ planSlots,
+ &numInserted);
for (i = 0; i < numInserted; i++)
{
resultRelInfo = lfirst(lc);
if (resultRelInfo->ri_NumSlots > 0)
ExecBatchInsert(node, resultRelInfo,
- resultRelInfo->ri_Slots,
- resultRelInfo->ri_PlanSlots,
- resultRelInfo->ri_NumSlots,
- estate, node->canSetTag);
+ resultRelInfo->ri_Slots,
+ resultRelInfo->ri_PlanSlots,
+ resultRelInfo->ri_NumSlots,
+ estate, node->canSetTag);
}
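The batch-insert hunks above all follow one buffering discipline: accumulate slots until ri_BatchSize is reached, flush via ExecForeignBatchInsert, and flush any leftovers at the end. A self-contained C sketch of that discipline, with a hypothetical flush_batch() as the sink:

#include <stdio.h>

#define BATCH_SIZE 3

/* Hypothetical sink standing in for ExecForeignBatchInsert(). */
static void
flush_batch(const int *rows, int n)
{
	printf("flushing %d row(s):", n);
	for (int i = 0; i < n; i++)
		printf(" %d", rows[i]);
	printf("\n");
}

int
main(void)
{
	int			buffer[BATCH_SIZE];
	int			nbuffered = 0;

	for (int row = 1; row <= 7; row++)
	{
		/* If the buffer is full, perform the batch insert first. */
		if (nbuffered == BATCH_SIZE)
		{
			flush_batch(buffer, nbuffered);
			nbuffered = 0;
		}
		buffer[nbuffered++] = row;
	}

	/* Flush leftovers, as the loop over result relations does above. */
	if (nbuffered > 0)
		flush_batch(buffer, nbuffered);
	return 0;
}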
/*
mtstate->mt_resultOidHash = NULL;
/*
- * Determine if the FDW supports batch insert and determine the batch
- * size (a FDW may support batching, but it may be disabled for the
+ * Determine if the FDW supports batch insert and determine the batch size
+ * (a FDW may support batching, but it may be disabled for the
* server/table).
*
- * We only do this for INSERT, so that for UPDATE/DELETE the batch
- * size remains set to 0.
+ * We only do this for INSERT, so that for UPDATE/DELETE the batch size
+ * remains set to 0.
*/
if (operation == CMD_INSERT)
{
ereport(DEBUG1,
(errmsg_internal("time to inline: %.3fs, opt: %.3fs, emit: %.3fs",
- INSTR_TIME_GET_DOUBLE(context->base.instr.inlining_counter),
- INSTR_TIME_GET_DOUBLE(context->base.instr.optimization_counter),
- INSTR_TIME_GET_DOUBLE(context->base.instr.emission_counter)),
+ INSTR_TIME_GET_DOUBLE(context->base.instr.inlining_counter),
+ INSTR_TIME_GET_DOUBLE(context->base.instr.optimization_counter),
+ INSTR_TIME_GET_DOUBLE(context->base.instr.emission_counter)),
errhidestmt(true),
errhidecontext(true)));
}
static LLVMErrorRef
llvm_resolve_symbols(LLVMOrcDefinitionGeneratorRef GeneratorObj, void *Ctx,
- LLVMOrcLookupStateRef *LookupState, LLVMOrcLookupKind Kind,
+ LLVMOrcLookupStateRef * LookupState, LLVMOrcLookupKind Kind,
LLVMOrcJITDylibRef JD, LLVMOrcJITDylibLookupFlags JDLookupFlags,
LLVMOrcCLookupSet LookupSet, size_t LookupSetSize)
{
if (mtype != 'p')
{
/*
- * If the client just disconnects without offering a password,
- * don't make a log entry. This is legal per protocol spec and in
- * fact commonly done by psql, so complaining just clutters the
- * log.
+ * If the client just disconnects without offering a password, don't
+ * make a log entry. This is legal per protocol spec and in fact
+ * commonly done by psql, so complaining just clutters the log.
*/
if (mtype != EOF)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected password response, got message type %d",
mtype)));
- return NULL; /* EOF or bad message type */
+ return NULL; /* EOF or bad message type */
}
initStringInfo(&buf);
port->peer_cn = NULL;
return -1;
}
+
/*
* RFC2253 is the closest thing to an accepted standard format for
* DNs. We have documented how to produce this format from a
void
pq_init(void)
{
- int socket_pos PG_USED_FOR_ASSERTS_ONLY;
- int latch_pos PG_USED_FOR_ASSERTS_ONLY;
+ int socket_pos PG_USED_FOR_ASSERTS_ONLY;
+ int latch_pos PG_USED_FOR_ASSERTS_ONLY;
/* initialize state variables */
PqSendBufferSize = PQ_SEND_BUFFER_SIZE;
/*
- * All gather merge paths should have already guaranteed the necessary sort
- * order either by adding an explicit sort node or by using presorted input.
- * We can't simply add a sort here on additional pathkeys, because we can't
- * guarantee the sort would be safe. For example, expressions may be
- * volatile or otherwise parallel unsafe.
+ * All gather merge paths should have already guaranteed the necessary
+ * sort order either by adding an explicit sort node or by using presorted
+ * input. We can't simply add a sort here on additional pathkeys, because
+ * we can't guarantee the sort would be safe. For example, expressions may
+ * be volatile or otherwise parallel unsafe.
*/
if (!pathkeys_contained_in(pathkeys, best_path->subpath->pathkeys))
elog(ERROR, "gather merge input not sufficiently sorted");
Oid attcollation = att_tup->attcollation;
Node *new_expr;
- if (!att_tup->attisdropped)
- {
- new_expr = (Node *) makeConst(atttype,
- -1,
- attcollation,
- att_tup->attlen,
- (Datum) 0,
- true, /* isnull */
- att_tup->attbyval);
- new_expr = coerce_to_domain(new_expr,
- InvalidOid, -1,
- atttype,
- COERCION_IMPLICIT,
- COERCE_IMPLICIT_CAST,
- -1,
- false);
- }
- else
- {
- /* Insert NULL for dropped column */
- new_expr = (Node *) makeConst(INT4OID,
- -1,
- InvalidOid,
- sizeof(int32),
- (Datum) 0,
- true, /* isnull */
- true /* byval */ );
- }
+ if (!att_tup->attisdropped)
+ {
+ new_expr = (Node *) makeConst(atttype,
+ -1,
+ attcollation,
+ att_tup->attlen,
+ (Datum) 0,
+ true, /* isnull */
+ att_tup->attbyval);
+ new_expr = coerce_to_domain(new_expr,
+ InvalidOid, -1,
+ atttype,
+ COERCION_IMPLICIT,
+ COERCE_IMPLICIT_CAST,
+ -1,
+ false);
+ }
+ else
+ {
+ /* Insert NULL for dropped column */
+ new_expr = (Node *) makeConst(INT4OID,
+ -1,
+ InvalidOid,
+ sizeof(int32),
+ (Datum) 0,
+ true, /* isnull */
+ true /* byval */ );
+ }
new_tle = makeTargetEntry((Expr *) new_expr,
attrno,
}
case T_NullIfExpr:
{
- NullIfExpr *expr;
- ListCell *arg;
- bool has_nonconst_input = false;
+ NullIfExpr *expr;
+ ListCell *arg;
+ bool has_nonconst_input = false;
/* Copy the node and const-simplify its arguments */
expr = (NullIfExpr *) ece_generic_processing(node);
}
else
{
- /*
- * Set up to handle parameters while parsing the function body. We need a
- * dummy FuncExpr node containing the already-simplified arguments to pass
- * to prepare_sql_fn_parse_info. (In some cases we don't really need
- * that, but for simplicity we always build it.)
- */
- fexpr = makeNode(FuncExpr);
- fexpr->funcid = funcid;
- fexpr->funcresulttype = result_type;
- fexpr->funcretset = false;
- fexpr->funcvariadic = funcvariadic;
- fexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
- fexpr->funccollid = result_collid; /* doesn't matter */
- fexpr->inputcollid = input_collid;
- fexpr->args = args;
- fexpr->location = -1;
-
- pinfo = prepare_sql_fn_parse_info(func_tuple,
- (Node *) fexpr,
- input_collid);
-
- /* fexpr also provides a convenient way to resolve a composite result */
- (void) get_expr_result_type((Node *) fexpr,
- NULL,
- &rettupdesc);
+ /*
+ * Set up to handle parameters while parsing the function body. We
+ * need a dummy FuncExpr node containing the already-simplified
+ * arguments to pass to prepare_sql_fn_parse_info. (In some cases we
+ * don't really need that, but for simplicity we always build it.)
+ */
+ fexpr = makeNode(FuncExpr);
+ fexpr->funcid = funcid;
+ fexpr->funcresulttype = result_type;
+ fexpr->funcretset = false;
+ fexpr->funcvariadic = funcvariadic;
+ fexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
+ fexpr->funccollid = result_collid; /* doesn't matter */
+ fexpr->inputcollid = input_collid;
+ fexpr->args = args;
+ fexpr->location = -1;
+
+ pinfo = prepare_sql_fn_parse_info(func_tuple,
+ (Node *) fexpr,
+ input_collid);
+
+ /* fexpr also provides a convenient way to resolve a composite result */
+ (void) get_expr_result_type((Node *) fexpr,
+ NULL,
+ &rettupdesc);
- /*
- * We just do parsing and parse analysis, not rewriting, because rewriting
- * will not affect table-free-SELECT-only queries, which is all that we
- * care about. Also, we can punt as soon as we detect more than one
- * command in the function body.
- */
- raw_parsetree_list = pg_parse_query(src);
- if (list_length(raw_parsetree_list) != 1)
- goto fail;
+ /*
+ * We just do parsing and parse analysis, not rewriting, because
+ * rewriting will not affect table-free-SELECT-only queries, which is
+ * all that we care about. Also, we can punt as soon as we detect
+ * more than one command in the function body.
+ */
+ raw_parsetree_list = pg_parse_query(src);
+ if (list_length(raw_parsetree_list) != 1)
+ goto fail;
- pstate = make_parsestate(NULL);
- pstate->p_sourcetext = src;
- sql_fn_parser_setup(pstate, pinfo);
+ pstate = make_parsestate(NULL);
+ pstate->p_sourcetext = src;
+ sql_fn_parser_setup(pstate, pinfo);
- querytree = transformTopLevelStmt(pstate, linitial(raw_parsetree_list));
+ querytree = transformTopLevelStmt(pstate, linitial(raw_parsetree_list));
- free_parsestate(pstate);
+ free_parsestate(pstate);
}
/*
}
else
{
- /*
- * Set up to handle parameters while parsing the function body. We can
- * use the FuncExpr just created as the input for
- * prepare_sql_fn_parse_info.
- */
- pinfo = prepare_sql_fn_parse_info(func_tuple,
- (Node *) fexpr,
- fexpr->inputcollid);
+ /*
+ * Set up to handle parameters while parsing the function body. We
+ * can use the FuncExpr just created as the input for
+ * prepare_sql_fn_parse_info.
+ */
+ pinfo = prepare_sql_fn_parse_info(func_tuple,
+ (Node *) fexpr,
+ fexpr->inputcollid);
- /*
- * Parse, analyze, and rewrite (unlike inline_function(), we can't skip
- * rewriting here). We can fail as soon as we find more than one query,
- * though.
- */
- raw_parsetree_list = pg_parse_query(src);
- if (list_length(raw_parsetree_list) != 1)
- goto fail;
+ /*
+ * Parse, analyze, and rewrite (unlike inline_function(), we can't
+ * skip rewriting here). We can fail as soon as we find more than one
+ * query, though.
+ */
+ raw_parsetree_list = pg_parse_query(src);
+ if (list_length(raw_parsetree_list) != 1)
+ goto fail;
- querytree_list = pg_analyze_and_rewrite_params(linitial(raw_parsetree_list),
- src,
- (ParserSetupHook) sql_fn_parser_setup,
- pinfo, NULL);
- if (list_length(querytree_list) != 1)
- goto fail;
- querytree = linitial(querytree_list);
+ querytree_list = pg_analyze_and_rewrite_params(linitial(raw_parsetree_list),
+ src,
+ (ParserSetupHook) sql_fn_parser_setup,
+ pinfo, NULL);
+ if (list_length(querytree_list) != 1)
+ goto fail;
+ querytree = linitial(querytree_list);
}
/*
(stmt->options & CURSOR_OPT_NO_SCROLL))
ereport(ERROR,
(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
- /* translator: %s is a SQL keyword */
+ /* translator: %s is a SQL keyword */
errmsg("cannot specify both %s and %s",
"SCROLL", "NO SCROLL")));
(stmt->options & CURSOR_OPT_INSENSITIVE))
ereport(ERROR,
(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
- /* translator: %s is a SQL keyword */
+ /* translator: %s is a SQL keyword */
errmsg("cannot specify both %s and %s",
"ASENSITIVE", "INSENSITIVE")));
static int
cmp_list_len_contents_asc(const ListCell *a, const ListCell *b)
{
- int res = cmp_list_len_asc(a, b);
+ int res = cmp_list_len_asc(a, b);
if (res == 0)
{
- List *la = (List *) lfirst(a);
- List *lb = (List *) lfirst(b);
- ListCell *lca;
- ListCell *lcb;
+ List *la = (List *) lfirst(a);
+ List *lb = (List *) lfirst(b);
+ ListCell *lca;
+ ListCell *lcb;
forboth(lca, la, lcb, lb)
{
- int va = lfirst_int(lca);
- int vb = lfirst_int(lcb);
+ int va = lfirst_int(lca);
+ int vb = lfirst_int(lcb);
+
if (va > vb)
return 1;
if (va < vb)
* than just being recursive. It basically means the query expression
* looks like
*
- * non-recursive query UNION [ALL] recursive query
+ * non-recursive query UNION [ALL] recursive query
*
* and that the recursive query is not itself a set operation.
*
* list --- caller must do that if appropriate.
*/
psi = buildNSItemFromLists(rte, list_length(pstate->p_rtable),
- rte->coltypes, rte->coltypmods,
- rte->colcollations);
+ rte->coltypes, rte->coltypmods,
+ rte->colcollations);
/*
* The columns added by search and cycle clauses are not included in star
ereport(DEBUG1,
(errmsg_internal("%s will create implicit sequence \"%s\" for serial column \"%s.%s\"",
- cxt->stmtType, sname,
- cxt->relation->relname, column->colname)));
+ cxt->stmtType, sname,
+ cxt->relation->relname, column->colname)));
/*
* Build a CREATE SEQUENCE command to create the sequence object, and add
{
int prev_modulus;
- /* We found the largest modulus less than or equal to ours. */
+ /*
+ * We found the largest modulus less than or equal to
+ * ours.
+ */
prev_modulus = DatumGetInt32(boundinfo->datums[offset][0]);
if (spec->modulus % prev_modulus != 0)
{
ereport(DEBUG1,
(errmsg_internal("updated partition constraint for default partition \"%s\" is implied by existing constraints",
- RelationGetRelationName(default_rel))));
+ RelationGetRelationName(default_rel))));
return;
}
{
ereport(DEBUG1,
(errmsg_internal("updated partition constraint for default partition \"%s\" is implied by existing constraints",
- RelationGetRelationName(part_rel))));
+ RelationGetRelationName(part_rel))));
table_close(part_rel, NoLock);
continue;
* descriptor, it contains an old partition descriptor that may still be
* referenced somewhere. Preserve it, while not leaking it, by
* reattaching it as a child context of the new one. Eventually it will
- * get dropped by either RelationClose or RelationClearRelation.
- * (We keep the regular partdesc in rd_pdcxt, and the partdesc-excluding-
+ * get dropped by either RelationClose or RelationClearRelation. (We keep
+ * the regular partdesc in rd_pdcxt, and the partdesc-excluding-
* detached-partitions in rd_pddcxt.)
*/
if (is_omit)
{
ereport(elevel,
(errmsg("could not enable user right \"%s\": error code %lu",
- /* translator: This is a term from Windows and should be translated to match the Windows localization. */
+
+ /*
+ * translator: This is a term from Windows and should be translated to
+ * match the Windows localization.
+ */
_("Lock pages in memory"),
GetLastError()),
errdetail("Failed system call was %s.", "OpenProcessToken")));
/* Log it! */
ereport(DEBUG1,
(errmsg_internal("registering background worker \"%s\"",
- rw->rw_worker.bgw_name)));
+ rw->rw_worker.bgw_name)));
slist_push_head(&BackgroundWorkerList, &rw->rw_lnode);
}
ereport(DEBUG1,
(errmsg_internal("unregistering background worker \"%s\"",
- rw->rw_worker.bgw_name)));
+ rw->rw_worker.bgw_name)));
slist_delete_current(cur);
free(rw);
}
ereport(DEBUG1,
(errmsg_internal("compacted fsync request queue from %d entries to %d entries",
- CheckpointerShmem->num_requests, preserve_count)));
+ CheckpointerShmem->num_requests, preserve_count)));
CheckpointerShmem->num_requests = preserve_count;
/* Cleanup. */
{
fputc('R', fpout);
rc = fwrite(slotent, sizeof(PgStat_StatReplSlotEntry), 1, fpout);
- (void) rc; /* we'll check for error with ferror */
+ (void) rc; /* we'll check for error with ferror */
}
}
pqsignal_pm(SIGCHLD, reaper); /* handle child termination */
#ifdef SIGURG
+
/*
* Ignore SIGURG for now. Child processes may change this (see
* InitializeLatchSupport), but they will not receive any such signals
ereport(DEBUG1,
(errmsg_internal("starting background worker process \"%s\"",
- rw->rw_worker.bgw_name)));
+ rw->rw_worker.bgw_name)));
#ifdef EXEC_BACKEND
switch ((worker_pid = bgworker_forkexec(rw->rw_shmem_slot)))
* broken backends...
*/
- pqsignal(SIGHUP, SignalHandlerForConfigReload); /* set flag to read config file */
+ pqsignal(SIGHUP, SignalHandlerForConfigReload); /* set flag to read config
+ * file */
pqsignal(SIGINT, SIG_IGN);
pqsignal(SIGTERM, SIG_IGN);
pqsignal(SIGQUIT, SIG_IGN);
if (ti->path == NULL)
{
struct stat statbuf;
- bool sendtblspclinks = true;
+ bool sendtblspclinks = true;
/* In the main tar, include the backup_label first... */
sendFileWithContent(BACKUP_LABEL_FILE, labelfile->data,
LogicalRepCtxStruct *LogicalRepCtx;
-typedef struct LogicalRepWorkerId
-{
- Oid subid;
- Oid relid;
-} LogicalRepWorkerId;
-
static void ApplyLauncherWakeup(void);
static void logicalrep_launcher_onexit(int code, Datum arg);
static void logicalrep_worker_onexit(int code, Datum arg);
ereport(DEBUG1,
(errmsg_internal("starting logical replication worker for subscription \"%s\"",
- subname)));
+ subname)));
/* Report this after the initial starting message for consistency. */
if (max_replication_slots == 0)
tmppath)));
/*
- * no other backend can perform this at the same time; only one
- * checkpoint can happen at a time.
+ * no other backend can perform this at the same time; only one checkpoint
+ * can happen at a time.
*/
tmpfd = OpenTransientFile(tmppath,
O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
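The comment above leans on O_CREAT | O_EXCL: the open fails outright if the file already exists, so file creation doubles as a mutual-exclusion check between would-be writers. A minimal POSIX sketch (hypothetical path; OpenTransientFile is a backend wrapper around the same open() flags):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* Hypothetical temp path; a second creator fails with EEXIST. */
	int			fd = open("/tmp/snapbuild.tmp",
						  O_CREAT | O_EXCL | O_WRONLY, 0600);

	if (fd < 0)
	{
		perror("open");			/* someone else created it first */
		return 1;
	}
	/* ... write the state file, fsync, rename into place ... */
	close(fd);
	unlink("/tmp/snapbuild.tmp");
	return 0;
}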
* need to do the cleanup and return gracefully on this error, see
* SetupCheckXidLive.
*
- * This error code can be thrown by one of the callbacks we call during
- * decoding so we need to ensure that we return gracefully only when we are
- * sending the data in streaming mode and the streaming is not finished yet
- * or when we are sending the data out on a PREPARE during a two-phase
- * commit.
+ * This error code can be thrown by one of the callbacks we call
+ * during decoding so we need to ensure that we return gracefully only
+ * when we are sending the data in streaming mode and the streaming is
+ * not finished yet or when we are sending the data out on a PREPARE
+ * during a two-phase commit.
*/
if (errdata->sqlerrcode == ERRCODE_TRANSACTION_ROLLBACK &&
(stream_started || rbtxn_prepared(txn)))
/*
* All transactions we needed to finish finished - try to ensure there is
* another xl_running_xacts record in a timely manner, without having to
- * wait for bgwriter or checkpointer to log one. During recovery we
- * can't enforce that, so we'll have to wait.
+ * wait for bgwriter or checkpointer to log one. During recovery we can't
+ * enforce that, so we'll have to wait.
*/
if (!RecoveryInProgress())
{
int max_replication_slots = 0; /* the maximum number of replication
* slots */
-static int ReplicationSlotAcquireInternal(ReplicationSlot *slot,
- const char *name, SlotAcquireBehavior behavior);
+static int ReplicationSlotAcquireInternal(ReplicationSlot *slot,
+ const char *name, SlotAcquireBehavior behavior);
static void ReplicationSlotDropAcquired(void);
static void ReplicationSlotDropPtr(ReplicationSlot *slot);
/*
* If we found the slot but it's already active in another process, we
- * either error out, return the PID of the owning process, or retry
- * after a short wait, as caller specified.
+ * either error out, return the PID of the owning process, or retry after
+ * a short wait, as caller specified.
*/
if (active_pid != MyProcPid)
{
goto retry;
}
else if (behavior == SAB_Block)
- ConditionVariableCancelSleep(); /* no sleep needed after all */
+ ConditionVariableCancelSleep(); /* no sleep needed after all */
/* Let everybody know we've modified this slot */
ConditionVariableBroadcast(&s->active_cv);
ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
XLogRecPtr restart_lsn = InvalidXLogRecPtr;
NameData slotname;
- int wspid;
- int last_signaled_pid = 0;
+ int wspid;
+ int last_signaled_pid = 0;
if (!s->in_use)
continue;
/*
* Try to mark this slot as used by this process.
*
- * Note that ReplicationSlotAcquireInternal(SAB_Inquire)
- * should not cancel the prepared condition variable
- * if this slot is active in other process. Because in this case
- * we have to wait on that CV for the process owning
- * the slot to be terminated, later.
+ * Note that ReplicationSlotAcquireInternal(SAB_Inquire) should
+ * not cancel the prepared condition variable if this slot is
+ * active in other process. Because in this case we have to wait
+ * on that CV for the process owning the slot to be terminated,
+ * later.
*/
wspid = ReplicationSlotAcquireInternal(s, NULL, SAB_Inquire);
/*
- * Exit the loop if we successfully acquired the slot or
- * the slot was dropped during waiting for the owning process
- * to be terminated. For example, the latter case is likely to
- * happen when the slot is temporary because it's automatically
- * dropped by the termination of the owning process.
+ * Exit the loop if we successfully acquired the slot or the slot
+ * was dropped during waiting for the owning process to be
+ * terminated. For example, the latter case is likely to happen
+ * when the slot is temporary because it's automatically dropped
+ * by the termination of the owning process.
*/
if (wspid <= 0)
break;
/*
* Signal to terminate the process that owns the slot.
*
- * There is the race condition where other process may own
- * the slot after the process using it was terminated and before
- * this process owns it. To handle this case, we signal again
- * if the PID of the owning process is changed than the last.
+ * There is the race condition where other process may own the
+ * slot after the process using it was terminated and before this
+ * process owns it. To handle this case, we signal again if the
+ * PID of the owning process is changed than the last.
*
- * XXX This logic assumes that the same PID is not reused
- * very quickly.
+ * XXX This logic assumes that the same PID is not reused very
+ * quickly.
*/
if (last_signaled_pid != wspid)
{
ConditionVariableCancelSleep();
/*
- * Do nothing here and start from scratch if the slot has
- * already been dropped.
+ * Do nothing here and start from scratch if the slot has already been
+ * dropped.
*/
if (wspid == -1)
goto restart;
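The re-wrapped comments in this loop spell out its contract: keep probing the slot, signal the owner, but re-signal only when the owning PID has changed, and start over if the slot is dropped while waiting. A condensed standalone sketch of that control flow, with a hypothetical try_acquire() simulating ReplicationSlotAcquireInternal(SAB_Inquire) (the real code sleeps on a condition variable between probes):

#include <stdio.h>

/*
 * Hypothetical probe: returns the owning PID while the slot is busy,
 * 0 once acquired, -1 if the slot was dropped while we waited.
 */
static int
try_acquire(void)
{
	static int	calls = 0;
	static const int answers[] = {4242, 4242, 5151, 0};

	return answers[calls++];
}

int
main(void)
{
	int			last_signaled_pid = 0;

	for (;;)
	{
		int			wspid = try_acquire();

		/* Acquired, or dropped while waiting: either way, stop looping. */
		if (wspid <= 0)
		{
			if (wspid < 0)
				printf("slot dropped; start over from scratch\n");
			break;
		}

		/* Signal again only if a different process now owns the slot. */
		if (last_signaled_pid != wspid)
		{
			printf("signalling owner %d\n", wspid);
			last_signaled_pid = wspid;
		}
		/* Real code waits on the slot's condition variable here. */
	}
	return 0;
}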
nulls[i++] = true;
else
{
- XLogSegNo targetSeg;
- uint64 slotKeepSegs;
- uint64 keepSegs;
- XLogSegNo failSeg;
- XLogRecPtr failLSN;
+ XLogSegNo targetSeg;
+ uint64 slotKeepSegs;
+ uint64 keepSegs;
+ XLogSegNo failSeg;
+ XLogRecPtr failLSN;
XLByteToSeg(slot_contents.data.restart_lsn, targetSeg, wal_segment_size);
* Since this routine gets called every commit time, it's important to
* exit quickly if sync replication is not requested. So we check
* WalSndCtl->sync_standbys_defined flag without the lock and exit
- * immediately if it's false. If it's true, we need to check it again later
- * while holding the lock, to check the flag and operate the sync rep
- * queue atomically. This is necessary to avoid the race condition
- * described in SyncRepUpdateSyncStandbysDefined(). On the other
- * hand, if it's false, the lock is not necessary because we don't touch
- * the queue.
+ * immediately if it's false. If it's true, we need to check it again
+ * later while holding the lock, to check the flag and operate the sync
+ * rep queue atomically. This is necessary to avoid the race condition
+ * described in SyncRepUpdateSyncStandbysDefined(). On the other hand, if
+ * it's false, the lock is not necessary because we don't touch the queue.
*/
if (!SyncRepRequested() ||
!((volatile WalSndCtlData *) WalSndCtl)->sync_standbys_defined)
ereport(DEBUG1,
(errmsg_internal("standby \"%s\" now has synchronous standby priority %u",
- application_name, priority)));
+ application_name, priority)));
}
}
writeTimeLineHistoryFile(tli, content, len);
/*
- * Mark the streamed history file as ready for archiving
- * if archive_mode is always.
+ * Mark the streamed history file as ready for archiving if
+ * archive_mode is always.
*/
if (XLogArchiveMode != ARCHIVE_MODE_ALWAYS)
XLogArchiveForceDone(fname);
{
ereport(DEBUG1,
(errmsg_internal("\"%s\" has now caught up with upstream server",
- application_name)));
+ application_name)));
WalSndSetState(WALSNDSTATE_STREAMING);
}
static void
WalSndWait(uint32 socket_events, long timeout, uint32 wait_event)
{
- WaitEvent event;
+ WaitEvent event;
ModifyWaitEvent(FeBeWaitSet, FeBeWaitSetSocketPos, socket_events, NULL);
if (WaitEventSetWait(FeBeWaitSet, timeout, &event, 1, wait_event) == 1 &&
mss = multi_sort_init(k);
/*
- * Translate the array of indexes to regular attnums for the dependency (we
- * will need this to identify the columns in StatsBuildData).
+ * Translate the array of indexes to regular attnums for the dependency
+ * (we will need this to identify the columns in StatsBuildData).
*/
attnums_dep = (AttrNumber *) palloc(k * sizeof(AttrNumber));
for (i = 0; i < k; i++)
} AnlExprData;
static void compute_expr_stats(Relation onerel, double totalrows,
- AnlExprData * exprdata, int nexprs,
+ AnlExprData *exprdata, int nexprs,
HeapTuple *rows, int numrows);
-static Datum serialize_expr_stats(AnlExprData * exprdata, int nexprs);
+static Datum serialize_expr_stats(AnlExprData *exprdata, int nexprs);
static Datum expr_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static AnlExprData *build_expr_data(List *exprs, int stattarget);
/*
* When analyzing an expression, believe the expression tree's type not
- * the column datatype --- the latter might be the opckeytype storage
- * type of the opclass, which is not interesting for our purposes. (Note:
- * if we did anything with non-expression statistics columns, we'd need to
+ * the column datatype --- the latter might be the opckeytype storage type
+ * of the opclass, which is not interesting for our purposes. (Note: if
+ * we did anything with non-expression statistics columns, we'd need to
* figure out where to get the correct type info from, but for now that's
* not a problem.) It's not clear whether anyone will care about the
* typmod, but we store that too just in case.
* attnums of expressions from it. Ignore it if it's not fully
* covered by the chosen statistics.
*
- * We need to check both attributes and expressions, and reject
- * if either is not covered.
+ * We need to check both attributes and expressions, and reject if
+ * either is not covered.
*/
if (!bms_is_subset(list_attnums[listidx], stat->keys) ||
!stat_covers_expressions(stat, list_exprs[listidx], NULL))
continue;
/*
- * Now we know the clause is compatible (we have either attnums
- * or expressions extracted from it), and was not estimated yet.
+ * Now we know the clause is compatible (we have either attnums or
+ * expressions extracted from it), and was not estimated yet.
*/
/* record simple clauses (single column or expression) */
int j;
RelFileNodeBackend rnode;
BlockNumber nForkBlock[MAX_FORKNUM];
- uint64 nBlocksToInvalidate = 0;
+ uint64 nBlocksToInvalidate = 0;
rnode = smgr_reln->smgr_rnode;
int n = 0;
SMgrRelation *rels;
BlockNumber (*block)[MAX_FORKNUM + 1];
- uint64 nBlocksToInvalidate = 0;
+ uint64 nBlocksToInvalidate = 0;
RelFileNode *nodes;
bool cached = true;
bool use_bsearch;
static void
do_syncfs(const char *path)
{
- int fd;
+ int fd;
fd = OpenTransientFile(path, O_RDONLY);
if (fd < 0)
do_syncfs("pg_wal");
return;
}
-#endif /* !HAVE_SYNCFS */
+#endif /* !HAVE_SYNCFS */
/*
* If possible, hint to the kernel that we're soon going to fsync the data
SharedFileSetDeleteOnProcExit(int status, Datum arg)
{
/*
- * Remove all the pending shared fileset entries. We don't use foreach() here
- * because SharedFileSetDeleteAll will remove the current element in
+ * Remove all the pending shared fileset entries. We don't use foreach()
+ * here because SharedFileSetDeleteAll will remove the current element in
* filesetlist. Though we have used foreach_delete_current() to remove the
* element from filesetlist it could only fix up the state of one of the
* loops, see SharedFileSetUnregister.
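The hazard this comment describes is general: a cursor-style foreach() breaks when the per-element cleanup (here SharedFileSetDeleteAll) unlinks the element itself. The removal-safe shape is to re-read the head each iteration instead of saving a next pointer. A minimal standalone C illustration:

#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
	int			value;
	struct Node *next;
} Node;

static Node *head;

/* Stand-in for SharedFileSetDeleteAll(): unlinks the node itself. */
static void
cleanup(Node *n)
{
	printf("cleaning up %d\n", n->value);
	head = n->next;
	free(n);
}

int
main(void)
{
	for (int i = 3; i >= 1; i--)
	{
		Node	   *n = malloc(sizeof(Node));

		n->value = i;
		n->next = head;
		head = n;
	}

	/*
	 * Don't iterate with a saved next pointer: cleanup() mutates the list.
	 * Re-read the head every time instead, as the comment above recommends
	 * for filesetlist.
	 */
	while (head != NULL)
		cleanup(head);
	return 0;
}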
(cur_kqueue_event->fflags & NOTE_EXIT) != 0)
{
/*
- * The kernel will tell this kqueue object only once about the exit
- * of the postmaster, so let's remember that for next time so that
- * we provide level-triggered semantics.
+ * The kernel will tell this kqueue object only once about the
+ * exit of the postmaster, so let's remember that for next time so
+ * that we provide level-triggered semantics.
*/
set->report_postmaster_not_running = true;
static bool
GetSnapshotDataReuse(Snapshot snapshot)
{
- uint64 curXactCompletionCount;
+ uint64 curXactCompletionCount;
Assert(LWLockHeldByMe(ProcArrayLock));
* holding ProcArrayLock) exclusively). Thus the xactCompletionCount check
* ensures we would detect if the snapshot would have changed.
*
- * As the snapshot contents are the same as it was before, it is safe
- * to re-enter the snapshot's xmin into the PGPROC array. None of the rows
+ * As the snapshot contents are the same as it was before, it is safe to
+ * re-enter the snapshot's xmin into the PGPROC array. None of the rows
* visible under the snapshot could already have been removed (that'd
* require the set of running transactions to change) and it fulfills the
* requirement that concurrent GetSnapshotData() calls yield the same
continue;
/*
- * The only way we are able to get here with a non-normal xid
- * is during bootstrap - with this backend using
- * BootstrapTransactionId. But the above test should filter
- * that out.
+ * The only way we are able to get here with a non-normal xid is
+ * during bootstrap - with this backend using
+ * BootstrapTransactionId. But the above test should filter that
+ * out.
*/
Assert(TransactionIdIsNormal(xid));
*/
typedef struct
{
- volatile pid_t pss_pid;
+ volatile pid_t pss_pid;
volatile sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
pg_atomic_uint64 pss_barrierGeneration;
pg_atomic_uint32 pss_barrierCheckMask;
{
uint64 local_gen;
uint64 shared_gen;
- volatile uint32 flags;
+ volatile uint32 flags;
Assert(MyProcSignalSlot);
* extract the flags, and that any subsequent state changes happen
* afterward.
*
- * NB: In order to avoid race conditions, we must zero pss_barrierCheckMask
- * first and only afterwards try to do barrier processing. If we did it
- * in the other order, someone could send us another barrier of some
- * type right after we called the barrier-processing function but before
- * we cleared the bit. We would have no way of knowing that the bit needs
- * to stay set in that case, so the need to call the barrier-processing
- * function again would just get forgotten. So instead, we tentatively
- * clear all the bits and then put back any for which we don't manage
- * to successfully absorb the barrier.
+ * NB: In order to avoid race conditions, we must zero
+ * pss_barrierCheckMask first and only afterwards try to do barrier
+ * processing. If we did it in the other order, someone could send us
+ * another barrier of some type right after we called the
+ * barrier-processing function but before we cleared the bit. We would
+ * have no way of knowing that the bit needs to stay set in that case, so
+ * the need to call the barrier-processing function again would just get
+ * forgotten. So instead, we tentatively clear all the bits and then put
+ * back any for which we don't manage to successfully absorb the barrier.
*/
flags = pg_atomic_exchange_u32(&MyProcSignalSlot->pss_barrierCheckMask, 0);
*/
if (flags != 0)
{
- bool success = true;
+ bool success = true;
PG_TRY();
{
/*
* Process each type of barrier. The barrier-processing functions
- * should normally return true, but may return false if the barrier
- * can't be absorbed at the current time. This should be rare,
- * because it's pretty expensive. Every single
+ * should normally return true, but may return false if the
+ * barrier can't be absorbed at the current time. This should be
+ * rare, because it's pretty expensive. Every single
* CHECK_FOR_INTERRUPTS() will return here until we manage to
* absorb the barrier, and that cost will add up in a hurry.
*
*/
while (flags != 0)
{
- ProcSignalBarrierType type;
- bool processed = true;
+ ProcSignalBarrierType type;
+ bool processed = true;
type = (ProcSignalBarrierType) pg_rightmost_one_pos32(flags);
switch (type)
}
/*
- * To avoid an infinite loop, we must always unset the bit
- * in flags.
+ * To avoid an infinite loop, we must always unset the bit in
+ * flags.
*/
BARRIER_CLEAR_BIT(flags, type);
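Taken together, the two re-wrapped comments above describe one mechanism: atomically exchange the pending-barrier mask with zero, process bits from the rightmost up, put back any bit whose barrier could not be absorbed, and unconditionally clear each local bit so the loop terminates. A self-contained C11 sketch of that pattern, with a hypothetical absorb() in place of the per-barrier processing functions:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <strings.h>			/* ffs() */

static _Atomic unsigned int check_mask = 0x5;	/* two barriers pending */

/* Hypothetical processor; pretend barrier type 2 can't be absorbed yet. */
static bool
absorb(int type)
{
	return type != 2;
}

int
main(void)
{
	/* Tentatively claim all pending bits in one atomic exchange. */
	unsigned int flags = atomic_exchange(&check_mask, 0);

	while (flags != 0)
	{
		int			type = ffs((int) flags) - 1;	/* rightmost set bit */

		if (!absorb(type))
			atomic_fetch_or(&check_mask, 1U << type);	/* put it back */

		/* Always clear the local bit so the loop terminates. */
		flags &= ~(1U << type);
	}
	printf("still pending: 0x%x\n", atomic_load(&check_mask));
	return 0;
}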
* Wait in steps of waittime milliseconds until this function exits or
* timeout.
*/
- int64 waittime = 100;
+ int64 waittime = 100;
+
/*
* Initially remaining time is the entire timeout specified by the user.
*/
- int64 remainingtime = timeout;
+ int64 remainingtime = timeout;
/*
* Check existence of the backend. If the backend still exists, then wait
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("could not check the existence of the backend with PID %d: %m",
- pid)));
+ pid)));
}
/* Process interrupts, if any, before waiting */
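In outline, the polling loop that waittime and remainingtime drive looks like this (a sketch using the signal-0 existence probe; the ereport() path shown above is elided here):

    do
    {
        if (kill(pid, 0) == -1)
        {
            if (errno == ESRCH)
                return true;    /* backend has exited */
            /* any other errno is reported via the ereport() shown above */
        }

        CHECK_FOR_INTERRUPTS();

        /* Sleep for one step, then charge it against the remaining budget. */
        pg_usleep(waittime * 1000L);
        remainingtime -= waittime;
    } while (remainingtime > 0);

    return false;               /* still alive when the timeout expired */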
Datum
pg_terminate_backend(PG_FUNCTION_ARGS)
{
- int pid;
- int r;
- int timeout;
+ int pid;
+ int r;
+ int timeout;
pid = PG_GETARG_INT32(0);
timeout = PG_GETARG_INT64(1);
if (timeout < 0)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"timeout\" must not be negative")));
+ errmsg("\"timeout\" must not be negative")));
r = pg_signal_backend(pid, SIGTERM);
Datum
pg_wait_for_backend_termination(PG_FUNCTION_ARGS)
{
- int pid;
- int64 timeout;
- PGPROC *proc = NULL;
+ int pid;
+ int64 timeout;
+ PGPROC *proc = NULL;
pid = PG_GETARG_INT32(0);
timeout = PG_GETARG_INT64(1);
if (timeout <= 0)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"timeout\" must not be negative or zero")));
+ errmsg("\"timeout\" must not be negative or zero")));
proc = BackendPidGetProc(pid);
* snapshots that still see it.
*/
FullTransactionId nextXid = ReadNextFullTransactionId();
- uint64 diff;
+ uint64 diff;
diff = U64FromFullTransactionId(nextXid) -
U64FromFullTransactionId(latestRemovedFullXid);
{
Size size = 0;
Size TotalProcs =
- add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
+ add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
/* ProcGlobal */
size = add_size(size, sizeof(PROC_HDR));
/*
* Set timer so we can wake up after a while and check for a deadlock. If a
* deadlock is detected, the handler sets MyProc->waitStatus =
- * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure rather
- * than success.
+ * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
+ * rather than success.
*
* By delaying the check until we've waited for a bit, we can avoid
* running the rather expensive deadlock-check code in most cases.
}
/*
- * waitStatus could change from PROC_WAIT_STATUS_WAITING to something else
- * asynchronously. Read it just once per loop to prevent surprising
- * behavior (such as missing log messages).
+ * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
+ * else asynchronously. Read it just once per loop to prevent
+ * surprising behavior (such as missing log messages).
*/
myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
ereport(DEBUG1,
(errmsg_internal("sending cancel to blocking autovacuum PID %d",
- pid),
+ pid),
errdetail_log("%s", logbuf.data)));
pfree(locktagbuf.data);
/*
* Currently, the deadlock checker always kicks its own
- * process, which means that we'll only see PROC_WAIT_STATUS_ERROR when
- * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
- * print redundant messages. But for completeness and
- * future-proofing, print a message if it looks like someone
- * else kicked us off the lock.
+ * process, which means that we'll only see
+ * PROC_WAIT_STATUS_ERROR when deadlock_state ==
+ * DS_HARD_DEADLOCK, and there's no need to print redundant
+ * messages. But for completeness and future-proofing, print
+ * a message if it looks like someone else kicked us off the
+ * lock.
*/
if (deadlock_state != DS_HARD_DEADLOCK)
ereport(LOG,
* preserve the flexibility to kill some other transaction than the
* one detecting the deadlock.)
*
- * RemoveFromWaitQueue sets MyProc->waitStatus to PROC_WAIT_STATUS_ERROR, so
- * ProcSleep will report an error after we return from the signal
- * handler.
+ * RemoveFromWaitQueue sets MyProc->waitStatus to
+ * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
+ * return from the signal handler.
*/
Assert(MyProc->waitLock != NULL);
RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
#else
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
-#endif /* DISABLE_ATOMICS */
+#endif /* DISABLE_ATOMICS */
PGSemaphore *SpinlockSemaArray;
* group at the end of the line pointer array.
*/
for (offsetNumber = FirstOffsetNumber;
- offsetNumber < limit; /* limit is maxoff+1 */
+ offsetNumber < limit; /* limit is maxoff+1 */
offsetNumber++)
{
itemId = PageGetItemId(phdr, offsetNumber);
ereport(DEBUG1,
(errcode_for_file_access(),
errmsg_internal("could not fsync file \"%s\" but retrying: %m",
- path)));
+ path)));
/*
* Absorb incoming requests and check to see if a cancel
* the type.
*/
if (pq_getmessage(inBuf, maxmsglen))
- return EOF; /* suitable message already logged */
+ return EOF; /* suitable message already logged */
RESUME_CANCEL_INTERRUPTS();
return qtype;
ereport(DEBUG2,
(errmsg_internal("parse %s: %s",
- *stmt_name ? stmt_name : "",
- query_string)));
+ *stmt_name ? stmt_name : "",
+ query_string)));
/*
* Start up a transaction command so we can run parse analysis etc. (Note
ereport(DEBUG2,
(errmsg_internal("bind %s to %s",
- *portal_name ? portal_name : "",
- *stmt_name ? stmt_name : "")));
+ *portal_name ? portal_name : "",
+ *stmt_name ? stmt_name : "")));
/* Find prepared statement */
if (stmt_name[0] != '\0')
*/
#include "postgres.h"
-#include "port/atomics.h" /* for memory barriers */
+#include "port/atomics.h" /* for memory barriers */
#include "utils/backend_progress.h"
#include "utils/backend_status.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
-#include "port/atomics.h" /* for memory barriers */
+#include "port/atomics.h" /* for memory barriers */
#include "storage/ipc.h"
-#include "storage/proc.h" /* for MyProc */
+#include "storage/proc.h" /* for MyProc */
#include "storage/sinvaladt.h"
#include "utils/ascii.h"
#include "utils/backend_status.h"
-#include "utils/guc.h" /* for application_name */
+#include "utils/guc.h" /* for application_name */
#include "utils/memutils.h"
{
if (!backendStatusSnapContext)
backendStatusSnapContext = AllocSetContextCreate(TopMemoryContext,
- "Backend Status Snapshot",
- ALLOCSET_SMALL_SIZES);
+ "Backend Status Snapshot",
+ ALLOCSET_SMALL_SIZES);
}
if (!MyBEEntry)
return 0;
- /* There's no need for a lock around pgstat_begin_read_activity /
+ /*
+ * There's no need for a lock around pgstat_begin_read_activity /
* pgstat_end_read_activity here as it's only called from
* pg_stat_get_activity which is already protected, or from the same
* backend which means that there won't be concurrent writes.
*/
#include "postgres.h"
-#include "storage/lmgr.h" /* for GetLockNameFromTagType */
-#include "storage/lwlock.h" /* for GetLWLockIdentifier */
+#include "storage/lmgr.h" /* for GetLockNameFromTagType */
+#include "storage/lwlock.h" /* for GetLWLockIdentifier */
#include "utils/wait_event.h"
return -1;
/*
- * Check for column-level privileges first. This serves in
- * part as a check on whether the column even exists, so we
- * need to do it before checking table-level privilege.
+ * Check for column-level privileges first. This serves in part as a check
+ * on whether the column even exists, so we need to do it before checking
+ * table-level privilege.
*/
aclresult = pg_attribute_aclcheck_ext(tableoid, attnum, roleid,
mode, &is_missing);
{
if (relform->relfilenode)
result = relform->relfilenode;
- else /* Consult the relation mapper */
+ else /* Consult the relation mapper */
result = RelationMapOidToFilenode(relid,
relform->relisshared);
}
rnode.dbNode = MyDatabaseId;
if (relform->relfilenode)
rnode.relNode = relform->relfilenode;
- else /* Consult the relation mapper */
+ else /* Consult the relation mapper */
rnode.relNode = RelationMapOidToFilenode(relid,
relform->relisshared);
}
else
{
- /* no storage, return NULL */
- rnode.relNode = InvalidOid;
- /* some compilers generate warnings without these next two lines */
- rnode.dbNode = InvalidOid;
- rnode.spcNode = InvalidOid;
+ /* no storage, return NULL */
+ rnode.relNode = InvalidOid;
+ /* some compilers generate warnings without these next two lines */
+ rnode.dbNode = InvalidOid;
+ rnode.spcNode = InvalidOid;
}
if (!OidIsValid(rnode.relNode))
#define MIN_READ_SIZE 4096
/*
- * If not at end of file, and sbuf.len is equal to
- * MaxAllocSize - 1, then either the file is too large, or
- * there is nothing left to read. Attempt to read one more
- * byte to see if the end of file has been reached. If not,
- * the file is too large; we'd rather give the error message
- * for that ourselves.
+ * If not at end of file, and sbuf.len is equal to MaxAllocSize -
+ * 1, then either the file is too large, or there is nothing left
+ * to read. Attempt to read one more byte to see if the end of
+ * file has been reached. If not, the file is too large; we'd
+ * rather give the error message for that ourselves.
*/
if (sbuf.len == MaxAllocSize - 1)
{
- char rbuf[1];
+ char rbuf[1];
if (fread(rbuf, 1, 1, file) != 0 || !feof(file))
ereport(ERROR,
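The probe is worth seeing whole; a self-contained sketch with plain stdio (the StringInfo bookkeeping and error reporting of the original are simplified away):

    #include <stdio.h>

    /*
     * Having already read MaxAllocSize - 1 bytes without reaching EOF,
     * decide whether the file ends exactly here (return 1) or holds more
     * data and is therefore too large (return 0).
     */
    static int
    file_ends_at_cap(FILE *file)
    {
        char        rbuf[1];

        if (fread(rbuf, 1, 1, file) != 0 || !feof(file))
            return 0;           /* another byte arrived: file is too large */
        return 1;               /* clean EOF exactly at the cap */
    }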
* Check if any of these are in the list of interesting PIDs, that being
* the sessions that the isolation tester is running. We don't use
* "arrayoverlaps" here, because it would lead to cache lookups and one of
- * our goals is to run quickly with debug_invalidate_system_caches_always > 0. We expect
- * blocking_pids to be usually empty and otherwise a very small number in
- * isolation tester cases, so make that the outer loop of a naive search
- * for a match.
+ * our goals is to run quickly with debug_invalidate_system_caches_always
+ * > 0. We expect blocking_pids to be usually empty and otherwise a very
+ * small number in isolation tester cases, so make that the outer loop of
+ * a naive search for a match.
*/
for (i = 0; i < num_blocking_pids; i++)
for (j = 0; j < num_interesting_pids; j++)
*/
static void
PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
- TupleDesc tupdesc, MemoryContext context,
- const char *parent, int level)
+ TupleDesc tupdesc, MemoryContext context,
+ const char *parent, int level)
{
#define PG_GET_BACKEND_MEMORY_CONTEXTS_COLS 9
ident = context->ident;
/*
- * To be consistent with logging output, we label dynahash contexts
- * with just the hash table name as with MemoryContextStatsPrint().
+ * To be consistent with logging output, we label dynahash contexts with
+ * just the hash table name as with MemoryContextStatsPrint().
*/
if (ident && strcmp(name, "dynahash") == 0)
{
if (ident)
{
- int idlen = strlen(ident);
+ int idlen = strlen(ident);
char clipped_ident[MEMORY_CONTEXT_IDENT_DISPLAY_SIZE];
/*
for (child = context->firstchild; child != NULL; child = child->nextchild)
{
PutMemoryContextsStatsTupleStore(tupstore, tupdesc,
- child, name, level + 1);
+ child, name, level + 1);
}
}
MemoryContextSwitchTo(oldcontext);
PutMemoryContextsStatsTupleStore(tupstore, tupdesc,
- TopMemoryContext, NULL, 0);
+ TopMemoryContext, NULL, 0);
/* clean up and return the tuplestore */
tuplestore_donestoring(tupstore);
{
/* NB: We need to zero-pad the destination. */
strncpy(NameStr(*name), str, NAMEDATALEN);
- NameStr(*name)[NAMEDATALEN-1] = '\0';
+ NameStr(*name)[NAMEDATALEN - 1] = '\0';
}
/*
}
else
#endif
- if (collprovider == COLLPROVIDER_LIBC &&
- pg_strcasecmp("C", collcollate) != 0 &&
- pg_strncasecmp("C.", collcollate, 2) != 0 &&
- pg_strcasecmp("POSIX", collcollate) != 0)
+ if (collprovider == COLLPROVIDER_LIBC &&
+ pg_strcasecmp("C", collcollate) != 0 &&
+ pg_strncasecmp("C.", collcollate, 2) != 0 &&
+ pg_strcasecmp("POSIX", collcollate) != 0)
{
#if defined(__GLIBC__)
/* Use the glibc version because we don't have anything better. */
collversion = pstrdup(gnu_get_libc_version());
#elif defined(LC_VERSION_MASK)
- locale_t loc;
+ locale_t loc;
/* Look up FreeBSD collation version. */
loc = newlocale(LC_COLLATE, collcollate, NULL);
stats->statypid[slot_idx] = typcache->type_id;
stats->statyplen[slot_idx] = typcache->typlen;
stats->statypbyval[slot_idx] = typcache->typbyval;
- stats->statypalign[slot_idx] = typcache->typalign;
+ stats->statypalign[slot_idx] = typcache->typalign;
slot_idx++;
}
* Now check that foreign key exists in PK table
*
* XXX detectNewRows must be true when a partitioned table is on the
- * referenced side. The reason is that our snapshot must be fresh
- * in order for the hack in find_inheritance_children() to work.
+ * referenced side. The reason is that our snapshot must be fresh in
+ * order for the hack in find_inheritance_children() to work.
*/
ri_PerformCheck(riinfo, &qkey, qplan,
fk_rel, pk_rel,
tuple.t_data = record;
/*
- * We arrange to look up the needed hashing info just once per series
- * of calls, assuming the record type doesn't change underneath us.
+ * We arrange to look up the needed hashing info just once per series of
+ * calls, assuming the record type doesn't change underneath us.
*/
my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL ||
tuple.t_data = record;
/*
- * We arrange to look up the needed hashing info just once per series
- * of calls, assuming the record type doesn't change underneath us.
+ * We arrange to look up the needed hashing info just once per series of
+ * calls, assuming the record type doesn't change underneath us.
*/
my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL ||
}
else
{
- appendStringInfoString(&buf, "AS ");
+ appendStringInfoString(&buf, "AS ");
- tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_probin, &isnull);
- if (!isnull)
- {
- simple_quote_literal(&buf, TextDatumGetCString(tmp));
- appendStringInfoString(&buf, ", "); /* assume prosrc isn't null */
- }
+ tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_probin, &isnull);
+ if (!isnull)
+ {
+ simple_quote_literal(&buf, TextDatumGetCString(tmp));
+ appendStringInfoString(&buf, ", "); /* assume prosrc isn't null */
+ }
- tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_prosrc, &isnull);
- if (isnull)
- elog(ERROR, "null prosrc");
- prosrc = TextDatumGetCString(tmp);
+ tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_prosrc, &isnull);
+ if (isnull)
+ elog(ERROR, "null prosrc");
+ prosrc = TextDatumGetCString(tmp);
- /*
- * We always use dollar quoting. Figure out a suitable delimiter.
- *
- * Since the user is likely to be editing the function body string, we
- * shouldn't use a short delimiter that he might easily create a conflict
- * with. Hence prefer "$function$"/"$procedure$", but extend if needed.
- */
- initStringInfo(&dq);
- appendStringInfoChar(&dq, '$');
- appendStringInfoString(&dq, (isfunction ? "function" : "procedure"));
- while (strstr(prosrc, dq.data) != NULL)
- appendStringInfoChar(&dq, 'x');
- appendStringInfoChar(&dq, '$');
-
- appendBinaryStringInfo(&buf, dq.data, dq.len);
- appendStringInfoString(&buf, prosrc);
- appendBinaryStringInfo(&buf, dq.data, dq.len);
+ /*
+ * We always use dollar quoting. Figure out a suitable delimiter.
+ *
+ * Since the user is likely to be editing the function body string, we
+ * shouldn't use a short delimiter that he might easily create a
+ * conflict with. Hence prefer "$function$"/"$procedure$", but extend
+ * if needed.
+ */
+ initStringInfo(&dq);
+ appendStringInfoChar(&dq, '$');
+ appendStringInfoString(&dq, (isfunction ? "function" : "procedure"));
+ while (strstr(prosrc, dq.data) != NULL)
+ appendStringInfoChar(&dq, 'x');
+ appendStringInfoChar(&dq, '$');
+
+ appendBinaryStringInfo(&buf, dq.data, dq.len);
+ appendStringInfoString(&buf, prosrc);
+ appendBinaryStringInfo(&buf, dq.data, dq.len);
}
appendStringInfoChar(&buf, '\n');
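A worked example of the delimiter search: the loop keeps the tag open-ended while probing, so a conflict anywhere in the body forces another 'x'. A simplified stand-in using plain C strings (dq is assumed large enough):

    #include <string.h>

    static void
    choose_dollar_quote(const char *body, char *dq)
    {
        strcpy(dq, "$function");        /* or "$procedure" */
        while (strstr(body, dq) != NULL)
            strcat(dq, "x");
        strcat(dq, "$");
    }

A body containing the literal text $function$ thus gets the tag $functionx$, while ordinary bodies get plain $function$.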
* XXX This has the consequence that if there are statistics on the
* expression, we don't split it into individual Vars. This affects
* our selection of statistics in estimate_multivariate_ndistinct,
- * because it's probably better to use a more accurate estimate for
- * each expression and treat them as independent, than to combine
- * estimates for the extracted variables when we don't know how that
- * relates to the expressions.
+ * because it's probably better to use a more accurate estimate for each
+ * expression and treat them as independent, than to combine estimates
+ * for the extracted variables when we don't know how that relates to
+ * the expressions.
*/
examine_variable(root, groupexpr, 0, &vardata);
if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
/*
* Process a simple Var expression, by matching it to keys
- * directly. If there's a matching expression, we'll try
- * matching it later.
+ * directly. If there's a matching expression, we'll try matching
+ * it later.
*/
if (IsA(varinfo->var, Var))
{
AttrNumber attnum = ((Var *) varinfo->var)->varattno;
/*
- * Ignore expressions on system attributes. Can't rely on
- * the bms check for negative values.
+ * Ignore expressions on system attributes. Can't rely on the
+ * bms check for negative values.
*/
if (!AttrNumberIsForUserDefinedAttr(attnum))
continue;
tm_delta = tm_diff - tm_diff % stride_usecs;
/*
- * Make sure the returned timestamp is at the start of the bin,
- * even if the origin is in the future.
+ * Make sure the returned timestamp is at the start of the bin, even if
+ * the origin is in the future.
*/
if (origin > timestamp && stride_usecs > 1)
tm_delta -= stride_usecs;
tm_delta = tm_diff - tm_diff % stride_usecs;
/*
- * Make sure the returned timestamp is at the start of the bin,
- * even if the origin is in the future.
+ * Make sure the returned timestamp is at the start of the bin, even if
+ * the origin is in the future.
*/
if (origin > timestamp && stride_usecs > 1)
tm_delta -= stride_usecs;
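The binning arithmetic rewards a worked example. A sketch under illustrative names; note that the guard here also skips the correction when the timestamp falls exactly on a bin boundary before the origin, a case the bare origin > timestamp test would over-correct:

    #include <stdint.h>

    static int64_t
    bin_start(int64_t timestamp, int64_t origin, int64_t stride_usecs)
    {
        int64_t     tm_diff = timestamp - origin;
        int64_t     tm_delta = tm_diff - tm_diff % stride_usecs;

        /*
         * C's % truncates toward zero, so for timestamps before the origin
         * the line above lands on the end of the bin; step back one stride
         * to reach the start, unless tm_diff divided evenly.
         */
        if (tm_diff < 0 && tm_diff % stride_usecs != 0)
            tm_delta -= stride_usecs;

        return origin + tm_delta;
    }

With a one-hour stride and origin 12:00, a timestamp of 10:30 gives tm_diff = -90 min and tm_delta = -60 min, corrected to -120 min: bin start 10:00.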
size_t len = strlen(inputText);
uint64 dstlen = pg_hex_dec_len(len - 2);
- bc = dstlen + VARHDRSZ; /* maximum possible length */
+ bc = dstlen + VARHDRSZ; /* maximum possible length */
result = palloc(bc);
bc = pg_hex_decode(inputText + 2, len - 2, VARDATA(result), dstlen);
static int maxSharedInvalidMessagesArray;
/* GUC storage */
-int debug_invalidate_system_caches_always = 0;
+int debug_invalidate_system_caches_always = 0;
/*
* Dynamically-registered callback functions. Current implementation
/*
* Test code to force cache flushes anytime a flush could happen.
*
- * This helps detect intermittent faults caused by code that reads a
- * cache entry and then performs an action that could invalidate the entry,
- * but rarely actually does so. This can spot issues that would otherwise
+ * This helps detect intermittent faults caused by code that reads a cache
+ * entry and then performs an action that could invalidate the entry, but
+ * rarely actually does so. This can spot issues that would otherwise
* only arise with badly timed concurrent DDL, for example.
*
- * The default debug_invalidate_system_caches_always = 0 does no forced cache flushes.
+ * The default debug_invalidate_system_caches_always = 0 does no forced
+ * cache flushes.
*
- * If used with CLOBBER_FREED_MEMORY, debug_invalidate_system_caches_always = 1
- * (CLOBBER_CACHE_ALWAYS) provides a fairly thorough test that the system
- * contains no cache-flush hazards. However, it also makes the system
- * unbelievably slow --- the regression tests take about 100 times longer
- * than normal.
+ * If used with CLOBBER_FREED_MEMORY,
+ * debug_invalidate_system_caches_always = 1 (CLOBBER_CACHE_ALWAYS)
+ * provides a fairly thorough test that the system contains no cache-flush
+ * hazards. However, it also makes the system unbelievably slow --- the
+ * regression tests take about 100 times longer than normal.
*
- * If you're a glutton for punishment, try debug_invalidate_system_caches_always = 3
- * (CLOBBER_CACHE_RECURSIVELY). This slows things by at least a factor
- * of 10000, so I wouldn't suggest trying to run the entire regression
- * test suite that way. It's useful to try a few simple tests, to make sure
- * that cache reload isn't subject to internal cache-flush hazards, but
- * after you've done a few thousand recursive reloads it's unlikely
- * you'll learn more.
+ * If you're a glutton for punishment, try
+ * debug_invalidate_system_caches_always = 3 (CLOBBER_CACHE_RECURSIVELY).
+ * This slows things by at least a factor of 10000, so I wouldn't suggest
+ * trying to run the entire regression test suite that way. It's useful to try
+ * a few simple tests, to make sure that cache reload isn't subject to
+ * internal cache-flush hazards, but after you've done a few thousand
+ * recursive reloads it's unlikely you'll learn more.
*/
#ifdef CLOBBER_CACHE_ENABLED
{
* rejected a generic plan, it's possible to reach here with is_valid
* false due to an invalidation while making the generic plan. In theory
* the invalidation must be a false positive, perhaps a consequence of an
- * sinval reset event or the debug_invalidate_system_caches_always code. But for
- * safety, let's treat it as real and redo the RevalidateCachedQuery call.
+ * sinval reset event or the debug_invalidate_system_caches_always code.
+ * But for safety, let's treat it as real and redo the
+ * RevalidateCachedQuery call.
*/
if (!plansource->is_valid)
qlist = RevalidateCachedQuery(plansource, queryEnv);
*
* When cache clobbering is enabled or when forced to by
* RECOVER_RELATION_BUILD_MEMORY=1, arrange to allocate the junk in a
- * temporary context that we'll free before returning. Make it a child
- * of caller's context so that it will get cleaned up appropriately if
- * we error out partway through.
+ * temporary context that we'll free before returning. Make it a child of
+ * caller's context so that it will get cleaned up appropriately if we
+ * error out partway through.
*/
#ifdef MAYBE_RECOVER_RELATION_BUILD_MEMORY
MemoryContext tmpcxt = NULL;
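The arrangement the comment calls for, in sketch form (the context name and the use of default sizes are illustrative):

    MemoryContext tmpcxt = AllocSetContextCreate(CurrentMemoryContext,
                                                 "relcache build junk",
                                                 ALLOCSET_DEFAULT_SIZES);
    MemoryContext oldcxt = MemoryContextSwitchTo(tmpcxt);

    /* ... allocation-heavy build work happens here ... */

    MemoryContextSwitchTo(oldcxt);
    MemoryContextDelete(tmpcxt);    /* frees all the junk in one shot */

Because tmpcxt is a child of the caller's context, an error thrown partway through deletes it automatically along with its parent.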
!record_fields_have_hashing(typentry))
hash_proc = InvalidOid;
else if (hash_proc == F_HASH_RANGE &&
- !range_element_has_hashing(typentry))
+ !range_element_has_hashing(typentry))
hash_proc = InvalidOid;
/*
!array_element_has_extended_hashing(typentry))
hash_extended_proc = InvalidOid;
else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
- !record_fields_have_extended_hashing(typentry))
+ !record_fields_have_extended_hashing(typentry))
hash_extended_proc = InvalidOid;
else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
- !range_element_has_extended_hashing(typentry))
+ !range_element_has_extended_hashing(typentry))
hash_extended_proc = InvalidOid;
/*
case 'Q':
if (padding != 0)
appendStringInfo(buf, "%*lld", padding,
- (long long) pgstat_get_my_query_id());
+ (long long) pgstat_get_my_query_id());
else
appendStringInfo(buf, "%lld",
- (long long) pgstat_get_my_query_id());
+ (long long) pgstat_get_my_query_id());
break;
default:
/* format error - ignore it */
header => "Dummy map, for invalid values",
min_idx => 0,
max_idx => $widest_range,
- label => "dummy map"
+ label => "dummy map"
};
###
NULL
},
&vacuum_defer_cleanup_age,
- 0, 0, 1000000, /* see ComputeXidHorizons */
+ 0, 0, 1000000, /* see ComputeXidHorizons */
NULL, NULL, NULL
},
{
NULL
},
&autovacuum_freeze_max_age,
+
/*
* see pg_resetwal and vacuum_failsafe_age if you change the
* upper-limit value.
0,
#endif
0, 5,
-#else /* not CLOBBER_CACHE_ENABLED */
+#else /* not CLOBBER_CACHE_ENABLED */
0, 0, 0,
-#endif /* not CLOBBER_CACHE_ENABLED */
+#endif /* not CLOBBER_CACHE_ENABLED */
NULL, NULL, NULL
},
const char *
CleanQuerytext(const char *query, int *location, int *len)
{
- int query_location = *location;
- int query_len = *len;
+ int query_location = *location;
+ int query_len = *len;
/* First apply starting offset, unless it's -1 (unknown). */
if (query_location >= 0)
JumbleQuery(Query *query, const char *querytext)
{
JumbleState *jstate = NULL;
+
if (query->utilityStmt)
{
query->queryId = compute_utility_query_id(querytext,
- query->stmt_location,
- query->stmt_len);
+ query->stmt_location,
+ query->stmt_len);
}
else
{
static uint64
compute_utility_query_id(const char *query_text, int query_location, int query_len)
{
- uint64 queryId;
+ uint64 queryId;
const char *sql;
/*
- * Confine our attention to the relevant part of the string, if the
- * query is a portion of a multi-statement source string.
+ * Confine our attention to the relevant part of the string, if the query
+ * is a portion of a multi-statement source string.
*/
sql = CleanQuerytext(query_text, &query_location, &query_len);
query_len, 0));
/*
- * If we are unlucky enough to get a hash of zero (invalid), use
- * queryID 2 instead; queryID 1 is already in use for normal
- * statements.
+ * If we are unlucky enough to get a hash of zero (invalid), use queryID 2
+ * instead; queryID 1 is already in use for normal statements.
*/
if (queryId == UINT64CONST(0))
queryId = UINT64CONST(2);
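Put together, the fallback reads as below (sql and query_len are assumed in scope; the hash call mirrors the fragment above):

    uint64      queryId;

    queryId = DatumGetUInt64(hash_any_extended((const unsigned char *) sql,
                                               query_len, 0));

    /* 0 means "invalid"; 1 is the normal-statement fallback, so use 2. */
    if (queryId == UINT64CONST(0))
        queryId = UINT64CONST(2);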
for (int i = 0; i < lts->nTapes; i++)
{
LogicalTape *lt = <s->tapes[i];
+
Assert(!lt->writing || lt->buffer == NULL);
}
#endif
if (ts == threshold_timestamp)
{
/*
- * Current timestamp is in the same bucket as the last limit that
- * was applied. Reuse.
+ * Current timestamp is in the same bucket as the last limit that was
+ * applied. Reuse.
*/
xlimit = threshold_xid;
}
* number of minutes of difference between ts and the current
* head_timestamp.
*
- * The distance from the current head to the current tail is one
- * less than the number of entries in the mapping, because the
- * entry at the head_offset is for 0 minutes after head_timestamp.
+ * The distance from the current head to the current tail is one less
+ * than the number of entries in the mapping, because the entry at the
+ * head_offset is for 0 minutes after head_timestamp.
*
- * The difference between these two values is the number of minutes
- * by which we need to advance the mapping, either adding new entries
- * or rotating old ones out.
+ * The difference between these two values is the number of minutes by
+ * which we need to advance the mapping, either adding new entries or
+ * rotating old ones out.
*/
distance_to_new_tail =
(ts - oldSnapshotControl->head_timestamp) / USECS_PER_MINUTE;
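A worked example of that arithmetic (field names mirror the surrounding code; the entry count is illustrative): suppose head_timestamp is 12:00 and the mapping holds four one-minute entries covering 12:00 through 12:03. For ts = 12:05:

    distance_to_new_tail =
        (ts - oldSnapshotControl->head_timestamp) / USECS_PER_MINUTE;   /* 5 */
    distance_to_current_tail = entry_count - 1;                         /* 3 */
    advance = distance_to_new_tail - distance_to_current_tail;          /* 2 */

so the mapping must advance by two minutes, adding new entries or rotating old ones out.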
*/
if (opts.install_missing)
{
- char *schema;
- char *install_sql;
+ char *schema;
+ char *install_sql;
/*
* Must re-escape the schema name for each database, as the
# Failing to connect to the initial database is an error.
$node->command_checks_all(
[ 'pg_amcheck', 'qqq' ],
- 1,
- [ qr/^$/ ],
- [ qr/FATAL: database "qqq" does not exist/ ],
+ 1, [qr/^$/],
+ [qr/FATAL: database "qqq" does not exist/],
'checking a non-existent database');
# Failing to resolve a database pattern is an error by default.
$node->command_checks_all(
[ 'pg_amcheck', '-d', 'qqq', '-d', 'postgres' ],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: error: no connectable databases to check matching "qqq"/ ],
+ [qr/^$/],
+ [qr/pg_amcheck: error: no connectable databases to check matching "qqq"/],
'checking an unresolvable database pattern');
# But only a warning under --no-strict-names
$node->command_checks_all(
[ 'pg_amcheck', '--no-strict-names', '-d', 'qqq', '-d', 'postgres' ],
0,
- [ qr/^$/ ],
- [ qr/pg_amcheck: warning: no connectable databases to check matching "qqq"/ ],
+ [qr/^$/],
+ [
+ qr/pg_amcheck: warning: no connectable databases to check matching "qqq"/
+ ],
'checking an unresolvable database pattern under --no-strict-names');
# Check that a substring of an existent database name does not get interpreted
$node->command_checks_all(
[ 'pg_amcheck', '-d', 'post', '-d', 'postgres' ],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: error: no connectable databases to check matching "post"/ ],
- 'checking an unresolvable database pattern (substring of existent database)');
+ [qr/^$/],
+ [
+ qr/pg_amcheck: error: no connectable databases to check matching "post"/
+ ],
+ 'checking an unresolvable database pattern (substring of existent database)'
+);
# Check that a superstring of an existent database name does not get interpreted
# as a matching pattern.
$node->command_checks_all(
[ 'pg_amcheck', '-d', 'postgresql', '-d', 'postgres' ],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: error: no connectable databases to check matching "postgresql"/ ],
- 'checking an unresolvable database pattern (superstring of existent database)');
+ [qr/^$/],
+ [
+ qr/pg_amcheck: error: no connectable databases to check matching "postgresql"/
+ ],
+ 'checking an unresolvable database pattern (superstring of existent database)'
+);
#########################################
# Test connecting with a non-existent user
# Failing to connect to the initial database due to bad username is an error.
-$node->command_checks_all(
- [ 'pg_amcheck', '-U', 'no_such_user', 'postgres' ],
- 1,
- [ qr/^$/ ],
- [ ],
- 'checking with a non-existent user');
+$node->command_checks_all([ 'pg_amcheck', '-U', 'no_such_user', 'postgres' ],
+ 1, [qr/^$/], [], 'checking with a non-existent user');
#########################################
# Test checking databases without amcheck installed
$node->command_checks_all(
[ 'pg_amcheck', 'template1' ],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/,
- qr/pg_amcheck: error: no relations to check/ ],
- 'checking a database by name without amcheck installed, no other databases');
+ [qr/^$/],
+ [
+ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/,
+ qr/pg_amcheck: error: no relations to check/
+ ],
+ 'checking a database by name without amcheck installed, no other databases'
+);
# Again, but this time with another database to check, so no error is raised.
$node->command_checks_all(
[ 'pg_amcheck', '-d', 'template1', '-d', 'postgres' ],
0,
- [ qr/^$/ ],
- [ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/ ],
- 'checking a database by name without amcheck installed, with other databases');
+ [qr/^$/],
+ [
+ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/
+ ],
+ 'checking a database by name without amcheck installed, with other databases'
+);
# Again, but by way of checking all databases
$node->command_checks_all(
[ 'pg_amcheck', '--all' ],
0,
- [ qr/^$/ ],
- [ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/ ],
- 'checking a database by pattern without amcheck installed, with other databases');
+ [qr/^$/],
+ [
+ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/
+ ],
+ 'checking a database by pattern without amcheck installed, with other databases'
+);
#########################################
# Test unreasonable patterns
$node->command_checks_all(
[ 'pg_amcheck', '-d', 'postgres', '-t', '..' ],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: error: no connectable databases to check matching "\.\."/ ],
+ [qr/^$/],
+ [
+ qr/pg_amcheck: error: no connectable databases to check matching "\.\."/
+ ],
'checking table pattern ".."');
# Again, but with non-trivial schema and relation parts
$node->command_checks_all(
[ 'pg_amcheck', '-d', 'postgres', '-t', '.foo.bar' ],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: error: no connectable databases to check matching "\.foo\.bar"/ ],
+ [qr/^$/],
+ [
+ qr/pg_amcheck: error: no connectable databases to check matching "\.foo\.bar"/
+ ],
'checking table pattern ".foo.bar"');
# Check two-part unreasonable pattern that has zero-length names
$node->command_checks_all(
[ 'pg_amcheck', '-d', 'postgres', '-t', '.' ],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: error: no heap tables to check matching "\."/ ],
+ [qr/^$/],
+ [qr/pg_amcheck: error: no heap tables to check matching "\."/],
'checking table pattern "."');
#########################################
# Use --no-strict-names and a single existent table so we only get warnings
# about the failed pattern matches
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names',
- '-t', 'no_such_table',
- '-t', 'no*such*table',
- '-i', 'no_such_index',
- '-i', 'no*such*index',
- '-r', 'no_such_relation',
- '-r', 'no*such*relation',
- '-d', 'no_such_database',
- '-d', 'no*such*database',
- '-r', 'none.none',
- '-r', 'none.none.none',
- '-r', 'this.is.a.really.long.dotted.string',
- '-r', 'postgres.none.none',
- '-r', 'postgres.long.dotted.string',
- '-r', 'postgres.pg_catalog.none',
- '-r', 'postgres.none.pg_class',
- '-t', 'postgres.pg_catalog.pg_class', # This exists
+ [
+ 'pg_amcheck', '--no-strict-names',
+ '-t', 'no_such_table',
+ '-t', 'no*such*table',
+ '-i', 'no_such_index',
+ '-i', 'no*such*index',
+ '-r', 'no_such_relation',
+ '-r', 'no*such*relation',
+ '-d', 'no_such_database',
+ '-d', 'no*such*database',
+ '-r', 'none.none',
+ '-r', 'none.none.none',
+ '-r', 'this.is.a.really.long.dotted.string',
+ '-r', 'postgres.none.none',
+ '-r', 'postgres.long.dotted.string',
+ '-r', 'postgres.pg_catalog.none',
+ '-r', 'postgres.none.pg_class',
+ '-t', 'postgres.pg_catalog.pg_class', # This exists
],
0,
- [ qr/^$/ ],
- [ qr/pg_amcheck: warning: no heap tables to check matching "no_such_table"/,
- qr/pg_amcheck: warning: no heap tables to check matching "no\*such\*table"/,
- qr/pg_amcheck: warning: no btree indexes to check matching "no_such_index"/,
- qr/pg_amcheck: warning: no btree indexes to check matching "no\*such\*index"/,
- qr/pg_amcheck: warning: no relations to check matching "no_such_relation"/,
- qr/pg_amcheck: warning: no relations to check matching "no\*such\*relation"/,
- qr/pg_amcheck: warning: no heap tables to check matching "no\*such\*table"/,
- qr/pg_amcheck: warning: no connectable databases to check matching "no_such_database"/,
- qr/pg_amcheck: warning: no connectable databases to check matching "no\*such\*database"/,
- qr/pg_amcheck: warning: no relations to check matching "none\.none"/,
- qr/pg_amcheck: warning: no connectable databases to check matching "none\.none\.none"/,
- qr/pg_amcheck: warning: no connectable databases to check matching "this\.is\.a\.really\.long\.dotted\.string"/,
- qr/pg_amcheck: warning: no relations to check matching "postgres\.none\.none"/,
- qr/pg_amcheck: warning: no relations to check matching "postgres\.long\.dotted\.string"/,
- qr/pg_amcheck: warning: no relations to check matching "postgres\.pg_catalog\.none"/,
- qr/pg_amcheck: warning: no relations to check matching "postgres\.none\.pg_class"/,
- ],
- 'many unmatched patterns and one matched pattern under --no-strict-names');
+ [qr/^$/],
+ [
+ qr/pg_amcheck: warning: no heap tables to check matching "no_such_table"/,
+ qr/pg_amcheck: warning: no heap tables to check matching "no\*such\*table"/,
+ qr/pg_amcheck: warning: no btree indexes to check matching "no_such_index"/,
+ qr/pg_amcheck: warning: no btree indexes to check matching "no\*such\*index"/,
+ qr/pg_amcheck: warning: no relations to check matching "no_such_relation"/,
+ qr/pg_amcheck: warning: no relations to check matching "no\*such\*relation"/,
+ qr/pg_amcheck: warning: no heap tables to check matching "no\*such\*table"/,
+ qr/pg_amcheck: warning: no connectable databases to check matching "no_such_database"/,
+ qr/pg_amcheck: warning: no connectable databases to check matching "no\*such\*database"/,
+ qr/pg_amcheck: warning: no relations to check matching "none\.none"/,
+ qr/pg_amcheck: warning: no connectable databases to check matching "none\.none\.none"/,
+ qr/pg_amcheck: warning: no connectable databases to check matching "this\.is\.a\.really\.long\.dotted\.string"/,
+ qr/pg_amcheck: warning: no relations to check matching "postgres\.none\.none"/,
+ qr/pg_amcheck: warning: no relations to check matching "postgres\.long\.dotted\.string"/,
+ qr/pg_amcheck: warning: no relations to check matching "postgres\.pg_catalog\.none"/,
+ qr/pg_amcheck: warning: no relations to check matching "postgres\.none\.pg_class"/,
+ ],
+ 'many unmatched patterns and one matched pattern under --no-strict-names'
+);
#########################################
# Test checking otherwise existent objects but in databases where they do not exist
-$node->safe_psql('postgres', q(
+$node->safe_psql(
+ 'postgres', q(
CREATE TABLE public.foo (f integer);
CREATE INDEX foo_idx ON foo(f);
));
$node->safe_psql('postgres', q(CREATE DATABASE another_db));
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'postgres', '--no-strict-names',
- '-t', 'template1.public.foo',
- '-t', 'another_db.public.foo',
- '-t', 'no_such_database.public.foo',
- '-i', 'template1.public.foo_idx',
- '-i', 'another_db.public.foo_idx',
- '-i', 'no_such_database.public.foo_idx',
+ [
+ 'pg_amcheck', '-d',
+ 'postgres', '--no-strict-names',
+ '-t', 'template1.public.foo',
+ '-t', 'another_db.public.foo',
+ '-t', 'no_such_database.public.foo',
+ '-i', 'template1.public.foo_idx',
+ '-i', 'another_db.public.foo_idx',
+ '-i', 'no_such_database.public.foo_idx',
],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/,
- qr/pg_amcheck: warning: no heap tables to check matching "template1\.public\.foo"/,
- qr/pg_amcheck: warning: no heap tables to check matching "another_db\.public\.foo"/,
- qr/pg_amcheck: warning: no connectable databases to check matching "no_such_database\.public\.foo"/,
- qr/pg_amcheck: warning: no btree indexes to check matching "template1\.public\.foo_idx"/,
- qr/pg_amcheck: warning: no btree indexes to check matching "another_db\.public\.foo_idx"/,
- qr/pg_amcheck: warning: no connectable databases to check matching "no_such_database\.public\.foo_idx"/,
- qr/pg_amcheck: error: no relations to check/,
+ [qr/^$/],
+ [
+ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/,
+ qr/pg_amcheck: warning: no heap tables to check matching "template1\.public\.foo"/,
+ qr/pg_amcheck: warning: no heap tables to check matching "another_db\.public\.foo"/,
+ qr/pg_amcheck: warning: no connectable databases to check matching "no_such_database\.public\.foo"/,
+ qr/pg_amcheck: warning: no btree indexes to check matching "template1\.public\.foo_idx"/,
+ qr/pg_amcheck: warning: no btree indexes to check matching "another_db\.public\.foo_idx"/,
+ qr/pg_amcheck: warning: no connectable databases to check matching "no_such_database\.public\.foo_idx"/,
+ qr/pg_amcheck: error: no relations to check/,
],
'checking otherwise existent objects in the wrong databases');
# Check with only schema exclusion patterns
$node->command_checks_all(
- [ 'pg_amcheck', '--all', '--no-strict-names',
- '-S', 'public',
- '-S', 'pg_catalog',
- '-S', 'pg_toast',
- '-S', 'information_schema',
+ [
+ 'pg_amcheck', '--all', '--no-strict-names', '-S',
+ 'public', '-S', 'pg_catalog', '-S',
+ 'pg_toast', '-S', 'information_schema',
],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/,
- qr/pg_amcheck: error: no relations to check/ ],
+ [qr/^$/],
+ [
+ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/,
+ qr/pg_amcheck: error: no relations to check/
+ ],
'schema exclusion patterns exclude all relations');
# Check with schema exclusion patterns overriding relation and schema inclusion patterns
$node->command_checks_all(
- [ 'pg_amcheck', '--all', '--no-strict-names',
- '-s', 'public',
- '-s', 'pg_catalog',
- '-s', 'pg_toast',
- '-s', 'information_schema',
- '-t', 'pg_catalog.pg_class',
- '-S*'
+ [
+ 'pg_amcheck', '--all', '--no-strict-names', '-s',
+ 'public', '-s', 'pg_catalog', '-s',
+ 'pg_toast', '-s', 'information_schema', '-t',
+ 'pg_catalog.pg_class', '-S*'
],
1,
- [ qr/^$/ ],
- [ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/,
- qr/pg_amcheck: error: no relations to check/ ],
+ [qr/^$/],
+ [
+ qr/pg_amcheck: warning: skipping database "template1": amcheck is not installed/,
+ qr/pg_amcheck: error: no relations to check/
+ ],
'schema exclusion pattern overrides all inclusion patterns');
my ($dbname, $relname) = @_;
my $pgdata = $node->data_dir;
- my $rel = $node->safe_psql($dbname,
- qq(SELECT pg_relation_filepath('$relname')));
+ my $rel =
+ $node->safe_psql($dbname, qq(SELECT pg_relation_filepath('$relname')));
die "path not found for relation $relname" unless defined $rel;
return "$pgdata/$rel";
}
{
my ($dbname, $relname) = @_;
- my $rel = $node->safe_psql($dbname, qq(
+ my $rel = $node->safe_psql(
+ $dbname, qq(
SELECT c.reltoastrelid::regclass
FROM pg_catalog.pg_class c
WHERE c.oid = '$relname'::regclass
my $fh;
open($fh, '+<', $relpath)
- or BAIL_OUT("open failed: $!");
+ or BAIL_OUT("open failed: $!");
binmode $fh;
# Corrupt some line pointers. The values are chosen to hit the
# various line-pointer-corruption checks in verify_heapam.c
# on both little-endian and big-endian architectures.
seek($fh, 32, SEEK_SET)
- or BAIL_OUT("seek failed: $!");
+ or BAIL_OUT("seek failed: $!");
syswrite(
$fh,
pack("L*",
- 0xAAA15550, 0xAAA0D550, 0x00010000,
- 0x00008000, 0x0000800F, 0x001e8000,
- 0xFFFFFFFF)
+ 0xAAA15550, 0xAAA0D550, 0x00010000, 0x00008000,
+ 0x0000800F, 0x001e8000, 0xFFFFFFFF)
) or BAIL_OUT("syswrite failed: $!");
close($fh)
- or BAIL_OUT("close failed: $!");
+ or BAIL_OUT("close failed: $!");
}
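For reference while decoding those packed constants, the on-disk line pointer layout under attack is ItemIdData (as declared in src/include/storage/itemid.h):

    typedef struct ItemIdData
    {
        unsigned    lp_off:15,      /* offset to tuple (from start of page) */
                    lp_flags:2,     /* state of line pointer, see below */
                    lp_len:15;      /* byte length of tuple */
    } ItemIdData;

How a compiler packs these bit-fields into the 32-bit word varies by platform, which is why the constants are chosen to trip the checks under both little- and big-endian layouts.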
# Stops the node, performs all the corruptions previously planned, and
# checks that pg_amcheck does not get confused by them. Create functions in
# schema public that look like amcheck functions to check that pg_amcheck
# does not use them.
- $node->safe_psql($dbname, q(
+ $node->safe_psql(
+ $dbname, q(
CREATE SCHEMA amcheck_schema;
CREATE EXTENSION amcheck WITH SCHEMA amcheck_schema;
CREATE TABLE amcheck_schema.pg_database (junk text);
#
for my $schema (qw(s1 s2 s3 s4 s5))
{
- $node->safe_psql($dbname, qq(
+ $node->safe_psql(
+ $dbname, qq(
CREATE SCHEMA $schema;
CREATE SEQUENCE $schema.seq1;
CREATE SEQUENCE $schema.seq2;
my @cmd = ('pg_amcheck', '--quiet', '-p', $port);
# Regular expressions to match various expected output
-my $no_output_re = qr/^$/;
+my $no_output_re = qr/^$/;
my $line_pointer_corruption_re = qr/line pointer/;
my $missing_file_re = qr/could not open file ".*": No such file or directory/;
-my $index_missing_relation_fork_re = qr/index ".*" lacks a main relation fork/;
+my $index_missing_relation_fork_re =
+ qr/index ".*" lacks a main relation fork/;
# We have created test databases with tables populated with data, but have not
# yet corrupted anything. As such, we expect no corruption and verify that
# none is reported
#
-$node->command_checks_all(
- [ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ],
- 0,
- [ $no_output_re ],
- [ $no_output_re ],
- 'pg_amcheck prior to corruption');
+$node->command_checks_all([ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ],
+ 0, [$no_output_re], [$no_output_re], 'pg_amcheck prior to corruption');
# Perform the corruptions we planned above using only a single database restart.
#
$node->command_checks_all(
[ @cmd, 'db1' ],
2,
- [ $index_missing_relation_fork_re,
- $line_pointer_corruption_re,
- $missing_file_re,
+ [
+ $index_missing_relation_fork_re, $line_pointer_corruption_re,
+ $missing_file_re,
],
- [ $no_output_re ],
+ [$no_output_re],
'pg_amcheck all schemas, tables and indexes in database db1');
$node->command_checks_all(
[ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ],
2,
- [ $index_missing_relation_fork_re,
- $line_pointer_corruption_re,
- $missing_file_re,
+ [
+ $index_missing_relation_fork_re, $line_pointer_corruption_re,
+ $missing_file_re,
],
- [ $no_output_re ],
- 'pg_amcheck all schemas, tables and indexes in databases db1, db2, and db3');
+ [$no_output_re],
+ 'pg_amcheck all schemas, tables and indexes in databases db1, db2, and db3'
+);
# Scans of indexes in s1 should detect the specific corruption that we created
# above. For missing relation forks, we know what the error message looks
$node->command_checks_all(
[ @cmd, '--all', '-s', 's1', '-i', 't1_btree' ],
2,
- [ $index_missing_relation_fork_re ],
- [ qr/pg_amcheck: warning: skipping database "postgres": amcheck is not installed/ ],
+ [$index_missing_relation_fork_re],
+ [
+ qr/pg_amcheck: warning: skipping database "postgres": amcheck is not installed/
+ ],
'pg_amcheck index s1.t1_btree reports missing main relation fork');
$node->command_checks_all(
[ @cmd, '-d', 'db1', '-s', 's1', '-i', 't2_btree' ],
2,
- [ qr/.+/ ], # Any non-empty error message is acceptable
- [ $no_output_re ],
+ [qr/.+/], # Any non-empty error message is acceptable
+ [$no_output_re],
'pg_amcheck index s1.t2_btree reports index corruption');
# Checking db1.s1 with indexes excluded should show no corruptions because we
#
$node->command_checks_all(
[ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ],
- 0,
- [ $no_output_re ],
- [ $no_output_re ],
+ 0, [$no_output_re], [$no_output_re],
'pg_amcheck of db1.s1 excluding indexes');
# Checking db2.s1 should show table corruptions if indexes are excluded
#
$node->command_checks_all(
[ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db2' ],
- 2,
- [ $missing_file_re ],
- [ $no_output_re ],
+ 2, [$missing_file_re], [$no_output_re],
'pg_amcheck of db2.s1 excluding indexes');
# In schema db1.s3, the tables and indexes are both corrupt. We should see
$node->command_checks_all(
[ @cmd, '-s', 's3', 'db1' ],
2,
- [ $index_missing_relation_fork_re,
- $line_pointer_corruption_re,
- $missing_file_re,
+ [
+ $index_missing_relation_fork_re, $line_pointer_corruption_re,
+ $missing_file_re,
],
- [ $no_output_re ],
+ [$no_output_re],
'pg_amcheck schema s3 reports table and index errors');
# In schema db1.s4, only toast tables are corrupt. Check that under default
# options the toast corruption is reported, but when excluding toast we get no
# error reports.
-$node->command_checks_all(
- [ @cmd, '-s', 's4', 'db1' ],
- 2,
- [ $missing_file_re ],
- [ $no_output_re ],
+$node->command_checks_all([ @cmd, '-s', 's4', 'db1' ],
+ 2, [$missing_file_re], [$no_output_re],
'pg_amcheck in schema s4 reports toast corruption');
$node->command_checks_all(
- [ @cmd, '--no-dependent-toast', '--exclude-toast-pointers', '-s', 's4', 'db1' ],
+ [
+ @cmd, '--no-dependent-toast', '--exclude-toast-pointers', '-s', 's4',
+ 'db1'
+ ],
0,
- [ $no_output_re ],
- [ $no_output_re ],
+ [$no_output_re],
+ [$no_output_re],
'pg_amcheck in schema s4 excluding toast reports no corruption');
# Check that no corruption is reported in schema db1.s5
-$node->command_checks_all(
- [ @cmd, '-s', 's5', 'db1' ],
- 0,
- [ $no_output_re ],
- [ $no_output_re ],
+$node->command_checks_all([ @cmd, '-s', 's5', 'db1' ],
+ 0, [$no_output_re], [$no_output_re],
'pg_amcheck over schema s5 reports no corruption');
# In schema db1.s1, only indexes are corrupt. Verify that when we exclude
$node->command_checks_all(
[ @cmd, '-s', 's1', '-I', 't1_btree', '-I', 't2_btree', 'db1' ],
0,
- [ $no_output_re ],
- [ $no_output_re ],
- 'pg_amcheck over schema s1 with corrupt indexes excluded reports no corruption');
+ [$no_output_re],
+ [$no_output_re],
+ 'pg_amcheck over schema s1 with corrupt indexes excluded reports no corruption'
+);
# In schema db1.s1, only indexes are corrupt. Verify that when we provide only
# table inclusions, and disable index expansion, no corruption is reported
$node->command_checks_all(
[ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ],
0,
- [ $no_output_re ],
- [ $no_output_re ],
- 'pg_amcheck over schema s1 with all indexes excluded reports no corruption');
+ [$no_output_re],
+ [$no_output_re],
+ 'pg_amcheck over schema s1 with all indexes excluded reports no corruption'
+);
# In schema db1.s2, only tables are corrupt. Verify that when we exclude those
# tables that no corruption is reported.
$node->command_checks_all(
[ @cmd, '-s', 's2', '-T', 't1', '-T', 't2', 'db1' ],
0,
- [ $no_output_re ],
- [ $no_output_re ],
- 'pg_amcheck over schema s2 with corrupt tables excluded reports no corruption');
+ [$no_output_re],
+ [$no_output_re],
+ 'pg_amcheck over schema s2 with corrupt tables excluded reports no corruption'
+);
# Check errors about bad block range command line arguments. We use schema s5
# to avoid getting messages about corrupt tables or indexes.
$node->command_checks_all(
[ @cmd, '-s', 's1', '-i', 't1_btree', '--parent-check', 'db1' ],
2,
- [ $index_missing_relation_fork_re ],
- [ $no_output_re ],
+ [$index_missing_relation_fork_re],
+ [$no_output_re],
'pg_amcheck smoke test --parent-check');
$node->command_checks_all(
- [ @cmd, '-s', 's1', '-i', 't1_btree', '--heapallindexed', '--rootdescend', 'db1' ],
+ [
+ @cmd, '-s', 's1', '-i', 't1_btree', '--heapallindexed',
+ '--rootdescend', 'db1'
+ ],
2,
- [ $index_missing_relation_fork_re ],
- [ $no_output_re ],
+ [$index_missing_relation_fork_re],
+ [$no_output_re],
'pg_amcheck smoke test --heapallindexed --rootdescend');
$node->command_checks_all(
[ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3', '-S', 's*' ],
- 0,
- [ $no_output_re ],
- [ $no_output_re ],
+ 0, [$no_output_re], [$no_output_re],
'pg_amcheck excluding all corrupt schemas');
# constants here, where they can be compared easily against the layout.
use constant HEAPTUPLE_PACK_CODE => 'LLLSSSSSCCLLCCCCCCCCCCllLL';
-use constant HEAPTUPLE_PACK_LENGTH => 58; # Total size
+use constant HEAPTUPLE_PACK_LENGTH => 58; # Total size
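(In the pack notation above, L is an unsigned 32-bit integer, S unsigned 16-bit, C an unsigned char, and l a signed 32-bit integer, so the format totals 4*3 + 2*5 + 1*2 + 4*2 + 1*10 + 4*2 + 4*2 = 58 bytes, matching HEAPTUPLE_PACK_LENGTH.)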
# Read a tuple of our table from a heap page.
#
my ($fh, $offset) = @_;
my ($buffer, %tup);
seek($fh, $offset, SEEK_SET)
- or BAIL_OUT("seek failed: $!");
+ or BAIL_OUT("seek failed: $!");
defined(sysread($fh, $buffer, HEAPTUPLE_PACK_LENGTH))
- or BAIL_OUT("sysread failed: $!");
+ or BAIL_OUT("sysread failed: $!");
@_ = unpack(HEAPTUPLE_PACK_CODE, $buffer);
- %tup = (t_xmin => shift,
- t_xmax => shift,
- t_field3 => shift,
- bi_hi => shift,
- bi_lo => shift,
- ip_posid => shift,
- t_infomask2 => shift,
- t_infomask => shift,
- t_hoff => shift,
- t_bits => shift,
- a_1 => shift,
- a_2 => shift,
- b_header => shift,
- b_body1 => shift,
- b_body2 => shift,
- b_body3 => shift,
- b_body4 => shift,
- b_body5 => shift,
- b_body6 => shift,
- b_body7 => shift,
- c_va_header => shift,
- c_va_vartag => shift,
- c_va_rawsize => shift,
- c_va_extinfo => shift,
- c_va_valueid => shift,
- c_va_toastrelid => shift);
+ %tup = (
+ t_xmin => shift,
+ t_xmax => shift,
+ t_field3 => shift,
+ bi_hi => shift,
+ bi_lo => shift,
+ ip_posid => shift,
+ t_infomask2 => shift,
+ t_infomask => shift,
+ t_hoff => shift,
+ t_bits => shift,
+ a_1 => shift,
+ a_2 => shift,
+ b_header => shift,
+ b_body1 => shift,
+ b_body2 => shift,
+ b_body3 => shift,
+ b_body4 => shift,
+ b_body5 => shift,
+ b_body6 => shift,
+ b_body7 => shift,
+ c_va_header => shift,
+ c_va_vartag => shift,
+ c_va_rawsize => shift,
+ c_va_extinfo => shift,
+ c_va_valueid => shift,
+ c_va_toastrelid => shift);
# Stitch together the text for column 'b'
- $tup{b} = join('', map { chr($tup{"b_body$_"}) } (1..7));
+ $tup{b} = join('', map { chr($tup{"b_body$_"}) } (1 .. 7));
return \%tup;
}
sub write_tuple
{
my ($fh, $offset, $tup) = @_;
- my $buffer = pack(HEAPTUPLE_PACK_CODE,
- $tup->{t_xmin},
- $tup->{t_xmax},
- $tup->{t_field3},
- $tup->{bi_hi},
- $tup->{bi_lo},
- $tup->{ip_posid},
- $tup->{t_infomask2},
- $tup->{t_infomask},
- $tup->{t_hoff},
- $tup->{t_bits},
- $tup->{a_1},
- $tup->{a_2},
- $tup->{b_header},
- $tup->{b_body1},
- $tup->{b_body2},
- $tup->{b_body3},
- $tup->{b_body4},
- $tup->{b_body5},
- $tup->{b_body6},
- $tup->{b_body7},
- $tup->{c_va_header},
- $tup->{c_va_vartag},
- $tup->{c_va_rawsize},
- $tup->{c_va_extinfo},
- $tup->{c_va_valueid},
- $tup->{c_va_toastrelid});
+ my $buffer = pack(
+ HEAPTUPLE_PACK_CODE,
+ $tup->{t_xmin}, $tup->{t_xmax},
+ $tup->{t_field3}, $tup->{bi_hi},
+ $tup->{bi_lo}, $tup->{ip_posid},
+ $tup->{t_infomask2}, $tup->{t_infomask},
+ $tup->{t_hoff}, $tup->{t_bits},
+ $tup->{a_1}, $tup->{a_2},
+ $tup->{b_header}, $tup->{b_body1},
+ $tup->{b_body2}, $tup->{b_body3},
+ $tup->{b_body4}, $tup->{b_body5},
+ $tup->{b_body6}, $tup->{b_body7},
+ $tup->{c_va_header}, $tup->{c_va_vartag},
+ $tup->{c_va_rawsize}, $tup->{c_va_extinfo},
+ $tup->{c_va_valueid}, $tup->{c_va_toastrelid});
seek($fh, $offset, SEEK_SET)
- or BAIL_OUT("seek failed: $!");
+ or BAIL_OUT("seek failed: $!");
defined(syswrite($fh, $buffer, HEAPTUPLE_PACK_LENGTH))
- or BAIL_OUT("syswrite failed: $!");
+ or BAIL_OUT("syswrite failed: $!");
return;
}
# Start the node and load the extensions. We depend on both
# amcheck and pageinspect for this test.
$node->start;
-my $port = $node->port;
+my $port = $node->port;
my $pgdata = $node->data_dir;
$node->safe_psql('postgres', "CREATE EXTENSION amcheck");
$node->safe_psql('postgres', "CREATE EXTENSION pageinspect");
VACUUM FREEZE public.junk
));
-my $rel = $node->safe_psql('postgres', qq(SELECT pg_relation_filepath('public.test')));
+my $rel = $node->safe_psql('postgres',
+ qq(SELECT pg_relation_filepath('public.test')));
my $relpath = "$pgdata/$rel";
# Insert data and freeze public.test
use constant ROWCOUNT => 16;
-$node->safe_psql('postgres', qq(
+$node->safe_psql(
+ 'postgres', qq(
INSERT INTO public.test (a, b, c)
VALUES (
x'DEADF9F9DEADF9F9'::bigint,
repeat('w', 10000)
);
VACUUM FREEZE public.test
- )) for (1..ROWCOUNT);
+ )) for (1 .. ROWCOUNT);
my $relfrozenxid = $node->safe_psql('postgres',
q(select relfrozenxid from pg_class where relname = 'test'));
if ($datfrozenxid <= 3 || $datfrozenxid >= $relfrozenxid)
{
$node->clean_node;
- plan skip_all => "Xid thresholds not as expected: got datfrozenxid = $datfrozenxid, relfrozenxid = $relfrozenxid";
+ plan skip_all =>
+ "Xid thresholds not as expected: got datfrozenxid = $datfrozenxid, relfrozenxid = $relfrozenxid";
exit;
}
# Find where each of the tuples is located on the page.
my @lp_off;
-for my $tup (0..ROWCOUNT-1)
+for my $tup (0 .. ROWCOUNT - 1)
{
- push (@lp_off, $node->safe_psql('postgres', qq(
+ push(
+ @lp_off,
+ $node->safe_psql(
+ 'postgres', qq(
select lp_off from heap_page_items(get_raw_page('test', 'main', 0))
offset $tup limit 1)));
}
$node->stop;
my $file;
open($file, '+<', $relpath)
- or BAIL_OUT("open failed: $!");
+ or BAIL_OUT("open failed: $!");
binmode $file;
my $ENDIANNESS;
for (my $tupidx = 0; $tupidx < ROWCOUNT; $tupidx++)
{
- my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
+ my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
my $offset = $lp_off[$tupidx];
my $tup = read_tuple($file, $offset);
# Sanity-check that the data appears on the page where we expect.
my $a_1 = $tup->{a_1};
my $a_2 = $tup->{a_2};
- my $b = $tup->{b};
+ my $b = $tup->{b};
if ($a_1 != 0xDEADF9F9 || $a_2 != 0xDEADF9F9 || $b ne 'abcdefg')
{
- close($file); # ignore errors on close; we're exiting anyway
+ close($file); # ignore errors on close; we're exiting anyway
$node->clean_node;
- plan skip_all => sprintf("Page layout differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")",
- 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
+ plan skip_all =>
+ sprintf(
+ "Page layout differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")",
+ 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
exit;
}
$ENDIANNESS = $tup->{b_header} == 0x11 ? "little" : "big";
}
close($file)
- or BAIL_OUT("close failed: $!");
+ or BAIL_OUT("close failed: $!");
$node->start;
# Ok, Xids and page layout look ok. We can run corruption tests.
plan tests => 19;
# Check that pg_amcheck runs against the uncorrupted table without error.
-$node->command_ok(['pg_amcheck', '-p', $port, 'postgres'],
- 'pg_amcheck test table, prior to corruption');
+$node->command_ok(
+ [ 'pg_amcheck', '-p', $port, 'postgres' ],
+ 'pg_amcheck test table, prior to corruption');
# Check that pg_amcheck runs against the uncorrupted table and index without error.
-$node->command_ok(['pg_amcheck', '-p', $port, 'postgres'],
- 'pg_amcheck test table and index, prior to corruption');
+$node->command_ok([ 'pg_amcheck', '-p', $port, 'postgres' ],
+ 'pg_amcheck test table and index, prior to corruption');
$node->stop;
# Some #define constants from access/htup_details.h for use while corrupting.
-use constant HEAP_HASNULL => 0x0001;
-use constant HEAP_XMAX_LOCK_ONLY => 0x0080;
-use constant HEAP_XMIN_COMMITTED => 0x0100;
-use constant HEAP_XMIN_INVALID => 0x0200;
-use constant HEAP_XMAX_COMMITTED => 0x0400;
-use constant HEAP_XMAX_INVALID => 0x0800;
-use constant HEAP_NATTS_MASK => 0x07FF;
-use constant HEAP_XMAX_IS_MULTI => 0x1000;
-use constant HEAP_KEYS_UPDATED => 0x2000;
+use constant HEAP_HASNULL => 0x0001;
+use constant HEAP_XMAX_LOCK_ONLY => 0x0080;
+use constant HEAP_XMIN_COMMITTED => 0x0100;
+use constant HEAP_XMIN_INVALID => 0x0200;
+use constant HEAP_XMAX_COMMITTED => 0x0400;
+use constant HEAP_XMAX_INVALID => 0x0800;
+use constant HEAP_NATTS_MASK => 0x07FF;
+use constant HEAP_XMAX_IS_MULTI => 0x1000;
+use constant HEAP_KEYS_UPDATED => 0x2000;
# Helper function to generate a regular expression matching the header we
# expect verify_heapam() to return given which fields we expect to be non-null.
sub header
{
my ($blkno, $offnum, $attnum) = @_;
- return qr/heap table "postgres"\."public"\."test", block $blkno, offset $offnum, attribute $attnum:\s+/ms
- if (defined $attnum);
- return qr/heap table "postgres"\."public"\."test", block $blkno, offset $offnum:\s+/ms
- if (defined $offnum);
+ return
+ qr/heap table "postgres"\."public"\."test", block $blkno, offset $offnum, attribute $attnum:\s+/ms
+ if (defined $attnum);
+ return
+ qr/heap table "postgres"\."public"\."test", block $blkno, offset $offnum:\s+/ms
+ if (defined $offnum);
return qr/heap table "postgres"\."public"\."test", block $blkno:\s+/ms
- if (defined $blkno);
+ if (defined $blkno);
return qr/heap table "postgres"\."public"\."test":\s+/ms;
}
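For example, header(0, 3) yields a pattern matching the prefix for block 0, offset 3, while header(0) with no offset matches only the block-level prefix; with no defined arguments at all, the sub falls through to the bare table-level header.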
#
my @expected;
open($file, '+<', $relpath)
- or BAIL_OUT("open failed: $!");
+ or BAIL_OUT("open failed: $!");
binmode $file;
for (my $tupidx = 0; $tupidx < ROWCOUNT; $tupidx++)
{
- my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
+ my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
my $offset = $lp_off[$tupidx];
my $tup = read_tuple($file, $offset);
# Expected corruption report
push @expected,
- qr/${header}xmin $xmin precedes relation freeze threshold 0:\d+/;
+ qr/${header}xmin $xmin precedes relation freeze threshold 0:\d+/;
}
if ($offnum == 2)
{
$tup->{t_infomask} &= ~HEAP_XMIN_INVALID;
push @expected,
- qr/${$header}xmin $xmin precedes oldest valid transaction ID 0:\d+/;
+ qr/${$header}xmin $xmin precedes oldest valid transaction ID 0:\d+/;
}
elsif ($offnum == 3)
{
$tup->{t_infomask} &= ~HEAP_XMIN_INVALID;
push @expected,
- qr/${$header}xmin 4026531839 equals or exceeds next valid transaction ID 0:\d+/;
+ qr/${$header}xmin 4026531839 equals or exceeds next valid transaction ID 0:\d+/;
}
elsif ($offnum == 4)
{
$tup->{t_infomask} &= ~HEAP_XMAX_INVALID;
push @expected,
- qr/${$header}xmax 4026531839 equals or exceeds next valid transaction ID 0:\d+/;
+ qr/${$header}xmax 4026531839 equals or exceeds next valid transaction ID 0:\d+/;
}
elsif ($offnum == 5)
{
$tup->{t_hoff} += 128;
push @expected,
- qr/${$header}data begins at offset 152 beyond the tuple length 58/,
- qr/${$header}tuple data should begin at byte 24, but actually begins at byte 152 \(3 attributes, no nulls\)/;
+ qr/${$header}data begins at offset 152 beyond the tuple length 58/,
+ qr/${$header}tuple data should begin at byte 24, but actually begins at byte 152 \(3 attributes, no nulls\)/;
}
elsif ($offnum == 6)
{
$tup->{t_hoff} += 3;
push @expected,
- qr/${$header}tuple data should begin at byte 24, but actually begins at byte 27 \(3 attributes, no nulls\)/;
+ qr/${$header}tuple data should begin at byte 24, but actually begins at byte 27 \(3 attributes, no nulls\)/;
}
elsif ($offnum == 7)
{
$tup->{t_hoff} -= 8;
push @expected,
- qr/${$header}tuple data should begin at byte 24, but actually begins at byte 16 \(3 attributes, no nulls\)/;
+ qr/${$header}tuple data should begin at byte 24, but actually begins at byte 16 \(3 attributes, no nulls\)/;
}
elsif ($offnum == 8)
{
$tup->{t_hoff} -= 3;
push @expected,
- qr/${$header}tuple data should begin at byte 24, but actually begins at byte 21 \(3 attributes, no nulls\)/;
+ qr/${$header}tuple data should begin at byte 24, but actually begins at byte 21 \(3 attributes, no nulls\)/;
}
elsif ($offnum == 9)
{
$tup->{t_infomask2} |= HEAP_NATTS_MASK;
push @expected,
- qr/${$header}number of attributes 2047 exceeds maximum expected for table 3/;
+ qr/${$header}number of attributes 2047 exceeds maximum expected for table 3/;
}
elsif ($offnum == 10)
{
# Corrupt the tuple to look like it has lots of attributes, some of
# them null. This falsely creates the impression that the t_bits
# array is longer than just one byte, but t_hoff still says otherwise.
- $tup->{t_infomask} |= HEAP_HASNULL;
+ $tup->{t_infomask} |= HEAP_HASNULL;
$tup->{t_infomask2} |= HEAP_NATTS_MASK;
$tup->{t_bits} = 0xAA;
push @expected,
- qr/${$header}tuple data should begin at byte 280, but actually begins at byte 24 \(2047 attributes, has nulls\)/;
+ qr/${$header}tuple data should begin at byte 280, but actually begins at byte 24 \(2047 attributes, has nulls\)/;
}
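	# Rough arithmetic behind the expected 280 (assuming the usual 23-byte
	# fixed tuple header and 8-byte MAXALIGN): a null bitmap for 2047
	# attributes takes ceil(2047 / 8) = 256 bytes, and 23 + 256 = 279 rounds
	# up to 280, while t_hoff still claims the data starts at byte 24.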
elsif ($offnum == 11)
{
# Same as above, but this time t_hoff plays along
- $tup->{t_infomask} |= HEAP_HASNULL;
+ $tup->{t_infomask} |= HEAP_HASNULL;
$tup->{t_infomask2} |= (HEAP_NATTS_MASK & 0x40);
$tup->{t_bits} = 0xAA;
$tup->{t_hoff} = 32;
push @expected,
- qr/${$header}number of attributes 67 exceeds maximum expected for table 3/;
+ qr/${$header}number of attributes 67 exceeds maximum expected for table 3/;
}
elsif ($offnum == 12)
{
# bytes with 0xFF using 0x3FFFFFFF.
#
$tup->{b_header} = $ENDIANNESS eq 'little' ? 0xFC : 0x3F;
- $tup->{b_body1} = 0xFF;
- $tup->{b_body2} = 0xFF;
- $tup->{b_body3} = 0xFF;
+ $tup->{b_body1} = 0xFF;
+ $tup->{b_body2} = 0xFF;
+ $tup->{b_body3} = 0xFF;
$header = header(0, $offnum, 1);
push @expected,
- qr/${header}attribute with length \d+ ends at offset \d+ beyond total tuple length \d+/;
+ qr/${header}attribute with length \d+ ends at offset \d+ beyond total tuple length \d+/;
}
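	# Sketch of the endianness trick above (a hedged reading of the 4-byte
	# varlena format): with the two flag bits zeroed, the remaining 30 bits
	# hold the length, so bytes FC FF FF FF (little endian) and 3F FF FF FF
	# (big endian) both decode to a claimed length of 0x3FFFFFFF, about 1GB,
	# far past the 58-byte tuple.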
elsif ($offnum == 13)
{
$tup->{c_va_valueid} = 0xFFFFFFFF;
$header = header(0, $offnum, 2);
- push @expected,
- qr/${header}toast value \d+ not found in toast table/;
+ push @expected, qr/${header}toast value \d+ not found in toast table/;
}
elsif ($offnum == 14)
{
$tup->{t_xmax} = 4;
push @expected,
- qr/${header}multitransaction ID 4 equals or exceeds next valid multitransaction ID 1/;
+ qr/${header}multitransaction ID 4 equals or exceeds next valid multitransaction ID 1/;
}
- elsif ($offnum == 15) # Last offnum must equal ROWCOUNT
+ elsif ($offnum == 15) # Last offnum must equal ROWCOUNT
{
# Set both HEAP_XMAX_COMMITTED and HEAP_XMAX_IS_MULTI
$tup->{t_infomask} |= HEAP_XMAX_COMMITTED;
$tup->{t_xmax} = 4000000000;
push @expected,
- qr/${header}multitransaction ID 4000000000 precedes relation minimum multitransaction ID threshold 1/;
+ qr/${header}multitransaction ID 4000000000 precedes relation minimum multitransaction ID threshold 1/;
}
write_tuple($file, $offset, $tup);
}
close($file)
- or BAIL_OUT("close failed: $!");
+ or BAIL_OUT("close failed: $!");
$node->start;
# Run pg_amcheck against the corrupt table with epoch=0, comparing actual
# corruption messages against the expected messages
$node->command_checks_all(
- ['pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres'],
- 2,
- [ @expected ],
- [ ],
- 'Expected corruption message output');
+ [ 'pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres' ],
+ 2, [@expected], [], 'Expected corruption message output');
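# For orientation, command_checks_all() takes the command to run, the exit
# status to expect (2, which this test anticipates once corruption is
# reported), the regexes stdout must match, the regexes for stderr, and the
# test name.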
$node->teardown_node;
$node->clean_node;
$node->start;
# Create a custom operator class and an index which uses it.
-$node->safe_psql('postgres', q(
+$node->safe_psql(
+ 'postgres', q(
CREATE EXTENSION amcheck;
CREATE FUNCTION int4_asc_cmp (a int4, b int4) RETURNS int LANGUAGE sql AS $$
# Change the operator class to use a function which sorts in a different
# order to corrupt the btree index
-$node->safe_psql('postgres', q(
+$node->safe_psql(
+ 'postgres', q(
CREATE FUNCTION int4_desc_cmp (int4, int4) RETURNS int LANGUAGE sql AS $$
SELECT CASE WHEN $1 = $2 THEN 0 WHEN $1 > $2 THEN -1 ELSE 1 END; $$;
UPDATE pg_catalog.pg_amproc
$node->command_checks_all(
[ 'pg_amcheck', '-p', $node->port, 'postgres' ],
2,
- [ qr/item order invariant violated for index "fickleidx"/ ],
- [ ],
+ [qr/item order invariant violated for index "fickleidx"/],
+ [],
'pg_amcheck all schemas, tables and indexes reports fickleidx corruption'
);
# set page header and block sizes
my $pageheader_size = 24;
-my $block_size = $node->safe_psql('postgres', 'SHOW block_size;');
+my $block_size = $node->safe_psql('postgres', 'SHOW block_size;');
# induce corruption
system_or_bail 'pg_ctl', '-D', $pgdata, 'stop';
{
bool foundNotNull; /* Attr was NOT NULL in a parent */
bool foundDefault; /* Found a default in a parent */
- bool foundGenerated; /* Found a generated in a parent */
+ bool foundGenerated; /* Found a generated in a parent */
/* no point in examining dropped columns */
if (tbinfo->attisdropped[j])
/* working state while dumping/restoring */
pgoff_t dataLength; /* item's data size; 0 if none or unknown */
- int reqs; /* do we need schema and/or data of object (REQ_* bit mask) */
+ int reqs; /* do we need schema and/or data of object
+ * (REQ_* bit mask) */
bool created; /* set for DATA member if TABLE was created */
/* working state (needed only for parallel restore) */
extern ArchiveHandle *CloneArchive(ArchiveHandle *AH);
extern void DeCloneArchive(ArchiveHandle *AH);
-extern int TocIDRequired(ArchiveHandle *AH, DumpId id);
+extern int TocIDRequired(ArchiveHandle *AH, DumpId id);
TocEntry *getTocEntryByDumpId(ArchiveHandle *AH, DumpId id);
extern bool checkSeek(FILE *fp);
/* Header doesn't match, so read to next header */
len = th->fileLen;
len += tarPaddingBytesRequired(th->fileLen);
- blks = len / TAR_BLOCK_SIZE; /* # of tar blocks */
+ blks = len / TAR_BLOCK_SIZE; /* # of tar blocks */
for (i = 0; i < blks; i++)
_tarReadRaw(AH, &header[0], TAR_BLOCK_SIZE, NULL, ctx->tarFH);
# The odds of finding something interesting by testing all ASCII letters
# seem too small to justify the cycles of testing a fifth name.
my $dbname1 =
- 'regression'
+ 'regression'
. generate_ascii_string(1, 9)
. generate_ascii_string(11, 12)
. generate_ascii_string(14, 33)
primary_psql("VACUUM tail_tbl");
# Drop drop_tbl. pg_rewind should copy it back.
- primary_psql("insert into drop_tbl values ('in primary, after promotion')");
+ primary_psql(
+ "insert into drop_tbl values ('in primary, after promotion')");
primary_psql("DROP TABLE drop_tbl");
# Before running pg_rewind, do a couple of extra tests with several
# in "local" mode for simplicity's sake.
if ($test_mode eq 'local')
{
- my $primary_pgdata = $node_primary->data_dir;
+ my $primary_pgdata = $node_primary->data_dir;
my $standby_pgdata = $node_standby->data_dir;
# First check that pg_rewind fails if the target cluster is
# Create a subdir and files that will be present in both
mkdir "$test_primary_datadir/tst_both_dir";
- append_to_file "$test_primary_datadir/tst_both_dir/both_file1", "in both1";
- append_to_file "$test_primary_datadir/tst_both_dir/both_file2", "in both2";
+ append_to_file "$test_primary_datadir/tst_both_dir/both_file1",
+ "in both1";
+ append_to_file "$test_primary_datadir/tst_both_dir/both_file2",
+ "in both2";
mkdir "$test_primary_datadir/tst_both_dir/both_subdir/";
- append_to_file "$test_primary_datadir/tst_both_dir/both_subdir/both_file3",
+ append_to_file
+ "$test_primary_datadir/tst_both_dir/both_subdir/both_file3",
"in both3";
RewindTest::create_standby($test_mode);
my $node_1 = get_new_node('node_1');
$node_1->init(allows_streaming => 1);
-$node_1->append_conf('postgresql.conf', qq(
+$node_1->append_conf(
+ 'postgresql.conf', qq(
wal_keep_size='100 MB'
));
$node_1->backup($backup_name);
my $node_2 = get_new_node('node_2');
-$node_2->init_from_backup($node_1, $backup_name,
- has_streaming => 1);
+$node_2->init_from_backup($node_1, $backup_name, has_streaming => 1);
$node_2->start;
my $node_3 = get_new_node('node_3');
-$node_3->init_from_backup($node_1, $backup_name,
- has_streaming => 1);
+$node_3->init_from_backup($node_1, $backup_name, has_streaming => 1);
$node_3->start;
# Wait until node 3 has connected and caught up
# reconfigure node_1 as a standby following node_3
my $node_3_connstr = $node_3->connstr;
-$node_1->append_conf('postgresql.conf', qq(
+$node_1->append_conf(
+ 'postgresql.conf', qq(
primary_conninfo='$node_3_connstr'
));
$node_1->set_standby_mode();
$node_1->start();
# also reconfigure node_2 to follow node_3
-$node_2->append_conf('postgresql.conf', qq(
+$node_2->append_conf(
+ 'postgresql.conf', qq(
primary_conninfo='$node_3_connstr'
));
$node_2->restart();
# demonstratively create a split brain. After the rewind, we should only
# see the insert on 1, as the insert on node 3 is rewound away.
#
-$node_1->safe_psql('postgres', "INSERT INTO public.foo (t) VALUES ('keep this')");
+$node_1->safe_psql('postgres',
+ "INSERT INTO public.foo (t) VALUES ('keep this')");
# 'bar' is unmodified in node 1, so it won't be overwritten by replaying the
# WAL from node 1.
-$node_3->safe_psql('postgres', "INSERT INTO public.bar (t) VALUES ('rewind this')");
+$node_3->safe_psql('postgres',
+ "INSERT INTO public.bar (t) VALUES ('rewind this')");
# Insert more rows in node 1, to bump up the XID counter. Otherwise, if
# rewind doesn't correctly rewind the changes made on the other node,
# we might fail to notice if the inserts are invisible because the XIDs
# are not marked as committed.
-$node_1->safe_psql('postgres', "INSERT INTO public.foo (t) VALUES ('and this')");
-$node_1->safe_psql('postgres', "INSERT INTO public.foo (t) VALUES ('and this too')");
+$node_1->safe_psql('postgres',
+ "INSERT INTO public.foo (t) VALUES ('and this')");
+$node_1->safe_psql('postgres',
+ "INSERT INTO public.foo (t) VALUES ('and this too')");
# Wait for node 2 to catch up
$node_2->poll_query_until('postgres',
$node_2->stop('fast');
$node_3->stop('fast');
-my $node_2_pgdata = $node_2->data_dir;
+my $node_2_pgdata = $node_2->data_dir;
my $node_1_connstr = $node_1->connstr;
# Keep a temporary postgresql.conf or it would be overwritten during the rewind.
"$tmp_folder/node_2-postgresql.conf.tmp");
command_ok(
- [
- 'pg_rewind',
- "--source-server=$node_1_connstr",
- "--target-pgdata=$node_2_pgdata",
- "--debug"
- ],
+ [
+ 'pg_rewind', "--source-server=$node_1_connstr",
+ "--target-pgdata=$node_2_pgdata", "--debug"
+ ],
'run pg_rewind');
# Now move back postgresql.conf with old settings
# before rewind should've been overwritten with the data from node 1.
my $result;
$result = $node_2->safe_psql('postgres', 'SELECT * FROM public.foo');
-is($result, qq(keep this
+is( $result, qq(keep this
and this
and this too), 'table foo after rewind');
sub run_pg_rewind
{
my $test_mode = shift;
- my $primary_pgdata = $node_primary->data_dir;
+ my $primary_pgdata = $node_primary->data_dir;
my $standby_pgdata = $node_standby->data_dir;
my $standby_connstr = $node_standby->connstr('postgres');
my $tmp_folder = TestLib::tempdir;
# recovery configuration automatically.
command_ok(
[
- 'pg_rewind', "--debug",
- "--source-server", $standby_connstr,
+ 'pg_rewind', "--debug",
+ "--source-server", $standby_connstr,
"--target-pgdata=$primary_pgdata", "--no-sync",
"--write-recovery-conf"
],
buf,
XLOG_BLCKSZ,
writes * XLOG_BLCKSZ) != XLOG_BLCKSZ)
+
/*
* This can generate write failures if the filesystem has
* a large block size, e.g. 4k, and there is no support
static void
check_for_new_tablespace_dir(ClusterInfo *new_cluster)
{
- int tblnum;
- char new_tablespace_dir[MAXPGPATH];
+ int tblnum;
+ char new_tablespace_dir[MAXPGPATH];
prep_status("Checking for new cluster tablespace directories");
struct stat statbuf;
snprintf(new_tablespace_dir, MAXPGPATH, "%s%s",
- os_info.old_tablespaces[tblnum],
- new_cluster->tablespace_suffix);
+ os_info.old_tablespaces[tblnum],
+ new_cluster->tablespace_suffix);
if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT)
pg_fatal("new cluster tablespace directory already exists: \"%s\"\n",
static void
check_exec(const char *dir, const char *program, bool check_version)
{
- char path[MAXPGPATH];
- char line[MAXPGPATH];
- char cmd[MAXPGPATH];
- char versionstr[128];
- int ret;
+ char path[MAXPGPATH];
+ char line[MAXPGPATH];
+ char cmd[MAXPGPATH];
+ char versionstr[128];
+ int ret;
snprintf(path, sizeof(path), "%s/%s", dir, program);
* GO before proceeding to the "done" path which will cleanup,
* so as to avoid locking the process.
*
- * It is unclear whether it is worth doing anything rather than
- * coldly exiting with an error message.
+ * It is unclear whether it is worth doing anything rather
+ * than coldly exiting with an error message.
*/
THREAD_BARRIER_WAIT(&barrier);
goto done;
qr{command=98.: int 5432\b}, # :random_seed
qr{command=99.: int -9223372036854775808\b}, # min int
qr{command=100.: int 9223372036854775807\b}, # max int
- # pseudorandom permutation tests
+ # pseudorandom permutation tests
qr{command=101.: boolean true\b},
qr{command=102.: boolean true\b},
qr{command=103.: boolean true\b},
[qr{malformed variable.*trueXXX}], q{\set b :badtrue or true}
],
[
- 'invalid permute size', 2,
- [qr{permute size parameter must be greater than zero}], q{\set i permute(0, 0)}
+ 'invalid permute size',
+ 2,
+ [qr{permute size parameter must be greater than zero}],
+ q{\set i permute(0, 0)}
],
# GSET
/* ALTER INDEX SET|RESET ( */
else if (Matches("ALTER", "INDEX", MatchAny, "RESET", "("))
COMPLETE_WITH("fillfactor",
- "deduplicate_items", /* BTREE */
+ "deduplicate_items", /* BTREE */
"fastupdate", "gin_pending_list_limit", /* GIN */
"buffering", /* GiST */
"pages_per_range", "autosummarize" /* BRIN */
);
else if (Matches("ALTER", "INDEX", MatchAny, "SET", "("))
COMPLETE_WITH("fillfactor =",
- "deduplicate_items =", /* BTREE */
+ "deduplicate_items =", /* BTREE */
"fastupdate =", "gin_pending_list_limit =", /* GIN */
"buffering =", /* GiST */
"pages_per_range =", "autosummarize =" /* BRIN */
bool bv_allnulls; /* are all values nulls in the page range? */
Datum *bv_values; /* current accumulated values */
Datum bv_mem_value; /* expanded accumulated values */
- MemoryContext bv_context;
+ MemoryContext bv_context;
brin_serialize_callback_type bv_serialize;
} BrinValues;
RepOriginId nodeid;
TransactionId mainxid;
/* subxact Xids follow */
-} xl_commit_ts_set;
+} xl_commit_ts_set;
#define SizeOfCommitTsSet (offsetof(xl_commit_ts_set, mainxid) + \
sizeof(TransactionId))
{
int32 varlena_header_; /* varlena header (do not touch directly!) */
int fillfactor; /* page fill factor in percent (0..100) */
- float8 vacuum_cleanup_index_scale_factor; /* deprecated */
+ float8 vacuum_cleanup_index_scale_factor; /* deprecated */
bool deduplicate_items; /* Try to deduplicate items? */
} BTOptions;
uint32 phsw_chunk_remaining; /* # blocks left in this chunk */
uint32 phsw_chunk_size; /* The number of blocks to allocate in
* each I/O chunk for the scan */
-} ParallelBlockTableScanWorkerData;
+} ParallelBlockTableScanWorkerData;
typedef struct ParallelBlockTableScanWorkerData *ParallelBlockTableScanWorker;
/*
* but the value is one of the char values defined below, as they appear in
* pg_attribute.attcompression, e.g. TOAST_PGLZ_COMPRESSION.
*/
-extern int default_toast_compression;
+extern int default_toast_compression;
/*
* Built-in compression method-id. The toast compression header will store
* GetSnapshotData() needs to recompute the contents of the snapshot, or
* not. There are likely other users of this. Always above 1.
*/
- uint64 xactCompletionCount;
+ uint64 xactCompletionCount;
/*
* These fields are protected by XactTruncationLock
SYNCHRONOUS_COMMIT_REMOTE_WRITE, /* wait for local flush and remote
* write */
SYNCHRONOUS_COMMIT_REMOTE_FLUSH, /* wait for local and remote flush */
- SYNCHRONOUS_COMMIT_REMOTE_APPLY /* wait for local and remote flush
- and remote apply */
+ SYNCHRONOUS_COMMIT_REMOTE_APPLY /* wait for local and remote flush and
+ * remote apply */
} SyncCommitLevel;
/* Define the default setting for synchronous_commit */
aggcombinefn => 'int2and', aggtranstype => 'int2' },
{ aggfnoid => 'bit_or(int2)', aggtransfn => 'int2or', aggcombinefn => 'int2or',
aggtranstype => 'int2' },
-{ aggfnoid => 'bit_xor(int2)', aggtransfn => 'int2xor', aggcombinefn => 'int2xor',
- aggtranstype => 'int2' },
+{ aggfnoid => 'bit_xor(int2)', aggtransfn => 'int2xor',
+ aggcombinefn => 'int2xor', aggtranstype => 'int2' },
{ aggfnoid => 'bit_and(int4)', aggtransfn => 'int4and',
aggcombinefn => 'int4and', aggtranstype => 'int4' },
{ aggfnoid => 'bit_or(int4)', aggtransfn => 'int4or', aggcombinefn => 'int4or',
aggtranstype => 'int4' },
-{ aggfnoid => 'bit_xor(int4)', aggtransfn => 'int4xor', aggcombinefn => 'int4xor',
- aggtranstype => 'int4' },
+{ aggfnoid => 'bit_xor(int4)', aggtransfn => 'int4xor',
+ aggcombinefn => 'int4xor', aggtranstype => 'int4' },
{ aggfnoid => 'bit_and(int8)', aggtransfn => 'int8and',
aggcombinefn => 'int8and', aggtranstype => 'int8' },
{ aggfnoid => 'bit_or(int8)', aggtransfn => 'int8or', aggcombinefn => 'int8or',
aggtranstype => 'int8' },
-{ aggfnoid => 'bit_xor(int8)', aggtransfn => 'int8xor', aggcombinefn => 'int8xor',
- aggtranstype => 'int8' },
+{ aggfnoid => 'bit_xor(int8)', aggtransfn => 'int8xor',
+ aggcombinefn => 'int8xor', aggtranstype => 'int8' },
{ aggfnoid => 'bit_and(bit)', aggtransfn => 'bitand', aggcombinefn => 'bitand',
aggtranstype => 'bit' },
{ aggfnoid => 'bit_or(bit)', aggtransfn => 'bitor', aggcombinefn => 'bitor',
# bloom "char"
{ amprocfamily => 'brin/char_bloom_ops', amproclefttype => 'char',
- amprocrighttype => 'char', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'char', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/char_bloom_ops', amproclefttype => 'char',
amprocrighttype => 'char', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/char_bloom_ops', amproclefttype => 'char',
amprocrighttype => 'char', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/char_bloom_ops', amproclefttype => 'char',
- amprocrighttype => 'char', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'char', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/char_bloom_ops', amproclefttype => 'char',
amprocrighttype => 'char', amprocnum => '11', amproc => 'hashchar' },
# bloom name
{ amprocfamily => 'brin/name_bloom_ops', amproclefttype => 'name',
- amprocrighttype => 'name', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'name', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/name_bloom_ops', amproclefttype => 'name',
amprocrighttype => 'name', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/name_bloom_ops', amproclefttype => 'name',
amprocrighttype => 'name', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/name_bloom_ops', amproclefttype => 'name',
- amprocrighttype => 'name', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'name', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/name_bloom_ops', amproclefttype => 'name',
amprocrighttype => 'name', amprocnum => '11', amproc => 'hashname' },
amprocrighttype => 'int2', amprocnum => '3',
amproc => 'brin_minmax_multi_consistent' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int2',
- amprocrighttype => 'int2', amprocnum => '4', amproc => 'brin_minmax_multi_union' },
+ amprocrighttype => 'int2', amprocnum => '4',
+ amproc => 'brin_minmax_multi_union' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int2',
amprocrighttype => 'int2', amprocnum => '5',
amproc => 'brin_minmax_multi_options' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int2',
- amprocrighttype => 'int2', amprocnum => '11', amproc => 'brin_minmax_multi_distance_int2' },
+ amprocrighttype => 'int2', amprocnum => '11',
+ amproc => 'brin_minmax_multi_distance_int2' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int4',
amprocrighttype => 'int4', amprocnum => '1',
amprocrighttype => 'int4', amprocnum => '3',
amproc => 'brin_minmax_multi_consistent' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int4',
- amprocrighttype => 'int4', amprocnum => '4', amproc => 'brin_minmax_multi_union' },
+ amprocrighttype => 'int4', amprocnum => '4',
+ amproc => 'brin_minmax_multi_union' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int4',
amprocrighttype => 'int4', amprocnum => '5',
amproc => 'brin_minmax_multi_options' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int4',
- amprocrighttype => 'int4', amprocnum => '11', amproc => 'brin_minmax_multi_distance_int4' },
+ amprocrighttype => 'int4', amprocnum => '11',
+ amproc => 'brin_minmax_multi_distance_int4' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int8',
amprocrighttype => 'int8', amprocnum => '1',
amprocrighttype => 'int8', amprocnum => '3',
amproc => 'brin_minmax_multi_consistent' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int8',
- amprocrighttype => 'int8', amprocnum => '4', amproc => 'brin_minmax_multi_union' },
+ amprocrighttype => 'int8', amprocnum => '4',
+ amproc => 'brin_minmax_multi_union' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int8',
amprocrighttype => 'int8', amprocnum => '5',
amproc => 'brin_minmax_multi_options' },
{ amprocfamily => 'brin/integer_minmax_multi_ops', amproclefttype => 'int8',
- amprocrighttype => 'int8', amprocnum => '11', amproc => 'brin_minmax_multi_distance_int8' },
+ amprocrighttype => 'int8', amprocnum => '11',
+ amproc => 'brin_minmax_multi_distance_int8' },
# bloom integer: int2, int4, int8
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int8',
- amprocrighttype => 'int8', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'int8', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int8',
amprocrighttype => 'int8', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int8',
amprocrighttype => 'int8', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int8',
- amprocrighttype => 'int8', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'int8', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int8',
amprocrighttype => 'int8', amprocnum => '11', amproc => 'hashint8' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int2',
- amprocrighttype => 'int2', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'int2', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int2',
amprocrighttype => 'int2', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int2',
amprocrighttype => 'int2', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int2',
- amprocrighttype => 'int2', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'int2', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int2',
amprocrighttype => 'int2', amprocnum => '11', amproc => 'hashint2' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int4',
- amprocrighttype => 'int4', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'int4', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int4',
amprocrighttype => 'int4', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int4',
amprocrighttype => 'int4', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int4',
- amprocrighttype => 'int4', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'int4', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/integer_bloom_ops', amproclefttype => 'int4',
amprocrighttype => 'int4', amprocnum => '11', amproc => 'hashint4' },
# bloom text
{ amprocfamily => 'brin/text_bloom_ops', amproclefttype => 'text',
- amprocrighttype => 'text', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'text', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/text_bloom_ops', amproclefttype => 'text',
amprocrighttype => 'text', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/text_bloom_ops', amproclefttype => 'text',
amprocrighttype => 'text', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/text_bloom_ops', amproclefttype => 'text',
- amprocrighttype => 'text', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'text', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/text_bloom_ops', amproclefttype => 'text',
amprocrighttype => 'text', amprocnum => '11', amproc => 'hashtext' },
# minmax multi oid
{ amprocfamily => 'brin/oid_minmax_multi_ops', amproclefttype => 'oid',
- amprocrighttype => 'oid', amprocnum => '1', amproc => 'brin_minmax_multi_opcinfo' },
+ amprocrighttype => 'oid', amprocnum => '1',
+ amproc => 'brin_minmax_multi_opcinfo' },
{ amprocfamily => 'brin/oid_minmax_multi_ops', amproclefttype => 'oid',
amprocrighttype => 'oid', amprocnum => '2',
amproc => 'brin_minmax_multi_add_value' },
amprocrighttype => 'oid', amprocnum => '3',
amproc => 'brin_minmax_multi_consistent' },
{ amprocfamily => 'brin/oid_minmax_multi_ops', amproclefttype => 'oid',
- amprocrighttype => 'oid', amprocnum => '4', amproc => 'brin_minmax_multi_union' },
+ amprocrighttype => 'oid', amprocnum => '4',
+ amproc => 'brin_minmax_multi_union' },
{ amprocfamily => 'brin/oid_minmax_multi_ops', amproclefttype => 'oid',
amprocrighttype => 'oid', amprocnum => '5',
amproc => 'brin_minmax_multi_options' },
{ amprocfamily => 'brin/oid_minmax_multi_ops', amproclefttype => 'oid',
- amprocrighttype => 'oid', amprocnum => '11', amproc => 'brin_minmax_multi_distance_int4' },
+ amprocrighttype => 'oid', amprocnum => '11',
+ amproc => 'brin_minmax_multi_distance_int4' },
# bloom oid
{ amprocfamily => 'brin/oid_bloom_ops', amproclefttype => 'oid',
{ amprocfamily => 'brin/oid_bloom_ops', amproclefttype => 'oid',
amprocrighttype => 'oid', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/oid_bloom_ops', amproclefttype => 'oid',
- amprocrighttype => 'oid', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'oid', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/oid_bloom_ops', amproclefttype => 'oid',
amprocrighttype => 'oid', amprocnum => '11', amproc => 'hashoid' },
{ amprocfamily => 'brin/tid_bloom_ops', amproclefttype => 'tid',
amprocrighttype => 'tid', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/tid_bloom_ops', amproclefttype => 'tid',
- amprocrighttype => 'tid', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'tid', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/tid_bloom_ops', amproclefttype => 'tid',
amprocrighttype => 'tid', amprocnum => '11', amproc => 'hashtid' },
# minmax multi tid
{ amprocfamily => 'brin/tid_minmax_multi_ops', amproclefttype => 'tid',
- amprocrighttype => 'tid', amprocnum => '1', amproc => 'brin_minmax_multi_opcinfo' },
+ amprocrighttype => 'tid', amprocnum => '1',
+ amproc => 'brin_minmax_multi_opcinfo' },
{ amprocfamily => 'brin/tid_minmax_multi_ops', amproclefttype => 'tid',
amprocrighttype => 'tid', amprocnum => '2',
amproc => 'brin_minmax_multi_add_value' },
amprocrighttype => 'tid', amprocnum => '3',
amproc => 'brin_minmax_multi_consistent' },
{ amprocfamily => 'brin/tid_minmax_multi_ops', amproclefttype => 'tid',
- amprocrighttype => 'tid', amprocnum => '4', amproc => 'brin_minmax_multi_union' },
+ amprocrighttype => 'tid', amprocnum => '4',
+ amproc => 'brin_minmax_multi_union' },
{ amprocfamily => 'brin/tid_minmax_multi_ops', amproclefttype => 'tid',
amprocrighttype => 'tid', amprocnum => '5',
amproc => 'brin_minmax_multi_options' },
{ amprocfamily => 'brin/tid_minmax_multi_ops', amproclefttype => 'tid',
- amprocrighttype => 'tid', amprocnum => '11', amproc => 'brin_minmax_multi_distance_tid' },
+ amprocrighttype => 'tid', amprocnum => '11',
+ amproc => 'brin_minmax_multi_distance_tid' },
# minmax float
{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float4',
amprocrighttype => 'float4', amprocnum => '3',
amproc => 'brin_bloom_consistent' },
{ amprocfamily => 'brin/float_bloom_ops', amproclefttype => 'float4',
- amprocrighttype => 'float4', amprocnum => '4',
- amproc => 'brin_bloom_union' },
+ amprocrighttype => 'float4', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/float_bloom_ops', amproclefttype => 'float4',
amprocrighttype => 'float4', amprocnum => '5',
amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/float_bloom_ops', amproclefttype => 'float4',
- amprocrighttype => 'float4', amprocnum => '11',
- amproc => 'hashfloat4' },
+ amprocrighttype => 'float4', amprocnum => '11', amproc => 'hashfloat4' },
{ amprocfamily => 'brin/float_bloom_ops', amproclefttype => 'float8',
amprocrighttype => 'float8', amprocnum => '1',
amprocrighttype => 'float8', amprocnum => '3',
amproc => 'brin_bloom_consistent' },
{ amprocfamily => 'brin/float_bloom_ops', amproclefttype => 'float8',
- amprocrighttype => 'float8', amprocnum => '4',
- amproc => 'brin_bloom_union' },
+ amprocrighttype => 'float8', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/float_bloom_ops', amproclefttype => 'float8',
amprocrighttype => 'float8', amprocnum => '5',
amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/float_bloom_ops', amproclefttype => 'float8',
- amprocrighttype => 'float8', amprocnum => '11',
- amproc => 'hashfloat8' },
+ amprocrighttype => 'float8', amprocnum => '11', amproc => 'hashfloat8' },
# minmax macaddr
{ amprocfamily => 'brin/macaddr_minmax_ops', amproclefttype => 'macaddr',
amprocrighttype => 'macaddr', amprocnum => '5',
amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/macaddr_bloom_ops', amproclefttype => 'macaddr',
- amprocrighttype => 'macaddr', amprocnum => '11',
- amproc => 'hashmacaddr' },
+ amprocrighttype => 'macaddr', amprocnum => '11', amproc => 'hashmacaddr' },
# minmax macaddr8
{ amprocfamily => 'brin/macaddr8_minmax_ops', amproclefttype => 'macaddr8',
amproc => 'brin_minmax_union' },
# minmax multi macaddr8
-{ amprocfamily => 'brin/macaddr8_minmax_multi_ops', amproclefttype => 'macaddr8',
- amprocrighttype => 'macaddr8', amprocnum => '1',
+{ amprocfamily => 'brin/macaddr8_minmax_multi_ops',
+ amproclefttype => 'macaddr8', amprocrighttype => 'macaddr8', amprocnum => '1',
amproc => 'brin_minmax_multi_opcinfo' },
-{ amprocfamily => 'brin/macaddr8_minmax_multi_ops', amproclefttype => 'macaddr8',
- amprocrighttype => 'macaddr8', amprocnum => '2',
+{ amprocfamily => 'brin/macaddr8_minmax_multi_ops',
+ amproclefttype => 'macaddr8', amprocrighttype => 'macaddr8', amprocnum => '2',
amproc => 'brin_minmax_multi_add_value' },
-{ amprocfamily => 'brin/macaddr8_minmax_multi_ops', amproclefttype => 'macaddr8',
- amprocrighttype => 'macaddr8', amprocnum => '3',
+{ amprocfamily => 'brin/macaddr8_minmax_multi_ops',
+ amproclefttype => 'macaddr8', amprocrighttype => 'macaddr8', amprocnum => '3',
amproc => 'brin_minmax_multi_consistent' },
-{ amprocfamily => 'brin/macaddr8_minmax_multi_ops', amproclefttype => 'macaddr8',
- amprocrighttype => 'macaddr8', amprocnum => '4',
+{ amprocfamily => 'brin/macaddr8_minmax_multi_ops',
+ amproclefttype => 'macaddr8', amprocrighttype => 'macaddr8', amprocnum => '4',
amproc => 'brin_minmax_multi_union' },
-{ amprocfamily => 'brin/macaddr8_minmax_multi_ops', amproclefttype => 'macaddr8',
- amprocrighttype => 'macaddr8', amprocnum => '5',
+{ amprocfamily => 'brin/macaddr8_minmax_multi_ops',
+ amproclefttype => 'macaddr8', amprocrighttype => 'macaddr8', amprocnum => '5',
amproc => 'brin_minmax_multi_options' },
-{ amprocfamily => 'brin/macaddr8_minmax_multi_ops', amproclefttype => 'macaddr8',
- amprocrighttype => 'macaddr8', amprocnum => '11',
- amproc => 'brin_minmax_multi_distance_macaddr8' },
+{ amprocfamily => 'brin/macaddr8_minmax_multi_ops',
+ amproclefttype => 'macaddr8', amprocrighttype => 'macaddr8',
+ amprocnum => '11', amproc => 'brin_minmax_multi_distance_macaddr8' },
# bloom macaddr8
{ amprocfamily => 'brin/macaddr8_bloom_ops', amproclefttype => 'macaddr8',
amprocrighttype => 'macaddr8', amprocnum => '5',
amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/macaddr8_bloom_ops', amproclefttype => 'macaddr8',
- amprocrighttype => 'macaddr8', amprocnum => '11',
- amproc => 'hashmacaddr8' },
+ amprocrighttype => 'macaddr8', amprocnum => '11', amproc => 'hashmacaddr8' },
# minmax inet
{ amprocfamily => 'brin/network_minmax_ops', amproclefttype => 'inet',
# bloom inet
{ amprocfamily => 'brin/network_bloom_ops', amproclefttype => 'inet',
- amprocrighttype => 'inet', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'inet', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/network_bloom_ops', amproclefttype => 'inet',
amprocrighttype => 'inet', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/network_bloom_ops', amproclefttype => 'inet',
amprocrighttype => 'inet', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/network_bloom_ops', amproclefttype => 'inet',
- amprocrighttype => 'inet', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'inet', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/network_bloom_ops', amproclefttype => 'inet',
amprocrighttype => 'inet', amprocnum => '11', amproc => 'hashinet' },
amprocrighttype => 'bpchar', amprocnum => '3',
amproc => 'brin_bloom_consistent' },
{ amprocfamily => 'brin/bpchar_bloom_ops', amproclefttype => 'bpchar',
- amprocrighttype => 'bpchar', amprocnum => '4',
- amproc => 'brin_bloom_union' },
+ amprocrighttype => 'bpchar', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/bpchar_bloom_ops', amproclefttype => 'bpchar',
amprocrighttype => 'bpchar', amprocnum => '5',
amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/bpchar_bloom_ops', amproclefttype => 'bpchar',
- amprocrighttype => 'bpchar', amprocnum => '11',
- amproc => 'hashbpchar' },
+ amprocrighttype => 'bpchar', amprocnum => '11', amproc => 'hashbpchar' },
# minmax time without time zone
{ amprocfamily => 'brin/time_minmax_ops', amproclefttype => 'time',
amprocrighttype => 'time', amprocnum => '3',
amproc => 'brin_minmax_multi_consistent' },
{ amprocfamily => 'brin/time_minmax_multi_ops', amproclefttype => 'time',
- amprocrighttype => 'time', amprocnum => '4', amproc => 'brin_minmax_multi_union' },
+ amprocrighttype => 'time', amprocnum => '4',
+ amproc => 'brin_minmax_multi_union' },
{ amprocfamily => 'brin/time_minmax_multi_ops', amproclefttype => 'time',
amprocrighttype => 'time', amprocnum => '5',
amproc => 'brin_minmax_multi_options' },
# bloom time without time zone
{ amprocfamily => 'brin/time_bloom_ops', amproclefttype => 'time',
- amprocrighttype => 'time', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'time', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/time_bloom_ops', amproclefttype => 'time',
amprocrighttype => 'time', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/time_bloom_ops', amproclefttype => 'time',
amprocrighttype => 'time', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/time_bloom_ops', amproclefttype => 'time',
- amprocrighttype => 'time', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'time', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/time_bloom_ops', amproclefttype => 'time',
amprocrighttype => 'time', amprocnum => '11', amproc => 'time_hash' },
amprocrighttype => 'date', amprocnum => '4', amproc => 'brin_minmax_union' },
# minmax multi datetime (date, timestamp, timestamptz)
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamp',
- amprocrighttype => 'timestamp', amprocnum => '1',
- amproc => 'brin_minmax_multi_opcinfo' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamp',
- amprocrighttype => 'timestamp', amprocnum => '2',
- amproc => 'brin_minmax_multi_add_value' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamp',
- amprocrighttype => 'timestamp', amprocnum => '3',
- amproc => 'brin_minmax_multi_consistent' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamp',
- amprocrighttype => 'timestamp', amprocnum => '4',
- amproc => 'brin_minmax_multi_union' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamp',
- amprocrighttype => 'timestamp', amprocnum => '5',
- amproc => 'brin_minmax_multi_options' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamp',
- amprocrighttype => 'timestamp', amprocnum => '11',
- amproc => 'brin_minmax_multi_distance_timestamp' },
-
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamptz',
- amprocrighttype => 'timestamptz', amprocnum => '1',
- amproc => 'brin_minmax_multi_opcinfo' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamptz',
- amprocrighttype => 'timestamptz', amprocnum => '2',
- amproc => 'brin_minmax_multi_add_value' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamptz',
- amprocrighttype => 'timestamptz', amprocnum => '3',
- amproc => 'brin_minmax_multi_consistent' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamptz',
- amprocrighttype => 'timestamptz', amprocnum => '4',
- amproc => 'brin_minmax_multi_union' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamptz',
- amprocrighttype => 'timestamptz', amprocnum => '5',
- amproc => 'brin_minmax_multi_options' },
-{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'timestamptz',
- amprocrighttype => 'timestamptz', amprocnum => '11',
- amproc => 'brin_minmax_multi_distance_timestamp' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamp', amprocrighttype => 'timestamp',
+ amprocnum => '1', amproc => 'brin_minmax_multi_opcinfo' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamp', amprocrighttype => 'timestamp',
+ amprocnum => '2', amproc => 'brin_minmax_multi_add_value' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamp', amprocrighttype => 'timestamp',
+ amprocnum => '3', amproc => 'brin_minmax_multi_consistent' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamp', amprocrighttype => 'timestamp',
+ amprocnum => '4', amproc => 'brin_minmax_multi_union' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamp', amprocrighttype => 'timestamp',
+ amprocnum => '5', amproc => 'brin_minmax_multi_options' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamp', amprocrighttype => 'timestamp',
+ amprocnum => '11', amproc => 'brin_minmax_multi_distance_timestamp' },
+
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamptz', amprocrighttype => 'timestamptz',
+ amprocnum => '1', amproc => 'brin_minmax_multi_opcinfo' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamptz', amprocrighttype => 'timestamptz',
+ amprocnum => '2', amproc => 'brin_minmax_multi_add_value' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamptz', amprocrighttype => 'timestamptz',
+ amprocnum => '3', amproc => 'brin_minmax_multi_consistent' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamptz', amprocrighttype => 'timestamptz',
+ amprocnum => '4', amproc => 'brin_minmax_multi_union' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamptz', amprocrighttype => 'timestamptz',
+ amprocnum => '5', amproc => 'brin_minmax_multi_options' },
+{ amprocfamily => 'brin/datetime_minmax_multi_ops',
+ amproclefttype => 'timestamptz', amprocrighttype => 'timestamptz',
+ amprocnum => '11', amproc => 'brin_minmax_multi_distance_timestamp' },
{ amprocfamily => 'brin/datetime_minmax_multi_ops', amproclefttype => 'date',
amprocrighttype => 'date', amprocnum => '1',
amproc => 'timestamp_hash' },
{ amprocfamily => 'brin/datetime_bloom_ops', amproclefttype => 'date',
- amprocrighttype => 'date', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'date', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/datetime_bloom_ops', amproclefttype => 'date',
amprocrighttype => 'date', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/datetime_bloom_ops', amproclefttype => 'date',
amprocrighttype => 'date', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/datetime_bloom_ops', amproclefttype => 'date',
- amprocrighttype => 'date', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'date', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/datetime_bloom_ops', amproclefttype => 'date',
amprocrighttype => 'date', amprocnum => '11', amproc => 'hashint4' },
amproc => 'brin_minmax_union' },
# minmax multi interval
-{ amprocfamily => 'brin/interval_minmax_multi_ops', amproclefttype => 'interval',
- amprocrighttype => 'interval', amprocnum => '1',
+{ amprocfamily => 'brin/interval_minmax_multi_ops',
+ amproclefttype => 'interval', amprocrighttype => 'interval', amprocnum => '1',
amproc => 'brin_minmax_multi_opcinfo' },
-{ amprocfamily => 'brin/interval_minmax_multi_ops', amproclefttype => 'interval',
- amprocrighttype => 'interval', amprocnum => '2',
+{ amprocfamily => 'brin/interval_minmax_multi_ops',
+ amproclefttype => 'interval', amprocrighttype => 'interval', amprocnum => '2',
amproc => 'brin_minmax_multi_add_value' },
-{ amprocfamily => 'brin/interval_minmax_multi_ops', amproclefttype => 'interval',
- amprocrighttype => 'interval', amprocnum => '3',
+{ amprocfamily => 'brin/interval_minmax_multi_ops',
+ amproclefttype => 'interval', amprocrighttype => 'interval', amprocnum => '3',
amproc => 'brin_minmax_multi_consistent' },
-{ amprocfamily => 'brin/interval_minmax_multi_ops', amproclefttype => 'interval',
- amprocrighttype => 'interval', amprocnum => '4',
+{ amprocfamily => 'brin/interval_minmax_multi_ops',
+ amproclefttype => 'interval', amprocrighttype => 'interval', amprocnum => '4',
amproc => 'brin_minmax_multi_union' },
-{ amprocfamily => 'brin/interval_minmax_multi_ops', amproclefttype => 'interval',
- amprocrighttype => 'interval', amprocnum => '5',
+{ amprocfamily => 'brin/interval_minmax_multi_ops',
+ amproclefttype => 'interval', amprocrighttype => 'interval', amprocnum => '5',
amproc => 'brin_minmax_multi_options' },
-{ amprocfamily => 'brin/interval_minmax_multi_ops', amproclefttype => 'interval',
- amprocrighttype => 'interval', amprocnum => '11',
- amproc => 'brin_minmax_multi_distance_interval' },
+{ amprocfamily => 'brin/interval_minmax_multi_ops',
+ amproclefttype => 'interval', amprocrighttype => 'interval',
+ amprocnum => '11', amproc => 'brin_minmax_multi_distance_interval' },
# bloom interval
{ amprocfamily => 'brin/interval_bloom_ops', amproclefttype => 'interval',
amprocrighttype => 'interval', amprocnum => '5',
amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/interval_bloom_ops', amproclefttype => 'interval',
- amprocrighttype => 'interval', amprocnum => '11',
- amproc => 'interval_hash' },
+ amprocrighttype => 'interval', amprocnum => '11', amproc => 'interval_hash' },
# minmax time with time zone
{ amprocfamily => 'brin/timetz_minmax_ops', amproclefttype => 'timetz',
amprocrighttype => 'timetz', amprocnum => '3',
amproc => 'brin_bloom_consistent' },
{ amprocfamily => 'brin/timetz_bloom_ops', amproclefttype => 'timetz',
- amprocrighttype => 'timetz', amprocnum => '4',
- amproc => 'brin_bloom_union' },
+ amprocrighttype => 'timetz', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/timetz_bloom_ops', amproclefttype => 'timetz',
amprocrighttype => 'timetz', amprocnum => '5',
amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/timetz_bloom_ops', amproclefttype => 'timetz',
- amprocrighttype => 'timetz', amprocnum => '11',
- amproc => 'timetz_hash' },
+ amprocrighttype => 'timetz', amprocnum => '11', amproc => 'timetz_hash' },
# minmax bit
{ amprocfamily => 'brin/bit_minmax_ops', amproclefttype => 'bit',
amprocrighttype => 'numeric', amprocnum => '5',
amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/numeric_bloom_ops', amproclefttype => 'numeric',
- amprocrighttype => 'numeric', amprocnum => '11',
- amproc => 'hash_numeric' },
+ amprocrighttype => 'numeric', amprocnum => '11', amproc => 'hash_numeric' },
# minmax uuid
{ amprocfamily => 'brin/uuid_minmax_ops', amproclefttype => 'uuid',
# bloom uuid
{ amprocfamily => 'brin/uuid_bloom_ops', amproclefttype => 'uuid',
- amprocrighttype => 'uuid', amprocnum => '1',
- amproc => 'brin_bloom_opcinfo' },
+ amprocrighttype => 'uuid', amprocnum => '1', amproc => 'brin_bloom_opcinfo' },
{ amprocfamily => 'brin/uuid_bloom_ops', amproclefttype => 'uuid',
amprocrighttype => 'uuid', amprocnum => '2',
amproc => 'brin_bloom_add_value' },
{ amprocfamily => 'brin/uuid_bloom_ops', amproclefttype => 'uuid',
amprocrighttype => 'uuid', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/uuid_bloom_ops', amproclefttype => 'uuid',
- amprocrighttype => 'uuid', amprocnum => '5',
- amproc => 'brin_bloom_options' },
+ amprocrighttype => 'uuid', amprocnum => '5', amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/uuid_bloom_ops', amproclefttype => 'uuid',
amprocrighttype => 'uuid', amprocnum => '11', amproc => 'uuid_hash' },
amprocrighttype => 'pg_lsn', amprocnum => '3',
amproc => 'brin_bloom_consistent' },
{ amprocfamily => 'brin/pg_lsn_bloom_ops', amproclefttype => 'pg_lsn',
- amprocrighttype => 'pg_lsn', amprocnum => '4',
- amproc => 'brin_bloom_union' },
+ amprocrighttype => 'pg_lsn', amprocnum => '4', amproc => 'brin_bloom_union' },
{ amprocfamily => 'brin/pg_lsn_bloom_ops', amproclefttype => 'pg_lsn',
amprocrighttype => 'pg_lsn', amprocnum => '5',
amproc => 'brin_bloom_options' },
{ amprocfamily => 'brin/pg_lsn_bloom_ops', amproclefttype => 'pg_lsn',
- amprocrighttype => 'pg_lsn', amprocnum => '11',
- amproc => 'pg_lsn_hash' },
+ amprocrighttype => 'pg_lsn', amprocnum => '11', amproc => 'pg_lsn_hash' },
# inclusion box
{ amprocfamily => 'brin/box_inclusion_ops', amproclefttype => 'box',
NameData collcollate; /* LC_COLLATE setting */
NameData collctype; /* LC_CTYPE setting */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
- text collversion BKI_DEFAULT(_null_); /* provider-dependent */
- /* version of */
- /* collation data */
+ text collversion BKI_DEFAULT(_null_); /* provider-dependent
+ * version of collation
+ * data */
#endif
} FormData_pg_collation;
opcfamily => 'brin/bytea_minmax_ops', opcintype => 'bytea',
opckeytype => 'bytea' },
{ opcmethod => 'brin', opcname => 'bytea_bloom_ops',
- opcfamily => 'brin/bytea_bloom_ops', opcintype => 'bytea',
- opckeytype => 'bytea', opcdefault => 'f' },
+ opcfamily => 'brin/bytea_bloom_ops', opcintype => 'bytea', opcdefault => 'f',
+ opckeytype => 'bytea' },
{ opcmethod => 'brin', opcname => 'char_minmax_ops',
opcfamily => 'brin/char_minmax_ops', opcintype => 'char',
opckeytype => 'char' },
{ opcmethod => 'brin', opcname => 'char_bloom_ops',
- opcfamily => 'brin/char_bloom_ops', opcintype => 'char',
- opckeytype => 'char', opcdefault => 'f' },
+ opcfamily => 'brin/char_bloom_ops', opcintype => 'char', opcdefault => 'f',
+ opckeytype => 'char' },
{ opcmethod => 'brin', opcname => 'name_minmax_ops',
opcfamily => 'brin/name_minmax_ops', opcintype => 'name',
opckeytype => 'name' },
{ opcmethod => 'brin', opcname => 'name_bloom_ops',
- opcfamily => 'brin/name_bloom_ops', opcintype => 'name',
- opckeytype => 'name', opcdefault => 'f' },
+ opcfamily => 'brin/name_bloom_ops', opcintype => 'name', opcdefault => 'f',
+ opckeytype => 'name' },
{ opcmethod => 'brin', opcname => 'int8_minmax_ops',
opcfamily => 'brin/integer_minmax_ops', opcintype => 'int8',
opckeytype => 'int8' },
{ opcmethod => 'brin', opcname => 'int8_minmax_multi_ops',
opcfamily => 'brin/integer_minmax_multi_ops', opcintype => 'int8',
- opckeytype => 'int8', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'int8' },
{ opcmethod => 'brin', opcname => 'int8_bloom_ops',
- opcfamily => 'brin/integer_bloom_ops', opcintype => 'int8',
- opckeytype => 'int8', opcdefault => 'f' },
+ opcfamily => 'brin/integer_bloom_ops', opcintype => 'int8', opcdefault => 'f',
+ opckeytype => 'int8' },
{ opcmethod => 'brin', opcname => 'int2_minmax_ops',
opcfamily => 'brin/integer_minmax_ops', opcintype => 'int2',
opckeytype => 'int2' },
{ opcmethod => 'brin', opcname => 'int2_minmax_multi_ops',
opcfamily => 'brin/integer_minmax_multi_ops', opcintype => 'int2',
- opckeytype => 'int2', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'int2' },
{ opcmethod => 'brin', opcname => 'int2_bloom_ops',
- opcfamily => 'brin/integer_bloom_ops', opcintype => 'int2',
- opckeytype => 'int2', opcdefault => 'f' },
+ opcfamily => 'brin/integer_bloom_ops', opcintype => 'int2', opcdefault => 'f',
+ opckeytype => 'int2' },
{ opcmethod => 'brin', opcname => 'int4_minmax_ops',
opcfamily => 'brin/integer_minmax_ops', opcintype => 'int4',
opckeytype => 'int4' },
{ opcmethod => 'brin', opcname => 'int4_minmax_multi_ops',
opcfamily => 'brin/integer_minmax_multi_ops', opcintype => 'int4',
- opckeytype => 'int4', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'int4' },
{ opcmethod => 'brin', opcname => 'int4_bloom_ops',
- opcfamily => 'brin/integer_bloom_ops', opcintype => 'int4',
- opckeytype => 'int4', opcdefault => 'f' },
+ opcfamily => 'brin/integer_bloom_ops', opcintype => 'int4', opcdefault => 'f',
+ opckeytype => 'int4' },
{ opcmethod => 'brin', opcname => 'text_minmax_ops',
opcfamily => 'brin/text_minmax_ops', opcintype => 'text',
opckeytype => 'text' },
{ opcmethod => 'brin', opcname => 'text_bloom_ops',
- opcfamily => 'brin/text_bloom_ops', opcintype => 'text',
- opckeytype => 'text', opcdefault => 'f' },
+ opcfamily => 'brin/text_bloom_ops', opcintype => 'text', opcdefault => 'f',
+ opckeytype => 'text' },
{ opcmethod => 'brin', opcname => 'oid_minmax_ops',
opcfamily => 'brin/oid_minmax_ops', opcintype => 'oid', opckeytype => 'oid' },
{ opcmethod => 'brin', opcname => 'oid_minmax_multi_ops',
opcfamily => 'brin/oid_minmax_multi_ops', opcintype => 'oid',
- opckeytype => 'oid', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'oid' },
{ opcmethod => 'brin', opcname => 'oid_bloom_ops',
- opcfamily => 'brin/oid_bloom_ops', opcintype => 'oid',
- opckeytype => 'oid', opcdefault => 'f' },
+ opcfamily => 'brin/oid_bloom_ops', opcintype => 'oid', opcdefault => 'f',
+ opckeytype => 'oid' },
{ opcmethod => 'brin', opcname => 'tid_minmax_ops',
opcfamily => 'brin/tid_minmax_ops', opcintype => 'tid', opckeytype => 'tid' },
{ opcmethod => 'brin', opcname => 'tid_bloom_ops',
- opcfamily => 'brin/tid_bloom_ops', opcintype => 'tid', opckeytype => 'tid',
- opcdefault => 'f'},
+ opcfamily => 'brin/tid_bloom_ops', opcintype => 'tid', opcdefault => 'f',
+ opckeytype => 'tid' },
{ opcmethod => 'brin', opcname => 'tid_minmax_multi_ops',
opcfamily => 'brin/tid_minmax_multi_ops', opcintype => 'tid',
- opckeytype => 'tid', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'tid' },
{ opcmethod => 'brin', opcname => 'float4_minmax_ops',
opcfamily => 'brin/float_minmax_ops', opcintype => 'float4',
opckeytype => 'float4' },
{ opcmethod => 'brin', opcname => 'float4_minmax_multi_ops',
opcfamily => 'brin/float_minmax_multi_ops', opcintype => 'float4',
- opckeytype => 'float4', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'float4' },
{ opcmethod => 'brin', opcname => 'float4_bloom_ops',
- opcfamily => 'brin/float_bloom_ops', opcintype => 'float4',
- opckeytype => 'float4', opcdefault => 'f' },
+ opcfamily => 'brin/float_bloom_ops', opcintype => 'float4', opcdefault => 'f',
+ opckeytype => 'float4' },
{ opcmethod => 'brin', opcname => 'float8_minmax_ops',
opcfamily => 'brin/float_minmax_ops', opcintype => 'float8',
opckeytype => 'float8' },
{ opcmethod => 'brin', opcname => 'float8_minmax_multi_ops',
opcfamily => 'brin/float_minmax_multi_ops', opcintype => 'float8',
- opckeytype => 'float8', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'float8' },
{ opcmethod => 'brin', opcname => 'float8_bloom_ops',
- opcfamily => 'brin/float_bloom_ops', opcintype => 'float8',
- opckeytype => 'float8', opcdefault => 'f' },
+ opcfamily => 'brin/float_bloom_ops', opcintype => 'float8', opcdefault => 'f',
+ opckeytype => 'float8' },
{ opcmethod => 'brin', opcname => 'macaddr_minmax_ops',
opcfamily => 'brin/macaddr_minmax_ops', opcintype => 'macaddr',
opckeytype => 'macaddr' },
{ opcmethod => 'brin', opcname => 'macaddr_minmax_multi_ops',
opcfamily => 'brin/macaddr_minmax_multi_ops', opcintype => 'macaddr',
- opckeytype => 'macaddr', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'macaddr' },
{ opcmethod => 'brin', opcname => 'macaddr_bloom_ops',
opcfamily => 'brin/macaddr_bloom_ops', opcintype => 'macaddr',
- opckeytype => 'macaddr', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'macaddr' },
{ opcmethod => 'brin', opcname => 'macaddr8_minmax_ops',
opcfamily => 'brin/macaddr8_minmax_ops', opcintype => 'macaddr8',
opckeytype => 'macaddr8' },
{ opcmethod => 'brin', opcname => 'macaddr8_minmax_multi_ops',
opcfamily => 'brin/macaddr8_minmax_multi_ops', opcintype => 'macaddr8',
- opckeytype => 'macaddr8', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'macaddr8' },
{ opcmethod => 'brin', opcname => 'macaddr8_bloom_ops',
opcfamily => 'brin/macaddr8_bloom_ops', opcintype => 'macaddr8',
- opckeytype => 'macaddr8', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'macaddr8' },
{ opcmethod => 'brin', opcname => 'inet_minmax_ops',
opcfamily => 'brin/network_minmax_ops', opcintype => 'inet',
opcdefault => 'f', opckeytype => 'inet' },
{ opcmethod => 'brin', opcname => 'inet_minmax_multi_ops',
opcfamily => 'brin/network_minmax_multi_ops', opcintype => 'inet',
- opcdefault => 'f', opckeytype => 'inet', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'inet' },
{ opcmethod => 'brin', opcname => 'inet_bloom_ops',
- opcfamily => 'brin/network_bloom_ops', opcintype => 'inet',
- opcdefault => 'f', opckeytype => 'inet', opcdefault => 'f' },
+ opcfamily => 'brin/network_bloom_ops', opcintype => 'inet', opcdefault => 'f',
+ opckeytype => 'inet' },
{ opcmethod => 'brin', opcname => 'inet_inclusion_ops',
opcfamily => 'brin/network_inclusion_ops', opcintype => 'inet',
opckeytype => 'inet' },
opckeytype => 'bpchar' },
{ opcmethod => 'brin', opcname => 'bpchar_bloom_ops',
opcfamily => 'brin/bpchar_bloom_ops', opcintype => 'bpchar',
- opckeytype => 'bpchar', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'bpchar' },
{ opcmethod => 'brin', opcname => 'time_minmax_ops',
opcfamily => 'brin/time_minmax_ops', opcintype => 'time',
opckeytype => 'time' },
{ opcmethod => 'brin', opcname => 'time_minmax_multi_ops',
opcfamily => 'brin/time_minmax_multi_ops', opcintype => 'time',
- opckeytype => 'time', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'time' },
{ opcmethod => 'brin', opcname => 'time_bloom_ops',
- opcfamily => 'brin/time_bloom_ops', opcintype => 'time',
- opckeytype => 'time', opcdefault => 'f' },
+ opcfamily => 'brin/time_bloom_ops', opcintype => 'time', opcdefault => 'f',
+ opckeytype => 'time' },
{ opcmethod => 'brin', opcname => 'date_minmax_ops',
opcfamily => 'brin/datetime_minmax_ops', opcintype => 'date',
opckeytype => 'date' },
{ opcmethod => 'brin', opcname => 'date_minmax_multi_ops',
opcfamily => 'brin/datetime_minmax_multi_ops', opcintype => 'date',
- opckeytype => 'date', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'date' },
{ opcmethod => 'brin', opcname => 'date_bloom_ops',
opcfamily => 'brin/datetime_bloom_ops', opcintype => 'date',
- opckeytype => 'date', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'date' },
{ opcmethod => 'brin', opcname => 'timestamp_minmax_ops',
opcfamily => 'brin/datetime_minmax_ops', opcintype => 'timestamp',
opckeytype => 'timestamp' },
{ opcmethod => 'brin', opcname => 'timestamp_minmax_multi_ops',
opcfamily => 'brin/datetime_minmax_multi_ops', opcintype => 'timestamp',
- opckeytype => 'timestamp', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'timestamp' },
{ opcmethod => 'brin', opcname => 'timestamp_bloom_ops',
opcfamily => 'brin/datetime_bloom_ops', opcintype => 'timestamp',
- opckeytype => 'timestamp', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'timestamp' },
{ opcmethod => 'brin', opcname => 'timestamptz_minmax_ops',
opcfamily => 'brin/datetime_minmax_ops', opcintype => 'timestamptz',
opckeytype => 'timestamptz' },
{ opcmethod => 'brin', opcname => 'timestamptz_minmax_multi_ops',
opcfamily => 'brin/datetime_minmax_multi_ops', opcintype => 'timestamptz',
- opckeytype => 'timestamptz', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'timestamptz' },
{ opcmethod => 'brin', opcname => 'timestamptz_bloom_ops',
opcfamily => 'brin/datetime_bloom_ops', opcintype => 'timestamptz',
- opckeytype => 'timestamptz', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'timestamptz' },
{ opcmethod => 'brin', opcname => 'interval_minmax_ops',
opcfamily => 'brin/interval_minmax_ops', opcintype => 'interval',
opckeytype => 'interval' },
{ opcmethod => 'brin', opcname => 'interval_minmax_multi_ops',
opcfamily => 'brin/interval_minmax_multi_ops', opcintype => 'interval',
- opckeytype => 'interval', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'interval' },
{ opcmethod => 'brin', opcname => 'interval_bloom_ops',
opcfamily => 'brin/interval_bloom_ops', opcintype => 'interval',
- opckeytype => 'interval', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'interval' },
{ opcmethod => 'brin', opcname => 'timetz_minmax_ops',
opcfamily => 'brin/timetz_minmax_ops', opcintype => 'timetz',
opckeytype => 'timetz' },
{ opcmethod => 'brin', opcname => 'timetz_minmax_multi_ops',
opcfamily => 'brin/timetz_minmax_multi_ops', opcintype => 'timetz',
- opckeytype => 'timetz', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'timetz' },
{ opcmethod => 'brin', opcname => 'timetz_bloom_ops',
opcfamily => 'brin/timetz_bloom_ops', opcintype => 'timetz',
- opckeytype => 'timetz', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'timetz' },
{ opcmethod => 'brin', opcname => 'bit_minmax_ops',
opcfamily => 'brin/bit_minmax_ops', opcintype => 'bit', opckeytype => 'bit' },
{ opcmethod => 'brin', opcname => 'varbit_minmax_ops',
opckeytype => 'numeric' },
{ opcmethod => 'brin', opcname => 'numeric_minmax_multi_ops',
opcfamily => 'brin/numeric_minmax_multi_ops', opcintype => 'numeric',
- opckeytype => 'numeric', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'numeric' },
{ opcmethod => 'brin', opcname => 'numeric_bloom_ops',
opcfamily => 'brin/numeric_bloom_ops', opcintype => 'numeric',
- opckeytype => 'numeric', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'numeric' },
# no brin opclass for record, anyarray
opckeytype => 'uuid' },
{ opcmethod => 'brin', opcname => 'uuid_minmax_multi_ops',
opcfamily => 'brin/uuid_minmax_multi_ops', opcintype => 'uuid',
- opckeytype => 'uuid', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'uuid' },
{ opcmethod => 'brin', opcname => 'uuid_bloom_ops',
- opcfamily => 'brin/uuid_bloom_ops', opcintype => 'uuid',
- opckeytype => 'uuid', opcdefault => 'f' },
+ opcfamily => 'brin/uuid_bloom_ops', opcintype => 'uuid', opcdefault => 'f',
+ opckeytype => 'uuid' },
{ opcmethod => 'brin', opcname => 'range_inclusion_ops',
opcfamily => 'brin/range_inclusion_ops', opcintype => 'anyrange',
opckeytype => 'anyrange' },
opckeytype => 'pg_lsn' },
{ opcmethod => 'brin', opcname => 'pg_lsn_minmax_multi_ops',
opcfamily => 'brin/pg_lsn_minmax_multi_ops', opcintype => 'pg_lsn',
- opckeytype => 'pg_lsn', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'pg_lsn' },
{ opcmethod => 'brin', opcname => 'pg_lsn_bloom_ops',
opcfamily => 'brin/pg_lsn_bloom_ops', opcintype => 'pg_lsn',
- opckeytype => 'pg_lsn', opcdefault => 'f' },
+ opcdefault => 'f', opckeytype => 'pg_lsn' },
# no brin opclass for enum, tsvector, tsquery, jsonb
oprname => '>', oprleft => 'tid', oprright => 'tid', oprresult => 'bool',
oprcom => '<(tid,tid)', oprnegate => '<=(tid,tid)', oprcode => 'tidgt',
oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' },
-{ oid => '2801', oid_symbol => 'TIDLessEqOperator', descr => 'less than or equal',
+{ oid => '2801', oid_symbol => 'TIDLessEqOperator',
+ descr => 'less than or equal',
oprname => '<=', oprleft => 'tid', oprright => 'tid', oprresult => 'bool',
oprcom => '>=(tid,tid)', oprnegate => '>(tid,tid)', oprcode => 'tidle',
oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' },
-{ oid => '2802', oid_symbol => 'TIDGreaterEqOperator', descr => 'greater than or equal',
+{ oid => '2802', oid_symbol => 'TIDGreaterEqOperator',
+ descr => 'greater than or equal',
oprname => '>=', oprleft => 'tid', oprright => 'tid', oprresult => 'bool',
oprcom => '<=(tid,tid)', oprnegate => '<(tid,tid)', oprcode => 'tidge',
oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' },
proargtypes => 'bytea bytea int4', prosrc => 'byteaoverlay_no_len' },
{ oid => '8436', descr => 'number of set bits',
proname => 'bit_count', prorettype => 'int8', proargtypes => 'bytea',
- prosrc => 'bytea_bit_count'},
+ prosrc => 'bytea_bit_count' },
{ oid => '725',
proname => 'dist_pl', prorettype => 'float8', proargtypes => 'point line',
{ oid => '8103', descr => 'list of catalog foreign key relationships',
proname => 'pg_get_catalog_foreign_keys', procost => '10', prorows => '250',
proretset => 't', provolatile => 's', prorettype => 'record',
- proargtypes => '', proallargtypes => '{regclass,_text,regclass,_text,bool,bool}',
+ proargtypes => '',
+ proallargtypes => '{regclass,_text,regclass,_text,bool,bool}',
proargmodes => '{o,o,o,o,o,o}',
proargnames => '{fktable,fkcols,pktable,pkcols,is_array,is_opt}',
prosrc => 'pg_get_catalog_foreign_keys' },
prosrc => 'bitsetbit' },
{ oid => '8435', descr => 'number of set bits',
proname => 'bit_count', prorettype => 'int8', proargtypes => 'bit',
- prosrc => 'bit_bit_count'},
+ prosrc => 'bit_bit_count' },
# for macaddr type support
{ oid => '436', descr => 'I/O',
proargnames => '{pid,status,receive_start_lsn,receive_start_tli,written_lsn,flushed_lsn,received_tli,last_msg_send_time,last_msg_receipt_time,latest_end_lsn,latest_end_time,slot_name,sender_host,sender_port,conninfo}',
prosrc => 'pg_stat_get_wal_receiver' },
{ oid => '8595', descr => 'statistics: information about replication slot',
- proname => 'pg_stat_get_replication_slot', prorows => '1',
- proisstrict => 'f', proretset => 't', provolatile => 's', proparallel => 'r',
+ proname => 'pg_stat_get_replication_slot', prorows => '1', proisstrict => 'f',
+ proretset => 't', provolatile => 's', proparallel => 'r',
prorettype => 'record', proargtypes => 'text',
proallargtypes => '{text,text,int8,int8,int8,int8,int8,int8,int8,int8,timestamptz}',
proargmodes => '{i,o,o,o,o,o,o,o,o,o,o}',
proname => 'date_trunc', prorettype => 'timestamp',
proargtypes => 'text timestamp', prosrc => 'timestamp_trunc' },
-{ oid => '8990',
- descr => 'bin timestamp into specified interval',
+{ oid => '8990', descr => 'bin timestamp into specified interval',
proname => 'date_bin', prorettype => 'timestamp',
- proargtypes => 'interval timestamp timestamp',
- prosrc => 'timestamp_bin' },
+ proargtypes => 'interval timestamp timestamp', prosrc => 'timestamp_bin' },
{ oid => '8993',
descr => 'bin timestamp with time zone into specified interval',
proname => 'date_bin', prorettype => 'timestamptz',
- proargtypes => 'interval timestamptz timestamptz', prosrc => 'timestamptz_bin' },
+ proargtypes => 'interval timestamptz timestamptz',
+ prosrc => 'timestamptz_bin' },
{ oid => '2021', descr => 'extract field from timestamp',
proname => 'date_part', prorettype => 'float8',
{ oid => '2171', descr => 'cancel a server process\' current query',
proname => 'pg_cancel_backend', provolatile => 'v', prorettype => 'bool',
proargtypes => 'int4', prosrc => 'pg_cancel_backend' },
-{ oid => '2096', descr => 'terminate a backend process and if timeout is specified, wait for its exit or until timeout occurs',
+{ oid => '2096',
+ descr => 'terminate a backend process and if timeout is specified, wait for its exit or until timeout occurs',
proname => 'pg_terminate_backend', provolatile => 'v', prorettype => 'bool',
proargtypes => 'int4 int8', proargnames => '{pid,timeout}',
prosrc => 'pg_terminate_backend' },
{ oid => '2137', descr => 'wait for a backend process exit or timeout occurs',
- proname => 'pg_wait_for_backend_termination', provolatile => 'v', prorettype => 'bool',
- proargtypes => 'int4 int8', proargnames => '{pid,timeout}',
- prosrc => 'pg_wait_for_backend_termination' },
+ proname => 'pg_wait_for_backend_termination', provolatile => 'v',
+ prorettype => 'bool', proargtypes => 'int4 int8',
+ proargnames => '{pid,timeout}', prosrc => 'pg_wait_for_backend_termination' },
{ oid => '2172', descr => 'prepare for taking an online backup',
proname => 'pg_start_backup', provolatile => 'v', proparallel => 'r',
prorettype => 'pg_lsn', proargtypes => 'text bool bool',
descr => 'bytes required to store the value, perhaps with compression',
proname => 'pg_column_size', provolatile => 's', prorettype => 'int4',
proargtypes => 'any', prosrc => 'pg_column_size' },
-{ oid => '2121',
- descr => 'compression method for the compressed datum',
+{ oid => '2121', descr => 'compression method for the compressed datum',
proname => 'pg_column_compression', provolatile => 's', prorettype => 'text',
proargtypes => 'any', prosrc => 'pg_column_compression' },
{ oid => '2322',
# logging memory contexts of the specified backend
{ oid => '4543', descr => 'log memory contexts of the specified backend',
- proname => 'pg_log_backend_memory_contexts',
- provolatile => 'v', prorettype => 'bool',
- proargtypes => 'int4', prosrc => 'pg_log_backend_memory_contexts' },
+ proname => 'pg_log_backend_memory_contexts', provolatile => 'v',
+ prorettype => 'bool', proargtypes => 'int4',
+ prosrc => 'pg_log_backend_memory_contexts' },
# non-persistent series generator
{ oid => '1066', descr => 'non-persistent series generator',
proname => 'bit_or', prokind => 'a', proisstrict => 'f', prorettype => 'int2',
proargtypes => 'int2', prosrc => 'aggregate_dummy' },
{ oid => '8452', descr => 'bitwise-xor smallint aggregate',
- proname => 'bit_xor', prokind => 'a', proisstrict => 'f', prorettype => 'int2',
- proargtypes => 'int2', prosrc => 'aggregate_dummy' },
+ proname => 'bit_xor', prokind => 'a', proisstrict => 'f',
+ prorettype => 'int2', proargtypes => 'int2', prosrc => 'aggregate_dummy' },
{ oid => '2238', descr => 'bitwise-and integer aggregate',
proname => 'bit_and', prokind => 'a', proisstrict => 'f',
prorettype => 'int4', proargtypes => 'int4', prosrc => 'aggregate_dummy' },
proname => 'bit_or', prokind => 'a', proisstrict => 'f', prorettype => 'int4',
proargtypes => 'int4', prosrc => 'aggregate_dummy' },
{ oid => '8453', descr => 'bitwise-xor integer aggregate',
- proname => 'bit_xor', prokind => 'a', proisstrict => 'f', prorettype => 'int4',
- proargtypes => 'int4', prosrc => 'aggregate_dummy' },
+ proname => 'bit_xor', prokind => 'a', proisstrict => 'f',
+ prorettype => 'int4', proargtypes => 'int4', prosrc => 'aggregate_dummy' },
{ oid => '2240', descr => 'bitwise-and bigint aggregate',
proname => 'bit_and', prokind => 'a', proisstrict => 'f',
prorettype => 'int8', proargtypes => 'int8', prosrc => 'aggregate_dummy' },
proname => 'bit_or', prokind => 'a', proisstrict => 'f', prorettype => 'int8',
proargtypes => 'int8', prosrc => 'aggregate_dummy' },
{ oid => '8454', descr => 'bitwise-xor bigint aggregate',
- proname => 'bit_xor', prokind => 'a', proisstrict => 'f', prorettype => 'int8',
- proargtypes => 'int8', prosrc => 'aggregate_dummy' },
+ proname => 'bit_xor', prokind => 'a', proisstrict => 'f',
+ prorettype => 'int8', proargtypes => 'int8', prosrc => 'aggregate_dummy' },
{ oid => '2242', descr => 'bitwise-and bit aggregate',
proname => 'bit_and', prokind => 'a', proisstrict => 'f', prorettype => 'bit',
proargtypes => 'bit', prosrc => 'aggregate_dummy' },
prosrc => 'brin_minmax_multi_consistent' },
{ oid => '4619', descr => 'BRIN multi minmax support',
proname => 'brin_minmax_multi_union', prorettype => 'bool',
- proargtypes => 'internal internal internal', prosrc => 'brin_minmax_multi_union' },
+ proargtypes => 'internal internal internal',
+ prosrc => 'brin_minmax_multi_union' },
{ oid => '4620', descr => 'BRIN multi minmax support',
- proname => 'brin_minmax_multi_options', prorettype => 'void', proisstrict => 'f',
- proargtypes => 'internal', prosrc => 'brin_minmax_multi_options' },
+ proname => 'brin_minmax_multi_options', proisstrict => 'f',
+ prorettype => 'void', proargtypes => 'internal',
+ prosrc => 'brin_minmax_multi_options' },
{ oid => '4621', descr => 'BRIN multi minmax int2 distance',
proname => 'brin_minmax_multi_distance_int2', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_int2' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_int2' },
{ oid => '4622', descr => 'BRIN multi minmax int4 distance',
proname => 'brin_minmax_multi_distance_int4', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_int4' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_int4' },
{ oid => '4623', descr => 'BRIN multi minmax int8 distance',
proname => 'brin_minmax_multi_distance_int8', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_int8' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_int8' },
{ oid => '4624', descr => 'BRIN multi minmax float4 distance',
proname => 'brin_minmax_multi_distance_float4', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_float4' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_float4' },
{ oid => '4625', descr => 'BRIN multi minmax float8 distance',
proname => 'brin_minmax_multi_distance_float8', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_float8' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_float8' },
{ oid => '4626', descr => 'BRIN multi minmax numeric distance',
proname => 'brin_minmax_multi_distance_numeric', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_numeric' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_numeric' },
{ oid => '4627', descr => 'BRIN multi minmax tid distance',
proname => 'brin_minmax_multi_distance_tid', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_tid' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_tid' },
{ oid => '4628', descr => 'BRIN multi minmax uuid distance',
proname => 'brin_minmax_multi_distance_uuid', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_uuid' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_uuid' },
{ oid => '4629', descr => 'BRIN multi minmax date distance',
proname => 'brin_minmax_multi_distance_date', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_date' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_date' },
{ oid => '4630', descr => 'BRIN multi minmax time distance',
proname => 'brin_minmax_multi_distance_time', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_time' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_time' },
{ oid => '4631', descr => 'BRIN multi minmax interval distance',
proname => 'brin_minmax_multi_distance_interval', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_interval' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_interval' },
{ oid => '4632', descr => 'BRIN multi minmax timetz distance',
proname => 'brin_minmax_multi_distance_timetz', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_timetz' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_timetz' },
{ oid => '4633', descr => 'BRIN multi minmax pg_lsn distance',
proname => 'brin_minmax_multi_distance_pg_lsn', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_pg_lsn' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_pg_lsn' },
{ oid => '4634', descr => 'BRIN multi minmax macaddr distance',
proname => 'brin_minmax_multi_distance_macaddr', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_macaddr' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_macaddr' },
{ oid => '4635', descr => 'BRIN multi minmax macaddr8 distance',
proname => 'brin_minmax_multi_distance_macaddr8', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_macaddr8' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_macaddr8' },
{ oid => '4636', descr => 'BRIN multi minmax inet distance',
proname => 'brin_minmax_multi_distance_inet', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_inet' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_inet' },
{ oid => '4637', descr => 'BRIN multi minmax timestamp distance',
proname => 'brin_minmax_multi_distance_timestamp', prorettype => 'float8',
- proargtypes => 'internal internal', prosrc => 'brin_minmax_multi_distance_timestamp' },
+ proargtypes => 'internal internal',
+ prosrc => 'brin_minmax_multi_distance_timestamp' },
# BRIN inclusion
{ oid => '4105', descr => 'BRIN inclusion support',
prosrc => 'brin_bloom_consistent' },
{ oid => '4594', descr => 'BRIN bloom support',
proname => 'brin_bloom_union', prorettype => 'bool',
- proargtypes => 'internal internal internal',
- prosrc => 'brin_bloom_union' },
+ proargtypes => 'internal internal internal', prosrc => 'brin_bloom_union' },
{ oid => '4595', descr => 'BRIN bloom support',
- proname => 'brin_bloom_options', prorettype => 'void', proisstrict => 'f',
+ proname => 'brin_bloom_options', proisstrict => 'f', prorettype => 'void',
proargtypes => 'internal', prosrc => 'brin_bloom_options' },
# userlock replacements
proargtypes => 'anymultirange int8', prosrc => 'hash_multirange_extended' },
{ oid => '4280', descr => 'int4multirange constructor',
- proname => 'int4multirange',
- prorettype => 'int4multirange', proargtypes => '',
- prosrc => 'multirange_constructor0' },
+ proname => 'int4multirange', prorettype => 'int4multirange',
+ proargtypes => '', prosrc => 'multirange_constructor0' },
{ oid => '4281', descr => 'int4multirange constructor',
proname => 'int4multirange', prorettype => 'int4multirange',
proargtypes => 'int4range', prosrc => 'multirange_constructor1' },
proallargtypes => '{_int4range}', proargmodes => '{v}',
prosrc => 'multirange_constructor2' },
{ oid => '4283', descr => 'nummultirange constructor',
- proname => 'nummultirange', prorettype => 'nummultirange',
- proargtypes => '', prosrc => 'multirange_constructor0' },
+ proname => 'nummultirange', prorettype => 'nummultirange', proargtypes => '',
+ prosrc => 'multirange_constructor0' },
{ oid => '4284', descr => 'nummultirange constructor',
proname => 'nummultirange', prorettype => 'nummultirange',
proargtypes => 'numrange', prosrc => 'multirange_constructor1' },
proallargtypes => '{_numrange}', proargmodes => '{v}',
prosrc => 'multirange_constructor2' },
{ oid => '4286', descr => 'tsmultirange constructor',
- proname => 'tsmultirange', prorettype => 'tsmultirange',
- proargtypes => '', prosrc => 'multirange_constructor0' },
+ proname => 'tsmultirange', prorettype => 'tsmultirange', proargtypes => '',
+ prosrc => 'multirange_constructor0' },
{ oid => '4287', descr => 'tsmultirange constructor',
proname => 'tsmultirange', prorettype => 'tsmultirange',
proargtypes => 'tsrange', prosrc => 'multirange_constructor1' },
proallargtypes => '{_tsrange}', proargmodes => '{v}',
prosrc => 'multirange_constructor2' },
{ oid => '4289', descr => 'tstzmultirange constructor',
- proname => 'tstzmultirange',
- prorettype => 'tstzmultirange', proargtypes => '',
- prosrc => 'multirange_constructor0' },
+ proname => 'tstzmultirange', prorettype => 'tstzmultirange',
+ proargtypes => '', prosrc => 'multirange_constructor0' },
{ oid => '4290', descr => 'tstzmultirange constructor',
proname => 'tstzmultirange', prorettype => 'tstzmultirange',
proargtypes => 'tstzrange', prosrc => 'multirange_constructor1' },
proallargtypes => '{_tstzrange}', proargmodes => '{v}',
prosrc => 'multirange_constructor2' },
{ oid => '4292', descr => 'datemultirange constructor',
- proname => 'datemultirange',
- prorettype => 'datemultirange', proargtypes => '',
- prosrc => 'multirange_constructor0' },
+ proname => 'datemultirange', prorettype => 'datemultirange',
+ proargtypes => '', prosrc => 'multirange_constructor0' },
{ oid => '4293', descr => 'datemultirange constructor',
proname => 'datemultirange', prorettype => 'datemultirange',
proargtypes => 'daterange', prosrc => 'multirange_constructor1' },
proallargtypes => '{_daterange}', proargmodes => '{v}',
prosrc => 'multirange_constructor2' },
{ oid => '4295', descr => 'int8multirange constructor',
- proname => 'int8multirange',
- prorettype => 'int8multirange', proargtypes => '',
- prosrc => 'multirange_constructor0' },
+ proname => 'int8multirange', prorettype => 'int8multirange',
+ proargtypes => '', prosrc => 'multirange_constructor0' },
{ oid => '4296', descr => 'int8multirange constructor',
proname => 'int8multirange', prorettype => 'int8multirange',
proargtypes => 'int8range', prosrc => 'multirange_constructor1' },
prosrc => 'pg_get_replication_slots' },
{ oid => '3786', descr => 'set up a logical replication slot',
proname => 'pg_create_logical_replication_slot', provolatile => 'v',
- proparallel => 'u', prorettype => 'record', proargtypes => 'name name bool bool',
+ proparallel => 'u', prorettype => 'record',
+ proargtypes => 'name name bool bool',
proallargtypes => '{name,name,bool,bool,name,pg_lsn}',
proargmodes => '{i,i,i,i,o,o}',
proargnames => '{slot_name,plugin,temporary,twophase,slot_name,lsn}',
{ oid => '4302',
descr => 'internal conversion function for KOI8R to MULE_INTERNAL',
proname => 'koi8r_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'koi8r_to_mic',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'koi8r_to_mic', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4303',
descr => 'internal conversion function for MULE_INTERNAL to KOI8R',
proname => 'mic_to_koi8r', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_koi8r',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_koi8r', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4304',
descr => 'internal conversion function for ISO-8859-5 to MULE_INTERNAL',
proname => 'iso_to_mic', prolang => 'c', prorettype => 'int4',
{ oid => '4306',
descr => 'internal conversion function for WIN1251 to MULE_INTERNAL',
proname => 'win1251_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'win1251_to_mic',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'win1251_to_mic', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4307',
descr => 'internal conversion function for MULE_INTERNAL to WIN1251',
proname => 'mic_to_win1251', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_win1251',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_win1251', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4308',
descr => 'internal conversion function for WIN866 to MULE_INTERNAL',
proname => 'win866_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'win866_to_mic',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'win866_to_mic', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4309',
descr => 'internal conversion function for MULE_INTERNAL to WIN866',
proname => 'mic_to_win866', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_win866',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_win866', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4310', descr => 'internal conversion function for KOI8R to WIN1251',
proname => 'koi8r_to_win1251', prolang => 'c', prorettype => 'int4',
proargtypes => 'int4 int4 cstring internal int4 bool',
prosrc => 'win1251_to_koi8r', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4312', descr => 'internal conversion function for KOI8R to WIN866',
proname => 'koi8r_to_win866', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'koi8r_to_win866',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'koi8r_to_win866', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4313', descr => 'internal conversion function for WIN866 to KOI8R',
proname => 'win866_to_koi8r', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'win866_to_koi8r',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'win866_to_koi8r', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4314',
descr => 'internal conversion function for WIN866 to WIN1251',
proname => 'win866_to_win1251', prolang => 'c', prorettype => 'int4',
{ oid => '4316',
descr => 'internal conversion function for ISO-8859-5 to KOI8R',
proname => 'iso_to_koi8r', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'iso_to_koi8r',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'iso_to_koi8r', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4317',
descr => 'internal conversion function for KOI8R to ISO-8859-5',
proname => 'koi8r_to_iso', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'koi8r_to_iso',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'koi8r_to_iso', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4318',
descr => 'internal conversion function for ISO-8859-5 to WIN1251',
proname => 'iso_to_win1251', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'iso_to_win1251',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'iso_to_win1251', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4319',
descr => 'internal conversion function for WIN1251 to ISO-8859-5',
proname => 'win1251_to_iso', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'win1251_to_iso',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'win1251_to_iso', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4320',
descr => 'internal conversion function for ISO-8859-5 to WIN866',
proname => 'iso_to_win866', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'iso_to_win866',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'iso_to_win866', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4321',
descr => 'internal conversion function for WIN866 to ISO-8859-5',
proname => 'win866_to_iso', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'win866_to_iso',
- probin => '$libdir/cyrillic_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'win866_to_iso', probin => '$libdir/cyrillic_and_mic' },
{ oid => '4322',
descr => 'internal conversion function for EUC_CN to MULE_INTERNAL',
proname => 'euc_cn_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_cn_to_mic',
- probin => '$libdir/euc_cn_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_cn_to_mic', probin => '$libdir/euc_cn_and_mic' },
{ oid => '4323',
descr => 'internal conversion function for MULE_INTERNAL to EUC_CN',
proname => 'mic_to_euc_cn', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_euc_cn',
- probin => '$libdir/euc_cn_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_euc_cn', probin => '$libdir/euc_cn_and_mic' },
{ oid => '4324', descr => 'internal conversion function for EUC_JP to SJIS',
proname => 'euc_jp_to_sjis', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_jp_to_sjis',
- probin => '$libdir/euc_jp_and_sjis' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_jp_to_sjis', probin => '$libdir/euc_jp_and_sjis' },
{ oid => '4325', descr => 'internal conversion function for SJIS to EUC_JP',
proname => 'sjis_to_euc_jp', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'sjis_to_euc_jp',
- probin => '$libdir/euc_jp_and_sjis' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'sjis_to_euc_jp', probin => '$libdir/euc_jp_and_sjis' },
{ oid => '4326',
descr => 'internal conversion function for EUC_JP to MULE_INTERNAL',
proname => 'euc_jp_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_jp_to_mic',
- probin => '$libdir/euc_jp_and_sjis' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_jp_to_mic', probin => '$libdir/euc_jp_and_sjis' },
{ oid => '4327',
descr => 'internal conversion function for SJIS to MULE_INTERNAL',
proname => 'sjis_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'sjis_to_mic',
- probin => '$libdir/euc_jp_and_sjis' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'sjis_to_mic', probin => '$libdir/euc_jp_and_sjis' },
{ oid => '4328',
descr => 'internal conversion function for MULE_INTERNAL to EUC_JP',
proname => 'mic_to_euc_jp', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_euc_jp',
- probin => '$libdir/euc_jp_and_sjis' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_euc_jp', probin => '$libdir/euc_jp_and_sjis' },
{ oid => '4329',
descr => 'internal conversion function for MULE_INTERNAL to SJIS',
proname => 'mic_to_sjis', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_sjis',
- probin => '$libdir/euc_jp_and_sjis' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_sjis', probin => '$libdir/euc_jp_and_sjis' },
{ oid => '4330',
descr => 'internal conversion function for EUC_KR to MULE_INTERNAL',
proname => 'euc_kr_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_kr_to_mic',
- probin => '$libdir/euc_kr_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_kr_to_mic', probin => '$libdir/euc_kr_and_mic' },
{ oid => '4331',
descr => 'internal conversion function for MULE_INTERNAL to EUC_KR',
proname => 'mic_to_euc_kr', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_euc_kr',
- probin => '$libdir/euc_kr_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_euc_kr', probin => '$libdir/euc_kr_and_mic' },
{ oid => '4332', descr => 'internal conversion function for EUC_TW to BIG5',
proname => 'euc_tw_to_big5', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_tw_to_big5',
- probin => '$libdir/euc_tw_and_big5' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_tw_to_big5', probin => '$libdir/euc_tw_and_big5' },
{ oid => '4333', descr => 'internal conversion function for BIG5 to EUC_TW',
proname => 'big5_to_euc_tw', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'big5_to_euc_tw',
- probin => '$libdir/euc_tw_and_big5' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'big5_to_euc_tw', probin => '$libdir/euc_tw_and_big5' },
{ oid => '4334',
descr => 'internal conversion function for EUC_TW to MULE_INTERNAL',
proname => 'euc_tw_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_tw_to_mic',
- probin => '$libdir/euc_tw_and_big5' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_tw_to_mic', probin => '$libdir/euc_tw_and_big5' },
{ oid => '4335',
descr => 'internal conversion function for BIG5 to MULE_INTERNAL',
proname => 'big5_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'big5_to_mic',
- probin => '$libdir/euc_tw_and_big5' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'big5_to_mic', probin => '$libdir/euc_tw_and_big5' },
{ oid => '4336',
descr => 'internal conversion function for MULE_INTERNAL to EUC_TW',
proname => 'mic_to_euc_tw', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_euc_tw',
- probin => '$libdir/euc_tw_and_big5' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_euc_tw', probin => '$libdir/euc_tw_and_big5' },
{ oid => '4337',
descr => 'internal conversion function for MULE_INTERNAL to BIG5',
proname => 'mic_to_big5', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_big5',
- probin => '$libdir/euc_tw_and_big5' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_big5', probin => '$libdir/euc_tw_and_big5' },
{ oid => '4338',
descr => 'internal conversion function for LATIN2 to MULE_INTERNAL',
proname => 'latin2_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'latin2_to_mic',
- probin => '$libdir/latin2_and_win1250' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'latin2_to_mic', probin => '$libdir/latin2_and_win1250' },
{ oid => '4339',
descr => 'internal conversion function for MULE_INTERNAL to LATIN2',
proname => 'mic_to_latin2', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_latin2',
- probin => '$libdir/latin2_and_win1250' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_latin2', probin => '$libdir/latin2_and_win1250' },
{ oid => '4340',
descr => 'internal conversion function for WIN1250 to MULE_INTERNAL',
proname => 'win1250_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'win1250_to_mic',
- probin => '$libdir/latin2_and_win1250' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'win1250_to_mic', probin => '$libdir/latin2_and_win1250' },
{ oid => '4341',
descr => 'internal conversion function for MULE_INTERNAL to WIN1250',
proname => 'mic_to_win1250', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_win1250',
- probin => '$libdir/latin2_and_win1250' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_win1250', probin => '$libdir/latin2_and_win1250' },
{ oid => '4342',
descr => 'internal conversion function for LATIN2 to WIN1250',
proname => 'latin2_to_win1250', prolang => 'c', prorettype => 'int4',
{ oid => '4344',
descr => 'internal conversion function for LATIN1 to MULE_INTERNAL',
proname => 'latin1_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'latin1_to_mic',
- probin => '$libdir/latin_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'latin1_to_mic', probin => '$libdir/latin_and_mic' },
{ oid => '4345',
descr => 'internal conversion function for MULE_INTERNAL to LATIN1',
proname => 'mic_to_latin1', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_latin1',
- probin => '$libdir/latin_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_latin1', probin => '$libdir/latin_and_mic' },
{ oid => '4346',
descr => 'internal conversion function for LATIN3 to MULE_INTERNAL',
proname => 'latin3_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'latin3_to_mic',
- probin => '$libdir/latin_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'latin3_to_mic', probin => '$libdir/latin_and_mic' },
{ oid => '4347',
descr => 'internal conversion function for MULE_INTERNAL to LATIN3',
proname => 'mic_to_latin3', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_latin3',
- probin => '$libdir/latin_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_latin3', probin => '$libdir/latin_and_mic' },
{ oid => '4348',
descr => 'internal conversion function for LATIN4 to MULE_INTERNAL',
proname => 'latin4_to_mic', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'latin4_to_mic',
- probin => '$libdir/latin_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'latin4_to_mic', probin => '$libdir/latin_and_mic' },
{ oid => '4349',
descr => 'internal conversion function for MULE_INTERNAL to LATIN4',
proname => 'mic_to_latin4', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'mic_to_latin4',
- probin => '$libdir/latin_and_mic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'mic_to_latin4', probin => '$libdir/latin_and_mic' },
{ oid => '4352', descr => 'internal conversion function for BIG5 to UTF8',
proname => 'big5_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'big5_to_utf8',
- probin => '$libdir/utf8_and_big5' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'big5_to_utf8', probin => '$libdir/utf8_and_big5' },
{ oid => '4353', descr => 'internal conversion function for UTF8 to BIG5',
proname => 'utf8_to_big5', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_big5',
- probin => '$libdir/utf8_and_big5' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_big5', probin => '$libdir/utf8_and_big5' },
{ oid => '4354', descr => 'internal conversion function for UTF8 to KOI8R',
proname => 'utf8_to_koi8r', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_koi8r',
- probin => '$libdir/utf8_and_cyrillic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_koi8r', probin => '$libdir/utf8_and_cyrillic' },
{ oid => '4355', descr => 'internal conversion function for KOI8R to UTF8',
proname => 'koi8r_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'koi8r_to_utf8',
- probin => '$libdir/utf8_and_cyrillic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'koi8r_to_utf8', probin => '$libdir/utf8_and_cyrillic' },
{ oid => '4356', descr => 'internal conversion function for UTF8 to KOI8U',
proname => 'utf8_to_koi8u', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_koi8u',
- probin => '$libdir/utf8_and_cyrillic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_koi8u', probin => '$libdir/utf8_and_cyrillic' },
{ oid => '4357', descr => 'internal conversion function for KOI8U to UTF8',
proname => 'koi8u_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'koi8u_to_utf8',
- probin => '$libdir/utf8_and_cyrillic' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'koi8u_to_utf8', probin => '$libdir/utf8_and_cyrillic' },
{ oid => '4358', descr => 'internal conversion function for UTF8 to WIN',
proname => 'utf8_to_win', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_win',
- probin => '$libdir/utf8_and_win' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_win', probin => '$libdir/utf8_and_win' },
{ oid => '4359', descr => 'internal conversion function for WIN to UTF8',
proname => 'win_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'win_to_utf8',
- probin => '$libdir/utf8_and_win' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'win_to_utf8', probin => '$libdir/utf8_and_win' },
{ oid => '4360', descr => 'internal conversion function for EUC_CN to UTF8',
proname => 'euc_cn_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_cn_to_utf8',
- probin => '$libdir/utf8_and_euc_cn' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_cn_to_utf8', probin => '$libdir/utf8_and_euc_cn' },
{ oid => '4361', descr => 'internal conversion function for UTF8 to EUC_CN',
proname => 'utf8_to_euc_cn', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_euc_cn',
- probin => '$libdir/utf8_and_euc_cn' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_euc_cn', probin => '$libdir/utf8_and_euc_cn' },
{ oid => '4362', descr => 'internal conversion function for EUC_JP to UTF8',
proname => 'euc_jp_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_jp_to_utf8',
- probin => '$libdir/utf8_and_euc_jp' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_jp_to_utf8', probin => '$libdir/utf8_and_euc_jp' },
{ oid => '4363', descr => 'internal conversion function for UTF8 to EUC_JP',
proname => 'utf8_to_euc_jp', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_euc_jp',
- probin => '$libdir/utf8_and_euc_jp' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_euc_jp', probin => '$libdir/utf8_and_euc_jp' },
{ oid => '4364', descr => 'internal conversion function for EUC_KR to UTF8',
proname => 'euc_kr_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_kr_to_utf8',
- probin => '$libdir/utf8_and_euc_kr' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_kr_to_utf8', probin => '$libdir/utf8_and_euc_kr' },
{ oid => '4365', descr => 'internal conversion function for UTF8 to EUC_KR',
proname => 'utf8_to_euc_kr', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_euc_kr',
- probin => '$libdir/utf8_and_euc_kr' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_euc_kr', probin => '$libdir/utf8_and_euc_kr' },
{ oid => '4366', descr => 'internal conversion function for EUC_TW to UTF8',
proname => 'euc_tw_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'euc_tw_to_utf8',
- probin => '$libdir/utf8_and_euc_tw' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'euc_tw_to_utf8', probin => '$libdir/utf8_and_euc_tw' },
{ oid => '4367', descr => 'internal conversion function for UTF8 to EUC_TW',
proname => 'utf8_to_euc_tw', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_euc_tw',
- probin => '$libdir/utf8_and_euc_tw' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_euc_tw', probin => '$libdir/utf8_and_euc_tw' },
{ oid => '4368', descr => 'internal conversion function for GB18030 to UTF8',
proname => 'gb18030_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'gb18030_to_utf8',
- probin => '$libdir/utf8_and_gb18030' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'gb18030_to_utf8', probin => '$libdir/utf8_and_gb18030' },
{ oid => '4369', descr => 'internal conversion function for UTF8 to GB18030',
proname => 'utf8_to_gb18030', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_gb18030',
- probin => '$libdir/utf8_and_gb18030' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_gb18030', probin => '$libdir/utf8_and_gb18030' },
{ oid => '4370', descr => 'internal conversion function for GBK to UTF8',
proname => 'gbk_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'gbk_to_utf8',
- probin => '$libdir/utf8_and_gbk' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'gbk_to_utf8', probin => '$libdir/utf8_and_gbk' },
{ oid => '4371', descr => 'internal conversion function for UTF8 to GBK',
proname => 'utf8_to_gbk', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_gbk',
- probin => '$libdir/utf8_and_gbk' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_gbk', probin => '$libdir/utf8_and_gbk' },
{ oid => '4372',
descr => 'internal conversion function for UTF8 to ISO-8859 2-16',
proname => 'utf8_to_iso8859', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_iso8859',
- probin => '$libdir/utf8_and_iso8859' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_iso8859', probin => '$libdir/utf8_and_iso8859' },
{ oid => '4373',
descr => 'internal conversion function for ISO-8859 2-16 to UTF8',
proname => 'iso8859_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'iso8859_to_utf8',
- probin => '$libdir/utf8_and_iso8859' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'iso8859_to_utf8', probin => '$libdir/utf8_and_iso8859' },
{ oid => '4374', descr => 'internal conversion function for LATIN1 to UTF8',
proname => 'iso8859_1_to_utf8', prolang => 'c', prorettype => 'int4',
proargtypes => 'int4 int4 cstring internal int4 bool',
prosrc => 'utf8_to_iso8859_1', probin => '$libdir/utf8_and_iso8859_1' },
{ oid => '4376', descr => 'internal conversion function for JOHAB to UTF8',
proname => 'johab_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'johab_to_utf8',
- probin => '$libdir/utf8_and_johab' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'johab_to_utf8', probin => '$libdir/utf8_and_johab' },
{ oid => '4377', descr => 'internal conversion function for UTF8 to JOHAB',
proname => 'utf8_to_johab', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_johab',
- probin => '$libdir/utf8_and_johab' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_johab', probin => '$libdir/utf8_and_johab' },
{ oid => '4378', descr => 'internal conversion function for SJIS to UTF8',
proname => 'sjis_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'sjis_to_utf8',
- probin => '$libdir/utf8_and_sjis' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'sjis_to_utf8', probin => '$libdir/utf8_and_sjis' },
{ oid => '4379', descr => 'internal conversion function for UTF8 to SJIS',
proname => 'utf8_to_sjis', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_sjis',
- probin => '$libdir/utf8_and_sjis' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_sjis', probin => '$libdir/utf8_and_sjis' },
{ oid => '4380', descr => 'internal conversion function for UHC to UTF8',
proname => 'uhc_to_utf8', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'uhc_to_utf8',
- probin => '$libdir/utf8_and_uhc' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'uhc_to_utf8', probin => '$libdir/utf8_and_uhc' },
{ oid => '4381', descr => 'internal conversion function for UTF8 to UHC',
proname => 'utf8_to_uhc', prolang => 'c', prorettype => 'int4',
- proargtypes => 'int4 int4 cstring internal int4 bool', prosrc => 'utf8_to_uhc',
- probin => '$libdir/utf8_and_uhc' },
+ proargtypes => 'int4 int4 cstring internal int4 bool',
+ prosrc => 'utf8_to_uhc', probin => '$libdir/utf8_and_uhc' },
{ oid => '4382',
descr => 'internal conversion function for EUC_JIS_2004 to UTF8',
proname => 'euc_jis_2004_to_utf8', prolang => 'c', prorettype => 'int4',
prorettype => 'pg_brin_bloom_summary', proargtypes => 'internal',
prosrc => 'brin_bloom_summary_recv' },
{ oid => '4599', descr => 'I/O',
- proname => 'brin_bloom_summary_send', provolatile => 's', prorettype => 'bytea',
- proargtypes => 'pg_brin_bloom_summary', prosrc => 'brin_bloom_summary_send' },
+ proname => 'brin_bloom_summary_send', provolatile => 's',
+ prorettype => 'bytea', proargtypes => 'pg_brin_bloom_summary',
+ prosrc => 'brin_bloom_summary_send' },
{ oid => '4638', descr => 'I/O',
- proname => 'brin_minmax_multi_summary_in', prorettype => 'pg_brin_minmax_multi_summary',
- proargtypes => 'cstring', prosrc => 'brin_minmax_multi_summary_in' },
+ proname => 'brin_minmax_multi_summary_in',
+ prorettype => 'pg_brin_minmax_multi_summary', proargtypes => 'cstring',
+ prosrc => 'brin_minmax_multi_summary_in' },
{ oid => '4639', descr => 'I/O',
proname => 'brin_minmax_multi_summary_out', prorettype => 'cstring',
- proargtypes => 'pg_brin_minmax_multi_summary', prosrc => 'brin_minmax_multi_summary_out' },
+ proargtypes => 'pg_brin_minmax_multi_summary',
+ prosrc => 'brin_minmax_multi_summary_out' },
{ oid => '4640', descr => 'I/O',
proname => 'brin_minmax_multi_summary_recv', provolatile => 's',
prorettype => 'pg_brin_minmax_multi_summary', proargtypes => 'internal',
prosrc => 'brin_minmax_multi_summary_recv' },
{ oid => '4641', descr => 'I/O',
- proname => 'brin_minmax_multi_summary_send', provolatile => 's', prorettype => 'bytea',
- proargtypes => 'pg_brin_minmax_multi_summary', prosrc => 'brin_minmax_multi_summary_send' },
+ proname => 'brin_minmax_multi_summary_send', provolatile => 's',
+ prorettype => 'bytea', proargtypes => 'pg_brin_minmax_multi_summary',
+ prosrc => 'brin_minmax_multi_summary_send' },
]
# jsonb
{ oid => '3802', array_type_oid => '3807', descr => 'Binary JSON',
typname => 'jsonb', typlen => '-1', typbyval => 'f', typcategory => 'U',
- typinput => 'jsonb_in', typoutput => 'jsonb_out', typreceive => 'jsonb_recv',
- typsend => 'jsonb_send', typalign => 'i', typstorage => 'x',
- typsubscript => 'jsonb_subscript_handler' },
+ typsubscript => 'jsonb_subscript_handler', typinput => 'jsonb_in',
+ typoutput => 'jsonb_out', typreceive => 'jsonb_recv', typsend => 'jsonb_send',
+ typalign => 'i', typstorage => 'x' },
{ oid => '4072', array_type_oid => '4073', descr => 'JSON path',
typname => 'jsonpath', typlen => '-1', typbyval => 'f', typcategory => 'U',
typinput => 'jsonpath_in', typoutput => 'jsonpath_out',
typtype => 'p', typcategory => 'P', typinput => 'anycompatiblemultirange_in',
typoutput => 'anycompatiblemultirange_out', typreceive => '-', typsend => '-',
typalign => 'd', typstorage => 'x' },
-{ oid => '4600',
- descr => 'BRIN bloom summary',
- typname => 'pg_brin_bloom_summary', typlen => '-1', typbyval => 'f', typcategory => 'S',
- typinput => 'brin_bloom_summary_in', typoutput => 'brin_bloom_summary_out',
+{ oid => '4600', descr => 'BRIN bloom summary',
+ typname => 'pg_brin_bloom_summary', typlen => '-1', typbyval => 'f',
+ typcategory => 'S', typinput => 'brin_bloom_summary_in',
+ typoutput => 'brin_bloom_summary_out',
typreceive => 'brin_bloom_summary_recv', typsend => 'brin_bloom_summary_send',
typalign => 'i', typstorage => 'x', typcollation => 'default' },
-{ oid => '4601',
- descr => 'BRIN minmax-multi summary',
- typname => 'pg_brin_minmax_multi_summary', typlen => '-1', typbyval => 'f', typcategory => 'S',
- typinput => 'brin_minmax_multi_summary_in', typoutput => 'brin_minmax_multi_summary_out',
- typreceive => 'brin_minmax_multi_summary_recv', typsend => 'brin_minmax_multi_summary_send',
- typalign => 'i', typstorage => 'x', typcollation => 'default' },
+{ oid => '4601', descr => 'BRIN minmax-multi summary',
+ typname => 'pg_brin_minmax_multi_summary', typlen => '-1', typbyval => 'f',
+ typcategory => 'S', typinput => 'brin_minmax_multi_summary_in',
+ typoutput => 'brin_minmax_multi_summary_out',
+ typreceive => 'brin_minmax_multi_summary_recv',
+ typsend => 'brin_minmax_multi_summary_send', typalign => 'i',
+ typstorage => 'x', typcollation => 'default' },
]
# Collect all the existing assigned OIDs (including those to be remapped).
my @header_files = glob("pg_*.h");
-my $oids = Catalog::FindAllOidsFromHeaders(@header_files);
+my $oids = Catalog::FindAllOidsFromHeaders(@header_files);
# Hash-ify the existing OIDs for convenient lookup.
my %oidhash;
extern void ProcessCopyOptions(ParseState *pstate, CopyFormatOptions *ops_out, bool is_from, List *options);
extern CopyFromState BeginCopyFrom(ParseState *pstate, Relation rel, Node *whereClause,
- const char *filename,
- bool is_program, copy_data_source_cb data_source_cb, List *attnamelist, List *options);
+ const char *filename,
+ bool is_program, copy_data_source_cb data_source_cb, List *attnamelist, List *options);
extern void EndCopyFrom(CopyFromState cstate);
extern bool NextCopyFrom(CopyFromState cstate, ExprContext *econtext,
Datum *values, bool *nulls);
/*
* input_buf holds input data, already converted to database encoding.
*
- * In text mode, CopyReadLine parses this data sufficiently to locate
- * line boundaries, then transfers the data to line_buf. We guarantee
- * that there is a \0 at input_buf[input_buf_len] at all times. (In
- * binary mode, input_buf is not used.)
+ * In text mode, CopyReadLine parses this data sufficiently to locate line
+ * boundaries, then transfers the data to line_buf. We guarantee that
+ * there is a \0 at input_buf[input_buf_len] at all times. (In binary
+ * mode, input_buf is not used.)
*
* If encoding conversion is not required, input_buf is not a separate
* buffer but points directly to raw_buf. In that case, input_buf_len
* tracks the number of bytes that have been verified as valid in the
- * database encoding, and raw_buf_len is the total number of bytes
- * stored in the buffer.
+ * database encoding, and raw_buf_len is the total number of bytes stored
+ * in the buffer.
*/
#define INPUT_BUF_SIZE 65536 /* we palloc INPUT_BUF_SIZE+1 bytes */
char *input_buf;
int input_buf_index; /* next byte to process */
- int input_buf_len; /* total # of bytes stored */
+ int input_buf_len; /* total # of bytes stored */
bool input_reached_eof; /* true if we reached EOF */
- bool input_reached_error; /* true if a conversion error happened */
+ bool input_reached_error; /* true if a conversion error happened */
/* Shorthand for number of unconsumed bytes available in input_buf */
#define INPUT_BUF_BYTES(cstate) ((cstate)->input_buf_len - (cstate)->input_buf_index)
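For context on the bookkeeping described above, here is a minimal standalone sketch (hypothetical DemoInputBuf names, not the actual COPY code) of the same index/length invariant, including the '\0' kept at buf[len]:

#include <assert.h>
#include <string.h>

/* Illustrative only: mirrors the input_buf_index/input_buf_len scheme. */
typedef struct DemoInputBuf
{
	char		buf[64 + 1];	/* +1 for the trailing '\0' */
	int			index;			/* next byte to process */
	int			len;			/* total # of bytes stored */
} DemoInputBuf;

#define DEMO_BUF_BYTES(b) ((b)->len - (b)->index)

static void
demo_refill(DemoInputBuf *b, const char *data, int n)
{
	int			remaining = DEMO_BUF_BYTES(b);

	/* Slide unconsumed bytes to the front, then append the new data. */
	memmove(b->buf, b->buf + b->index, remaining);
	b->index = 0;
	b->len = remaining;
	assert(b->len + n <= 64);	/* caller must respect capacity */
	memcpy(b->buf + b->len, data, n);
	b->len += n;
	b->buf[b->len] = '\0';		/* keep the terminator invariant */
}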
extern ObjectAddress CreateStatistics(CreateStatsStmt *stmt);
extern ObjectAddress AlterStatistics(AlterStatsStmt *stmt);
extern void RemoveStatisticsById(Oid statsOid);
-extern Oid StatisticsGetRelation(Oid statId, bool missing_ok);
+extern Oid StatisticsGetRelation(Oid statId, bool missing_ok);
/* commands/aggregatecmds.c */
extern ObjectAddress DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle,
extern void ExecAsyncRequestDone(AsyncRequest *areq, TupleTableSlot *result);
extern void ExecAsyncRequestPending(AsyncRequest *areq);
-#endif /* EXECASYNC_H */
+#endif /* EXECASYNC_H */
char **argnames; /* names of input arguments; NULL if none */
/* Note that argnames[i] can be NULL, if some args are unnamed */
Oid collation; /* function's input collation, if known */
-} SQLFunctionParseInfo;
+} SQLFunctionParseInfo;
typedef SQLFunctionParseInfo *SQLFunctionParseInfoPtr;
TupleTableSlot *planSlot);
typedef TupleTableSlot **(*ExecForeignBatchInsert_function) (EState *estate,
- ResultRelInfo *rinfo,
- TupleTableSlot **slots,
- TupleTableSlot **planSlots,
- int *numSlots);
+ ResultRelInfo *rinfo,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int *numSlots);
typedef int (*GetForeignModifyBatchSize_function) (ResultRelInfo *rinfo);
#ifdef ST_COMPARE_RUNTIME_POINTER
typedef int (*ST_COMPARATOR_TYPE_NAME) (const ST_ELEMENT_TYPE *,
- const ST_ELEMENT_TYPE *ST_SORT_PROTO_ARG);
+ const ST_ELEMENT_TYPE * ST_SORT_PROTO_ARG);
#endif
/* Declare the sort function. Note optional arguments at end. */
-ST_SCOPE void ST_SORT(ST_ELEMENT_TYPE *first, size_t n
+ST_SCOPE void ST_SORT(ST_ELEMENT_TYPE * first, size_t n
ST_SORT_PROTO_ELEMENT_SIZE
ST_SORT_PROTO_COMPARE
ST_SORT_PROTO_ARG);
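The spacing churn around ST_ELEMENT_TYPE * in this hunk happens because pgindent does not know these macros expand to type names (they are not in its typedefs list), so it formats them like variables. For readers unfamiliar with the template, a sketch of how lib/sort_template.h is typically instantiated (a static int sorter; the comparator receives pointers to elements):

#define ST_SORT sort_int
#define ST_ELEMENT_TYPE int
#define ST_COMPARE(a, b) (*(a) < *(b) ? -1 : (*(a) > *(b) ? 1 : 0))
#define ST_SCOPE static
#define ST_DECLARE
#define ST_DEFINE
#include "lib/sort_template.h"

/* usage: sort_int(array, nelems); */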
* in the qsort function.
*/
static pg_noinline ST_ELEMENT_TYPE *
-ST_MED3(ST_ELEMENT_TYPE *a,
- ST_ELEMENT_TYPE *b,
- ST_ELEMENT_TYPE *c
+ST_MED3(ST_ELEMENT_TYPE * a,
+ ST_ELEMENT_TYPE * b,
+ ST_ELEMENT_TYPE * c
ST_SORT_PROTO_COMPARE
ST_SORT_PROTO_ARG)
{
}
static inline void
-ST_SWAP(ST_POINTER_TYPE *a, ST_POINTER_TYPE *b)
+ST_SWAP(ST_POINTER_TYPE * a, ST_POINTER_TYPE * b)
{
ST_POINTER_TYPE tmp = *a;
}
static inline void
-ST_SWAPN(ST_POINTER_TYPE *a, ST_POINTER_TYPE *b, size_t n)
+ST_SWAPN(ST_POINTER_TYPE * a, ST_POINTER_TYPE * b, size_t n)
{
for (size_t i = 0; i < n; ++i)
ST_SWAP(&a[i], &b[i]);
* Sort an array.
*/
ST_SCOPE void
-ST_SORT(ST_ELEMENT_TYPE *data, size_t n
+ST_SORT(ST_ELEMENT_TYPE * data, size_t n
ST_SORT_PROTO_ELEMENT_SIZE
ST_SORT_PROTO_COMPARE
ST_SORT_PROTO_ARG)
bool ri_usesFdwDirectModify;
/* batch insert stuff */
- int ri_NumSlots; /* number of slots in the array */
- int ri_BatchSize; /* max slots inserted in a single batch */
- TupleTableSlot **ri_Slots; /* input tuples for batch insert */
+ int ri_NumSlots; /* number of slots in the array */
+ int ri_BatchSize; /* max slots inserted in a single batch */
+ TupleTableSlot **ri_Slots; /* input tuples for batch insert */
TupleTableSlot **ri_PlanSlots;
/* list of WithCheckOption's to be checked */
int as_whichplan;
bool as_begun; /* false means need to initialize */
Bitmapset *as_asyncplans; /* asynchronous plans indexes */
- int as_nasyncplans; /* # of asynchronous plans */
+ int as_nasyncplans; /* # of asynchronous plans */
AsyncRequest **as_asyncrequests; /* array of AsyncRequests */
TupleTableSlot **as_asyncresults; /* unreturned results of async plans */
int as_nasyncresults; /* # of valid entries in as_asyncresults */
bool as_syncdone; /* true if all synchronous plans done in
* asynchronous mode, else false */
int as_nasyncremain; /* # of remaining asynchronous plans */
- Bitmapset *as_needrequest; /* asynchronous plans needing a new request */
- struct WaitEventSet *as_eventset; /* WaitEventSet used to configure
- * file descriptor wait events */
+ Bitmapset *as_needrequest; /* asynchronous plans needing a new request */
+ struct WaitEventSet *as_eventset; /* WaitEventSet used to configure file
+ * descriptor wait events */
int as_first_partial_plan; /* Index of 'appendplans' containing
* the first partial plan */
ParallelAppendState *as_pstate; /* parallel coordination info */
AT_GenericOptions, /* OPTIONS (...) */
AT_AttachPartition, /* ATTACH PARTITION */
AT_DetachPartition, /* DETACH PARTITION */
- AT_DetachPartitionFinalize, /* DETACH PARTITION FINALIZE */
+ AT_DetachPartitionFinalize, /* DETACH PARTITION FINALIZE */
AT_AddIdentity, /* ADD IDENTITY */
AT_SetIdentity, /* SET identity column options */
AT_DropIdentity, /* DROP IDENTITY */
Index *sortgrouprefs; /* corresponding sort/group refnos, or 0 */
QualCost cost; /* cost of evaluating the expressions */
int width; /* estimated avg width of result tuples */
- VolatileFunctionStatus has_volatile_expr; /* indicates if exprs contain
+ VolatileFunctionStatus has_volatile_expr; /* indicates if exprs contain
* any volatile functions. */
} PathTarget;
bool leakproof; /* true if known to contain no leaked Vars */
- VolatileFunctionStatus has_volatile; /* to indicate if clause contains
+ VolatileFunctionStatus has_volatile; /* to indicate if clause contains
* any volatile functions. */
Index security_level; /* see comment above */
/*
* information needed for asynchronous execution
*/
- bool async_capable; /* engage asynchronous-capable logic? */
+ bool async_capable; /* engage asynchronous-capable logic? */
/*
* Common structural data for all Plan types.
* memory even when clobber is off, or to 0 to never free relation cache
* memory even when clobbering is on.
*/
-/* #define RECOVER_RELATION_BUILD_MEMORY 0 */ /* Force disable */
-/* #define RECOVER_RELATION_BUILD_MEMORY 1 */ /* Force enable */
+ /* #define RECOVER_RELATION_BUILD_MEMORY 0 */ /* Force disable */
+ /* #define RECOVER_RELATION_BUILD_MEMORY 1 */ /* Force enable */
/*
* Define this to force all parse and plan trees to be passed through
#include "datatype/timestamp.h"
#include "portability/instr_time.h"
-#include "postmaster/pgarch.h" /* for MAX_XFN_CHARS */
+#include "postmaster/pgarch.h" /* for MAX_XFN_CHARS */
#include "utils/backend_progress.h" /* for backward compatibility */
-#include "utils/backend_status.h" /* for backward compatibility */
+#include "utils/backend_status.h" /* for backward compatibility */
#include "utils/hsearch.h"
#include "utils/relcache.h"
-#include "utils/wait_event.h" /* for backward compatibility */
+#include "utils/wait_event.h" /* for backward compatibility */
/* ----------
typedef struct PgStat_MsgResetreplslotcounter
{
PgStat_MsgHdr m_hdr;
- NameData m_slotname;
+ NameData m_slotname;
bool clearall;
} PgStat_MsgResetreplslotcounter;
#include
-typedef enum RecoveryInitSyncMethod {
+typedef enum RecoveryInitSyncMethod
+{
RECOVERY_INIT_SYNC_METHOD_FSYNC,
RECOVERY_INIT_SYNC_METHOD_SYNCFS
-} RecoveryInitSyncMethod;
+} RecoveryInitSyncMethod;
struct iovec; /* avoid including port/pg_iovec.h here */
/* GUC parameter */
extern PGDLLIMPORT int max_files_per_process;
extern PGDLLIMPORT bool data_sync_retry;
-extern int recovery_init_sync_method;
+extern int recovery_init_sync_method;
/*
* This is private to fd.c, but exported for save/restore_backend_variables()
typedef struct XidCacheStatus
{
/* number of cached subxids, never more than PGPROC_MAX_CACHED_SUBXIDS */
- uint8 count;
+ uint8 count;
/* has PGPROC->subxids overflowed */
- bool overflowed;
+ bool overflowed;
} XidCacheStatus;
struct XidCache
* else InvalidLocalTransactionId */
int pid; /* Backend's process ID; 0 if prepared xact */
- int pgxactoff; /* offset into various ProcGlobal->arrays
- * with data mirrored from this PGPROC */
+ int pgxactoff; /* offset into various ProcGlobal->arrays with
+ * data mirrored from this PGPROC */
int pgprocno;
/* These fields are zero while a backend is still starting up: */
*/
SHM_QUEUE myProcLocks[NUM_LOCK_PARTITIONS];
- XidCacheStatus subxidStatus; /* mirrored with
- * ProcGlobal->subxidStates[i] */
+ XidCacheStatus subxidStatus; /* mirrored with
+ * ProcGlobal->subxidStates[i] */
struct XidCache subxids; /* cache for subtransaction XIDs */
/* Support for group XID clearing. */
extern void pgstat_progress_end_command(void);
-#endif /* BACKEND_PROGRESS_H */
+#endif /* BACKEND_PROGRESS_H */
#include "datatype/timestamp.h"
#include "libpq/pqcomm.h"
-#include "miscadmin.h" /* for BackendType */
+#include "miscadmin.h" /* for BackendType */
#include "utils/backend_progress.h"
* Other global variables
* ----------
*/
-extern PGDLLIMPORT PgBackendStatus *MyBEEntry;
+extern PGDLLIMPORT PgBackendStatus *MyBEEntry;
/* ----------
extern char *pgstat_clip_activity(const char *raw_activity);
-#endif /* BACKEND_STATUS_H */
+#endif /* BACKEND_STATUS_H */
extern int2vector *buildint2vector(const int16 *int2s, int n);
/* name.c */
-extern void namestrcpy(Name name, const char *str);
+extern void namestrcpy(Name name, const char *str);
extern int namestrcmp(Name name, const char *str);
/* numutils.c */
extern int32 pg_atoi(const char *s, int size, int c);
extern int16 pg_strtoint16(const char *s);
extern int32 pg_strtoint32(const char *s);
-extern int pg_itoa(int16 i, char *a);
-extern int pg_ultoa_n(uint32 l, char *a);
-extern int pg_ulltoa_n(uint64 l, char *a);
-extern int pg_ltoa(int32 l, char *a);
-extern int pg_lltoa(int64 ll, char *a);
+extern int pg_itoa(int16 i, char *a);
+extern int pg_ultoa_n(uint32 l, char *a);
+extern int pg_ulltoa_n(uint64 l, char *a);
+extern int pg_ltoa(int32 l, char *a);
+extern int pg_lltoa(int64 ll, char *a);
extern char *pg_ultostr_zeropad(char *str, uint32 value, int32 minwidth);
extern char *pg_ultostr(char *str, uint32 value);
extern uint64 pg_strtouint64(const char *str, char **endptr, int base);
typedef struct EstimationInfo
{
- uint32 flags; /* Flags, as defined above to mark special
+ uint32 flags; /* Flags, as defined above to mark special
* properties of the estimation. */
} EstimationInfo;
}
-#endif /* WAIT_EVENT_H */
+#endif /* WAIT_EVENT_H */
for (list = g_declared_list; list != NULL;)
{
struct declared_list *this = list;
+
list = list->next;
free(this);
}
'opt_array_bounds' => '',
# "ignore" means: do not create type and rules for this non-term-id
- 'parse_toplevel' => 'ignore',
- 'stmtmulti' => 'ignore',
- 'CreateAsStmt' => 'ignore',
- 'DeallocateStmt' => 'ignore',
- 'ColId' => 'ignore',
- 'type_function_name' => 'ignore',
- 'ColLabel' => 'ignore',
- 'Sconst' => 'ignore',
+ 'parse_toplevel' => 'ignore',
+ 'stmtmulti' => 'ignore',
+ 'CreateAsStmt' => 'ignore',
+ 'DeallocateStmt' => 'ignore',
+ 'ColId' => 'ignore',
+ 'type_function_name' => 'ignore',
+ 'ColLabel' => 'ignore',
+ 'Sconst' => 'ignore',
'opt_distinct_clause' => 'ignore',
- 'PLpgSQL_Expr' => 'ignore',
- 'PLAssignStmt' => 'ignore',
- 'plassign_target' => 'ignore',
- 'plassign_equals' => 'ignore',);
+ 'PLpgSQL_Expr' => 'ignore',
+ 'PLAssignStmt' => 'ignore',
+ 'plassign_target' => 'ignore',
+ 'plassign_equals' => 'ignore',);
# these replace_line commands excise certain keywords from the core keyword
# lists. Be sure to account for these in ColLabel and related productions.
if ((cvstore = SSL_CTX_get_cert_store(SSL_context)) != NULL)
{
- char *fname = NULL;
- char *dname = NULL;
+ char *fname = NULL;
+ char *dname = NULL;
if (conn->sslcrl && strlen(conn->sslcrl) > 0)
fname = conn->sslcrl;
{
/*
* In the non-SSL case, just remove the crypto callbacks if the
- * connection has then loaded. This code path has no dependency
- * on any pending SSL calls.
+	 * connection has them loaded. This code path has no dependency on
+ * any pending SSL calls.
*/
if (conn->crypto_loaded)
destroy_needed = true;
if (!toServer)
pqTraceOutputS(conn->Pfdebug, message, &logCursor);
else
- fprintf(conn->Pfdebug, "Sync"); /* no message content */
+ fprintf(conn->Pfdebug, "Sync"); /* no message content */
break;
case 't': /* Parameter Description */
pqTraceOutputt(conn->Pfdebug, message, &logCursor, regress);
return -1;
return readv(fd, iov, iovcnt);
#else
- ssize_t sum = 0;
- ssize_t part;
+ ssize_t sum = 0;
+ ssize_t part;
for (int i = 0; i < iovcnt; ++i)
{
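The #else branch above (truncated in this excerpt) emulates readv() by looping over the iovec array and summing partial reads. A self-contained sketch of that style of fallback, assuming only POSIX read() -- illustrative, not the actual src/port code:

#include <sys/uio.h>
#include <unistd.h>

static ssize_t
demo_readv(int fd, const struct iovec *iov, int iovcnt)
{
	ssize_t		sum = 0;

	for (int i = 0; i < iovcnt; ++i)
	{
		ssize_t		part = read(fd, iov[i].iov_base, iov[i].iov_len);

		if (part < 0)
			return (sum > 0) ? sum : -1;	/* keep bytes already read */
		sum += part;
		if ((size_t) part < iov[i].iov_len)
			break;				/* short read or EOF: stop early */
	}
	return sum;
}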
$ENV{"PGPASSFILE"} = $pgpassfile;
unlink($pgpassfile);
-append_to_file($pgpassfile, qq!
+append_to_file(
+ $pgpassfile, qq!
# This very long comment is just here to exercise handling of long lines in the file. This very long comment is just here to exercise handling of long lines in the file. This very long comment is just here to exercise handling of long lines in the file. This very long comment is just here to exercise handling of long lines in the file. This very long comment is just here to exercise handling of long lines in the file.
*:*:postgres:scram_role:pass:this is not part of the password.
!);
test_role($node, 'scram_role', 'password from pgpass', 0);
test_role($node, 'md5_role', 'password from pgpass', 2);
-append_to_file($pgpassfile, qq!
+append_to_file(
+ $pgpassfile, qq!
*:*:*:md5_role:p\\ass
!);
-test_role($node, 'md5_role', 'password from pgpass', 0);
+test_role($node, 'md5_role', 'password from pgpass', 0);
my $kdc_pidfile = "${TestLib::tmp_check}/krb5kdc.pid";
my $keytab = "${TestLib::tmp_check}/krb5.keytab";
-my $dbname = 'postgres';
-my $username = 'test1';
+my $dbname = 'postgres';
+my $username = 'test1';
my $application = '001_auth.pl';
note "setting up Kerberos";
my $connstr = $node->connstr('postgres')
. " user=$role host=$host hostaddr=$hostaddr $gssencmode";
- my %params = (
- sql => $query,
- );
+ my %params = (sql => $query,);
if (@expect_log_msgs)
{
use PostgresNode;
my $bkplabel = 'backup';
-my $primary = get_new_node('primary');
+my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$primary->append_conf(
use PostgresNode;
my $bkplabel = 'backup';
-my $primary = get_new_node('primary');
+my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$primary->append_conf(
'postgresql.conf', qq{
for my $testname (@tests)
{
my @extraargs = ('-r', $numrows);
- my $cmptrace = grep(/^$testname$/,
+ my $cmptrace = grep(/^$testname$/,
qw(simple_pipeline multi_pipelines prepared singlerow
pipeline_abort transaction disallowed_in_pipeline)) > 0;
print "# Taking pg_basebackup $backup_name from node \"$name\"\n";
TestLib::system_or_bail(
- 'pg_basebackup', '-D', $backup_path, '-h',
- $self->host, '-p', $self->port, '--checkpoint',
+ 'pg_basebackup', '-D',
+ $backup_path, '-h',
+ $self->host, '-p',
+ $self->port, '--checkpoint',
'fast', '--no-sync',
@{ $params{backup_options} });
print "# Backup finished\n";
TestLib::system_or_bail($params{tar_program}, 'xf',
$backup_path . '/base.tar',
'-C', $data_path);
- TestLib::system_or_bail($params{tar_program}, 'xf',
- $backup_path . '/pg_wal.tar',
- '-C', $data_path . '/pg_wal');
+ TestLib::system_or_bail(
+ $params{tar_program}, 'xf',
+ $backup_path . '/pg_wal.tar', '-C',
+ $data_path . '/pg_wal');
}
else
{
#
sub _set_pg_version
{
- my ($self) = @_;
- my $inst = $self->{_install_path};
- my $pg_config = "pg_config";
-
- if (defined $inst)
- {
- # If the _install_path is invalid, our PATH variables might find an
- # unrelated pg_config executable elsewhere. Sanity check the
- # directory.
- BAIL_OUT("directory not found: $inst")
- unless -d $inst;
-
- # If the directory exists but is not the root of a postgresql
- # installation, or if the user configured using
- # --bindir=$SOMEWHERE_ELSE, we're not going to find pg_config, so
- # complain about that, too.
- $pg_config = "$inst/bin/pg_config";
- BAIL_OUT("pg_config not found: $pg_config")
- unless -e $pg_config;
- BAIL_OUT("pg_config not executable: $pg_config")
- unless -x $pg_config;
-
- # Leave $pg_config install_path qualified, to be sure we get the right
- # version information, below, or die trying
- }
-
- local %ENV = $self->_get_env();
-
- # We only want the version field
- open my $fh, "-|", $pg_config, "--version"
- or
- BAIL_OUT("$pg_config failed: $!");
- my $version_line = <$fh>;
- close $fh or die;
-
- $self->{_pg_version} = PostgresVersion->new($version_line);
-
- BAIL_OUT("could not parse pg_config --version output: $version_line")
+ my ($self) = @_;
+ my $inst = $self->{_install_path};
+ my $pg_config = "pg_config";
+
+ if (defined $inst)
+ {
+ # If the _install_path is invalid, our PATH variables might find an
+ # unrelated pg_config executable elsewhere. Sanity check the
+ # directory.
+ BAIL_OUT("directory not found: $inst")
+ unless -d $inst;
+
+ # If the directory exists but is not the root of a postgresql
+ # installation, or if the user configured using
+ # --bindir=$SOMEWHERE_ELSE, we're not going to find pg_config, so
+ # complain about that, too.
+ $pg_config = "$inst/bin/pg_config";
+ BAIL_OUT("pg_config not found: $pg_config")
+ unless -e $pg_config;
+ BAIL_OUT("pg_config not executable: $pg_config")
+ unless -x $pg_config;
+
+ # Leave $pg_config install_path qualified, to be sure we get the right
+ # version information, below, or die trying
+ }
+
+ local %ENV = $self->_get_env();
+
+ # We only want the version field
+ open my $fh, "-|", $pg_config, "--version"
+ or BAIL_OUT("$pg_config failed: $!");
+ my $version_line = <$fh>;
+ close $fh or die;
+
+ $self->{_pg_version} = PostgresVersion->new($version_line);
+
+ BAIL_OUT("could not parse pg_config --version output: $version_line")
unless defined $self->{_pg_version};
}
# a common parent directory.
sub _get_env
{
- my $self = shift;
+ my $self = shift;
my %inst_env = (%ENV, PGHOST => $self->{_host}, PGPORT => $self->{_port});
# the remaining arguments are modifications to make to the environment
my %mods = (@_);
# caching a command.
sub installed_command
{
- my ($self, $cmd) = @_;
+ my ($self, $cmd) = @_;
- # Nodes using alternate installation locations use their installation's
- # bin/ directory explicitly
- return join('/', $self->{_install_path}, 'bin', $cmd)
- if defined $self->{_install_path};
+ # Nodes using alternate installation locations use their installation's
+ # bin/ directory explicitly
+ return join('/', $self->{_install_path}, 'bin', $cmd)
+ if defined $self->{_install_path};
- # Nodes implicitly using the default installation location rely on IPC::Run
- # to find the right binary, which should not cause %cmd_cache confusion,
- # because no nodes with other installation paths do it that way.
- return $cmd;
+ # Nodes implicitly using the default installation location rely on IPC::Run
+ # to find the right binary, which should not cause %cmd_cache confusion,
+ # because no nodes with other installation paths do it that way.
+ return $cmd;
}
=pod
if ($found == 1)
{
foreach my $addr (qw(127.0.0.1),
- ($use_tcp && $TestLib::windows_os)
- ? qw(127.0.0.2 127.0.0.3 0.0.0.0)
- : ())
+ ($use_tcp && $TestLib::windows_os)
+ ? qw(127.0.0.2 127.0.0.3 0.0.0.0)
+ : ())
{
if (!can_bind($addr, $port))
{
}
$psql_connstr .= defined $replication ? " replication=$replication" : "";
- my @psql_params = ($self->installed_command('psql'),
- '-XAtq', '-d', $psql_connstr, '-f', '-');
+ my @psql_params = (
+ $self->installed_command('psql'),
+ '-XAtq', '-d', $psql_connstr, '-f', '-');
# If the caller wants an array and hasn't passed stdout/stderr
# references, allocate temporary ones to capture them so we
local %ENV = $self->_get_env();
- my @psql_params = ($self->installed_command('psql'),
- '-XAt', '-d', $self->connstr($dbname));
+ my @psql_params = (
+ $self->installed_command('psql'),
+ '-XAt', '-d', $self->connstr($dbname));
push @psql_params, @{ $params{extra_params} }
if defined $params{extra_params};
}
if (@log_like or @log_unlike)
{
- my $log_contents = TestLib::slurp_file($self->logfile,
- $log_location);
+ my $log_contents = TestLib::slurp_file($self->logfile, $log_location);
while (my $regex = shift @log_like)
{
if (@log_like or @log_unlike)
{
- my $log_contents = TestLib::slurp_file($self->logfile,
- $log_location);
+ my $log_contents = TestLib::slurp_file($self->logfile, $log_location);
while (my $regex = shift @log_like)
{
$expected = 't' unless defined($expected); # default value
- my $cmd = [ $self->installed_command('psql'),
- '-XAt', '-c', $query, '-d', $self->connstr($dbname) ];
+ my $cmd = [
+ $self->installed_command('psql'),
+ '-XAt', '-c', $query, '-d', $self->connstr($dbname)
+ ];
my ($stdout, $stderr);
my $max_attempts = 180 * 10;
my $attempts = 0;
my @cmd = (
$self->installed_command('pg_recvlogical'),
- '-S', $slot_name, '--dbname',
- $self->connstr($dbname));
+ '-S', $slot_name, '--dbname', $self->connstr($dbname));
push @cmd, '--endpos', $endpos;
push @cmd, '-f', '-', '--no-loop', '--start';
# Accept standard formats, in case caller has handed us the output of a
# postgres command line tool
my $devel;
- ($arg,$devel) = ($1, $2)
- if ($arg =~
- m!^ # beginning of line
+ ($arg, $devel) = ($1, $2)
+ if (
+ $arg =~ m!^ # beginning of line
(?:\(?PostgreSQL\)?\s)? # ignore PostgreSQL marker
(\d+(?:\.\d+)*) # version number, dotted notation
(devel|(?:alpha|beta|rc)\d+)? # dev marker - see version_stamp.pl
$devel ||= "";
- return bless { str => "$arg$devel", num => \@numbers }, $class;
+ return bless { str => "$arg$devel", num => \@numbers }, $class;
}
# Routine which compares the _pg_version_array obtained for the two
# Render the version number using the saved string.
sub _stringify
{
- my $self = shift;
+ my $self = shift;
return $self->{str};
}
if ($windows_os)
{
require Win32API::File;
- Win32API::File->import(qw(createFile OsFHandleOpen CloseHandle setFilePointer));
+ Win32API::File->import(
+ qw(createFile OsFHandleOpen CloseHandle setFilePointer));
}
# Specifies whether to use Unix sockets for test setups. On
# TESTDIR environment variable, which is normally set by the invoking
# Makefile.
$tmp_check = $ENV{TESTDIR} ? "$ENV{TESTDIR}/tmp_check" : "tmp_check";
- $log_path = "$tmp_check/log";
+ $log_path = "$tmp_check/log";
mkdir $tmp_check;
mkdir $log_path;
# long as the process was not terminated by an exception. To work around
# that, use $h->full_results on Windows instead.
my $result =
- ($Config{osname} eq "MSWin32")
+ ($Config{osname} eq "MSWin32")
? ($h->full_results)[0]
: $h->result(0);
is($result, $expected, $test_name);
my $mode = shift;
my $status = shift;
- my $node1_host = $node1->host;
- my $node1_port = $node1->port;
- my $node1_name = $node1->name;
- my $node2_host = $node2->host;
- my $node2_port = $node2->port;
- my $node2_name = $node2->name;
+ my $node1_host = $node1->host;
+ my $node1_port = $node1->port;
+ my $node1_name = $node1->name;
+ my $node2_host = $node2->host;
+ my $node2_port = $node2->port;
+ my $node2_name = $node2->name;
my $target_port = undef;
$target_port = $target_node->port if (defined $target_node);
my $target_name = undef;
'postgres', "
CREATE ROLE repl_role REPLICATION LOGIN;
GRANT pg_read_all_settings TO repl_role;");
-my $primary_host = $node_primary->host;
-my $primary_port = $node_primary->port;
+my $primary_host = $node_primary->host;
+my $primary_port = $node_primary->port;
my $connstr_common = "host=$primary_host port=$primary_port user=repl_role";
my $connstr_rep = "$connstr_common replication=1";
my $connstr_db = "$connstr_common replication=database dbname=postgres";
{
my $test_name = shift;
my $node_name = shift;
- my $node_primary = shift;
+ my $node_primary = shift;
my $recovery_params = shift;
my $num_rows = shift;
my $until_lsn = shift;
sub start_standby_and_wait
{
my ($primary, $standby) = @_;
- my $primary_name = $primary->name;
+ my $primary_name = $primary->name;
my $standby_name = $standby->name;
my $query =
"SELECT count(1) = 1 FROM pg_stat_replication WHERE application_name = '$standby_name'";
'aborted', 'xid is aborted after crash');
$stdin .= "\\q\n";
-$tx->finish; # wait for psql to quit gracefully
+$tx->finish; # wait for psql to quit gracefully
# One psql to primary and standby each, for all queries. That allows us
# to check uncommitted changes being replicated and such.
my %psql_primary = (stdin => '', stdout => '', stderr => '');
-$psql_primary{run} =
- IPC::Run::start(
- ['psql', '-XA', '-f', '-', '-d', $node_primary->connstr('postgres')],
- '<', \$psql_primary{stdin},
- '>', \$psql_primary{stdout},
- '2>', \$psql_primary{stderr},
- $psql_timeout);
+$psql_primary{run} = IPC::Run::start(
+ [ 'psql', '-XA', '-f', '-', '-d', $node_primary->connstr('postgres') ],
+ '<',
+ \$psql_primary{stdin},
+ '>',
+ \$psql_primary{stdout},
+ '2>',
+ \$psql_primary{stderr},
+ $psql_timeout);
my %psql_standby = ('stdin' => '', 'stdout' => '', 'stderr' => '');
-$psql_standby{run} =
- IPC::Run::start(
- ['psql', '-XA', '-f', '-', '-d', $node_standby->connstr('postgres')],
- '<', \$psql_standby{stdin},
- '>', \$psql_standby{stdout},
- '2>', \$psql_standby{stderr},
- $psql_timeout);
+$psql_standby{run} = IPC::Run::start(
+ [ 'psql', '-XA', '-f', '-', '-d', $node_standby->connstr('postgres') ],
+ '<',
+ \$psql_standby{stdin},
+ '>',
+ \$psql_standby{stdout},
+ '2>',
+ \$psql_standby{stderr},
+ $psql_timeout);
#
# 1. Check initial data is the same
#
-ok(send_query_and_wait(\%psql_standby,
- q/SELECT * FROM test_visibility ORDER BY data;/,
- qr/^\(0 rows\)$/m),
- 'data not visible');
+ok( send_query_and_wait(
+ \%psql_standby,
+ q/SELECT * FROM test_visibility ORDER BY data;/,
+ qr/^\(0 rows\)$/m),
+ 'data not visible');
#
# 2. Check if an INSERT is replayed and visible
#
-$node_primary->psql('postgres', "INSERT INTO test_visibility VALUES ('first insert')");
+$node_primary->psql('postgres',
+ "INSERT INTO test_visibility VALUES ('first insert')");
$node_primary->wait_for_catchup($node_standby, 'replay',
$node_primary->lsn('insert'));
-ok(send_query_and_wait(\%psql_standby,
- q[SELECT * FROM test_visibility ORDER BY data;],
- qr/first insert.*\n\(1 row\)/m),
- 'insert visible');
+ok( send_query_and_wait(
+ \%psql_standby,
+ q[SELECT * FROM test_visibility ORDER BY data;],
+ qr/first insert.*\n\(1 row\)/m),
+ 'insert visible');
#
# 3. Verify that uncommitted changes aren't visible.
#
-ok(send_query_and_wait(\%psql_primary,
- q[
+ok( send_query_and_wait(
+ \%psql_primary,
+ q[
BEGIN;
UPDATE test_visibility SET data = 'first update' RETURNING data;
],
- qr/^UPDATE 1$/m),
- 'UPDATE');
+ qr/^UPDATE 1$/m),
+ 'UPDATE');
-$node_primary->psql('postgres', "SELECT txid_current();"); # ensure WAL flush
+$node_primary->psql('postgres', "SELECT txid_current();"); # ensure WAL flush
$node_primary->wait_for_catchup($node_standby, 'replay',
- $node_primary->lsn('insert'));
+ $node_primary->lsn('insert'));
-ok(send_query_and_wait(\%psql_standby,
- q[SELECT * FROM test_visibility ORDER BY data;],
- qr/first insert.*\n\(1 row\)/m),
- 'uncommitted update invisible');
+ok( send_query_and_wait(
+ \%psql_standby,
+ q[SELECT * FROM test_visibility ORDER BY data;],
+ qr/first insert.*\n\(1 row\)/m),
+ 'uncommitted update invisible');
#
# 4. Check that a commit makes the changes from step 3 visible
#
-ok(send_query_and_wait(\%psql_primary,
- q[COMMIT;],
- qr/^COMMIT$/m),
- 'COMMIT');
+ok(send_query_and_wait(\%psql_primary, q[COMMIT;], qr/^COMMIT$/m), 'COMMIT');
$node_primary->wait_for_catchup($node_standby, 'replay',
$node_primary->lsn('insert'));
-ok(send_query_and_wait(\%psql_standby,
- q[SELECT * FROM test_visibility ORDER BY data;],
- qr/first update\n\(1 row\)$/m),
- 'committed update visible');
+ok( send_query_and_wait(
+ \%psql_standby,
+ q[SELECT * FROM test_visibility ORDER BY data;],
+ qr/first update\n\(1 row\)$/m),
+ 'committed update visible');
#
# 5. Check that changes in prepared xacts are invisible
#
-ok(send_query_and_wait(\%psql_primary, q[
+ok( send_query_and_wait(
+ \%psql_primary, q[
DELETE from test_visibility; -- delete old data, so we start with a clean slate
BEGIN;
INSERT INTO test_visibility VALUES('inserted in prepared will_commit');
PREPARE TRANSACTION 'will_commit';],
- qr/^PREPARE TRANSACTION$/m),
- 'prepared will_commit');
+ qr/^PREPARE TRANSACTION$/m),
+ 'prepared will_commit');
-ok(send_query_and_wait(\%psql_primary, q[
+ok( send_query_and_wait(
+ \%psql_primary, q[
BEGIN;
INSERT INTO test_visibility VALUES('inserted in prepared will_abort');
PREPARE TRANSACTION 'will_abort';
],
- qr/^PREPARE TRANSACTION$/m),
- 'prepared will_abort');
+ qr/^PREPARE TRANSACTION$/m),
+ 'prepared will_abort');
$node_primary->wait_for_catchup($node_standby, 'replay',
- $node_primary->lsn('insert'));
+ $node_primary->lsn('insert'));
-ok(send_query_and_wait(\%psql_standby,
- q[SELECT * FROM test_visibility ORDER BY data;],
- qr/^\(0 rows\)$/m),
- 'uncommitted prepared invisible');
+ok( send_query_and_wait(
+ \%psql_standby,
+ q[SELECT * FROM test_visibility ORDER BY data;],
+ qr/^\(0 rows\)$/m),
+ 'uncommitted prepared invisible');
# For some variation, finish prepared xacts via separate connections
-$node_primary->safe_psql('postgres',
- "COMMIT PREPARED 'will_commit';");
-$node_primary->safe_psql('postgres',
- "ROLLBACK PREPARED 'will_abort';");
+$node_primary->safe_psql('postgres', "COMMIT PREPARED 'will_commit';");
+$node_primary->safe_psql('postgres', "ROLLBACK PREPARED 'will_abort';");
$node_primary->wait_for_catchup($node_standby, 'replay',
$node_primary->lsn('insert'));
-ok(send_query_and_wait(\%psql_standby,
- q[SELECT * FROM test_visibility ORDER BY data;],
- qr/will_commit.*\n\(1 row\)$/m),
- 'finished prepared visible');
+ok( send_query_and_wait(
+ \%psql_standby,
+ q[SELECT * FROM test_visibility ORDER BY data;],
+ qr/will_commit.*\n\(1 row\)$/m),
+ 'finished prepared visible');
# explicitly shut down psql instances gracefully - to avoid hangs
# or worse on Windows
-$psql_primary{stdin} .= "\\q\n";
+$psql_primary{stdin} .= "\\q\n";
$psql_primary{run}->finish;
$psql_standby{stdin} .= "\\q\n";
$psql_standby{run}->finish;
if ($psql_timeout->is_expired)
{
- BAIL_OUT("aborting wait: program timed out\n".
- "stream contents: >>$$psql{stdout}<<\n".
- "pattern searched for: $untl\n");
+ BAIL_OUT("aborting wait: program timed out\n"
+ . "stream contents: >>$$psql{stdout}<<\n"
+ . "pattern searched for: $untl\n");
return 0;
}
if (not $$psql{run}->pumpable())
{
- BAIL_OUT("aborting wait: program died\n".
- "stream contents: >>$$psql{stdout}<<\n".
- "pattern searched for: $untl\n");
+ BAIL_OUT("aborting wait: program died\n"
+ . "stream contents: >>$$psql{stdout}<<\n"
+ . "pattern searched for: $untl\n");
return 0;
}
$$psql{run}->pump();
SELECT pg_reload_conf();]);
# create table, insert rows
-$node->safe_psql(
- 'postgres',
- q[CREATE TABLE tab_crash (a integer UNIQUE);]);
+$node->safe_psql('postgres', q[CREATE TABLE tab_crash (a integer UNIQUE);]);
# Run psql, keeping session alive, so we have an alive backend to kill.
my ($killme_stdin, $killme_stdout, $killme_stderr) = ('', '', '');
c INT;
BEGIN
LOOP
- SELECT COUNT(*) INTO c FROM pg_locks WHERE pid = ] . $pid . q[ AND NOT granted;
+ SELECT COUNT(*) INTO c FROM pg_locks WHERE pid = ] . $pid
+ . q[ AND NOT granted;
IF c > 0 THEN
EXIT;
END IF;
$node->poll_query_until('postgres', 'SELECT 1', '1');
# Check for temporary files
-is($node->safe_psql(
- 'postgres',
- 'SELECT COUNT(1) FROM pg_ls_dir($$base/pgsql_tmp$$)'),
- qq(0), 'no temporary files');
+is( $node->safe_psql(
+ 'postgres', 'SELECT COUNT(1) FROM pg_ls_dir($$base/pgsql_tmp$$)'),
+ qq(0),
+ 'no temporary files');
#
# Test old behavior (don't remove temporary files after crash)
c INT;
BEGIN
LOOP
- SELECT COUNT(*) INTO c FROM pg_locks WHERE pid = ] . $pid . q[ AND NOT granted;
+ SELECT COUNT(*) INTO c FROM pg_locks WHERE pid = ] . $pid
+ . q[ AND NOT granted;
IF c > 0 THEN
EXIT;
END IF;
$node->poll_query_until('postgres', 'SELECT 1', '1');
# Check for temporary files -- should be there
-is($node->safe_psql(
- 'postgres',
- 'SELECT COUNT(1) FROM pg_ls_dir($$base/pgsql_tmp$$)'),
- qq(1), 'one temporary file');
+is( $node->safe_psql(
+ 'postgres', 'SELECT COUNT(1) FROM pg_ls_dir($$base/pgsql_tmp$$)'),
+ qq(1),
+ 'one temporary file');
# Restart should remove the temporary files
$node->restart();
# Check the temporary files -- should be gone
-is($node->safe_psql(
- 'postgres',
- 'SELECT COUNT(1) FROM pg_ls_dir($$base/pgsql_tmp$$)'),
- qq(0), 'temporary file was removed');
+is( $node->safe_psql(
+ 'postgres', 'SELECT COUNT(1) FROM pg_ls_dir($$base/pgsql_tmp$$)'),
+ qq(0),
+ 'temporary file was removed');
$node->stop();
# Make WAL segment eligible for archival
$node->safe_psql('postgres', 'SELECT pg_switch_wal()');
-my $archive_wait_query
- = "SELECT '$walfile_to_be_archived' <= last_archived_wal FROM pg_stat_archiver;";
+my $archive_wait_query =
+ "SELECT '$walfile_to_be_archived' <= last_archived_wal FROM pg_stat_archiver;";
# Wait until the WAL segment has been archived.
$node->poll_query_until('postgres', $archive_wait_query)
my $recovery_node = get_new_node($node_name);
$recovery_node->init_from_backup(
$node, $backup_name,
- has_restoring => 1, standby => $standby_setting);
+ has_restoring => 1,
+ standby => $standby_setting);
# Use run_log instead of recovery_node->start because this test expects
# that the server ends with an error during recovery.
run_log(
- ['pg_ctl','-D', $recovery_node->data_dir, '-l',
- $recovery_node->logfile, 'start']);
+ [
+ 'pg_ctl', '-D',
+ $recovery_node->data_dir, '-l',
+ $recovery_node->logfile, 'start'
+ ]);
# Wait up to 180s for postgres to terminate
foreach my $i (0 .. 1800)
# Confirm that the archive recovery fails with an expected error
my $logfile = slurp_file($recovery_node->logfile());
ok( $logfile =~
- qr/FATAL: .* WAL was generated with wal_level=minimal, cannot continue recovering/,
- "$node_text ends with an error because it finds WAL generated with wal_level=minimal");
+ qr/FATAL: .* WAL was generated with wal_level=minimal, cannot continue recovering/,
+ "$node_text ends with an error because it finds WAL generated with wal_level=minimal"
+ );
}
# Test for archive recovery
char data_before[4];
slock_t lock;
char data_after[4];
- } struct_w_lock;
+ } struct_w_lock;
memcpy(struct_w_lock.data_before, "abcd", 4);
memcpy(struct_w_lock.data_after, "ef12", 4);
}
/*
- * Ensure that allocating more than INT32_MAX emulated spinlocks
- * works. That's interesting because the spinlock emulation uses a 32bit
- * integer to map spinlocks onto semaphores. There've been bugs...
+ * Ensure that allocating more than INT32_MAX emulated spinlocks works.
+ * That's interesting because the spinlock emulation uses a 32bit integer
+ * to map spinlocks onto semaphores. There've been bugs...
*/
#ifndef HAVE_SPINLOCKS
{
/*
- * Initialize enough spinlocks to advance counter close to
- * wraparound. It's too expensive to perform acquire/release for each,
- * as those may be syscalls when the spinlock emulation is used (and
- * even just atomic TAS would be expensive).
+ * Initialize enough spinlocks to advance counter close to wraparound.
+ * It's too expensive to perform acquire/release for each, as those
+ * may be syscalls when the spinlock emulation is used (and even just
+ * atomic TAS would be expensive).
*/
for (uint32 i = 0; i < INT32_MAX - 100000; i++)
{
- slock_t lock;
+ slock_t lock;
SpinLockInit(&lock);
}
for (uint32 i = 0; i < 200000; i++)
{
- slock_t lock;
+ slock_t lock;
SpinLockInit(&lock);
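Regarding the comment above about mapping spinlocks onto semaphores through a 32-bit integer: the wraparound hazard is easiest to see in isolation. A hedged sketch (hypothetical names and pool size, not the actual spin.c code) -- an unsigned counter keeps the modulo well-defined past INT32_MAX, whereas signed overflow would be undefined behavior:

#include <stdint.h>

#define DEMO_NUM_SEMAPHORES 128 /* hypothetical fixed pool */

static uint32_t demo_counter = 0;

static int
demo_assign_slot(void)
{
	/* well-defined even after 2^31 allocations */
	return (int) (demo_counter++ % DEMO_NUM_SEMAPHORES);
}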
static void
test_atomic_spin_nest(void)
{
- slock_t lock;
+ slock_t lock;
#define NUM_TEST_ATOMICS (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES + 27)
pg_atomic_uint32 atomics32[NUM_TEST_ATOMICS];
pg_atomic_uint64 atomics64[NUM_TEST_ATOMICS];
# This changes ssl/client.key to ssl/client_tmp.key etc for the rest
# of the tests.
my @keys = (
- "client", "client-revoked",
- "client-der", "client-encrypted-pem",
+ "client", "client-revoked",
+ "client-der", "client-encrypted-pem",
"client-encrypted-der", "client-dn");
foreach my $key (@keys)
{
my $cafile = $_[2] || "root+client_ca";
my $crlfile = "root+client.crl";
my $crldir;
- my $pgdata = $node->data_dir;
+ my $pgdata = $node->data_dir;
# defaults to using the CRL file
if (defined $_[3] || defined $_[4])
{
$crlfile = $_[3];
- $crldir = $_[4];
+ $crldir = $_[4];
}
open my $sslconf, '>', "$pgdata/sslconfig.conf";
print $sslconf "ssl_cert_file='$certfile.crt'\n";
print $sslconf "ssl_key_file='$certfile.key'\n";
print $sslconf "ssl_crl_file='$crlfile'\n" if defined $crlfile;
- print $sslconf "ssl_crl_dir='$crldir'\n" if defined $crldir;
+ print $sslconf "ssl_crl_dir='$crldir'\n" if defined $crldir;
close $sslconf;
$node->restart;
# from the publication.
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_ins");
-is($result, qq(1052|1|1002), 'check rows on subscriber before table drop from publication');
+is($result, qq(1052|1|1002),
+ 'check rows on subscriber before table drop from publication');
# Drop the table from publication
$node_publisher->safe_psql('postgres',
# publication, so row count should remain the same.
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_ins");
-is($result, qq(1052|1|1002), 'check rows on subscriber after table drop from publication');
+is($result, qq(1052|1|1002),
+ 'check rows on subscriber after table drop from publication');
# Delete the inserted row in publisher
$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a = 8888");
# Setup logical replication that will only be used for this test
$node_publisher->safe_psql('postgres',
- "CREATE PUBLICATION tap_pub_temp1 FOR TABLE temp1 WITH (publish = insert)");
+ "CREATE PUBLICATION tap_pub_temp1 FOR TABLE temp1 WITH (publish = insert)"
+);
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_temp2 FOR TABLE temp2");
$node_subscriber->safe_psql('postgres',
or die "Timed out while waiting for subscriber to synchronize data";
# Subscriber table will have no rows initially
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT count(*) FROM temp1");
-is($result, qq(0), 'check initial rows on subscriber with multiple publications');
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM temp1");
+is($result, qq(0),
+ 'check initial rows on subscriber with multiple publications');
# Insert a row into the table that's part of the first publication in the
# subscriber's list of publications.
$node_publisher->wait_for_catchup('tap_sub_temp1');
# Subscriber should receive the inserted row
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT count(*) FROM temp1");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM temp1");
is($result, qq(1), 'check rows on subscriber with multiple publications');
# Drop subscription as we don't need it anymore
# at this time. Recreate the subscription, which will do the initial copy of
# the table again and fail due to a unique constraint violation.
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub");
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub"
+);
$result = $node_subscriber->poll_query_until('postgres', $started_query)
- or die "Timed out while waiting for subscriber to start sync";
+ or die "Timed out while waiting for subscriber to start sync";
# DROP SUBSCRIPTION must clean up slots on the publisher side when the
# subscriber is stuck on data copy for constraint violation.
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
-is($result, qq(0), 'DROP SUBSCRIPTION during error can clean up the slots on the publisher');
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
+is($result, qq(0),
+ 'DROP SUBSCRIPTION during error can clean up the slots on the publisher');
$node_subscriber->stop('fast');
$node_publisher->stop('fast');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab1");
-is($result, qq(0||), 'truncate replicated in synchronous logical replication');
+is($result, qq(0||),
+ 'truncate replicated in synchronous logical replication');
# Create publisher node
my $node_publisher = get_new_node('publisher');
$node_publisher->init(allows_streaming => 'logical');
-$node_publisher->append_conf('postgresql.conf', 'logical_decoding_work_mem = 64kB');
+$node_publisher->append_conf('postgresql.conf',
+ 'logical_decoding_work_mem = 64kB');
$node_publisher->start;
# Create subscriber node
"INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
# Setup structure on subscriber
-$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
-"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
);
$node_publisher->wait_for_catchup($appname);
# Also wait for initial table sync to finish
my $synced_query =
-"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
my $result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(2|2|2), 'check initial data was copied to subscriber');
# Interleave a pair of transactions, each exceeding the 64kB limit.
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(3334|3334|3334), 'check extra columns contain local defaults');
# Test the streaming in binary mode
$node_subscriber->safe_psql('postgres',
-"ALTER SUBSCRIPTION tap_sub SET (binary = on)"
-);
+ "ALTER SUBSCRIPTION tap_sub SET (binary = on)");
# Insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(5001, 10000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(6667|6667|6667), 'check extra columns contain local defaults');
# Change the local values of the extra columns on the subscriber,
# update publisher, and check that subscriber retains the expected
# values. This is to ensure that non-streaming transactions behave
# properly after a streaming transaction.
-$node_subscriber->safe_psql('postgres', "UPDATE test_tab SET c = 'epoch'::timestamptz + 987654321 * interval '1s'");
-$node_publisher->safe_psql('postgres', "UPDATE test_tab SET b = md5(a::text)");
+$node_subscriber->safe_psql('postgres',
+ "UPDATE test_tab SET c = 'epoch'::timestamptz + 987654321 * interval '1s'"
+);
+$node_publisher->safe_psql('postgres',
+ "UPDATE test_tab SET b = md5(a::text)");
$node_publisher->wait_for_catchup($appname);
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(extract(epoch from c) = 987654321), count(d = 999) FROM test_tab");
-is($result, qq(6667|6667|6667), 'check extra columns contain locally changed data');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(extract(epoch from c) = 987654321), count(d = 999) FROM test_tab"
+);
+is($result, qq(6667|6667|6667),
+ 'check extra columns contain locally changed data');
$node_subscriber->stop;
$node_publisher->stop;
# Create publisher node
my $node_publisher = get_new_node('publisher');
$node_publisher->init(allows_streaming => 'logical');
-$node_publisher->append_conf('postgresql.conf', 'logical_decoding_work_mem = 64kB');
+$node_publisher->append_conf('postgresql.conf',
+ 'logical_decoding_work_mem = 64kB');
$node_publisher->start;
# Create subscriber node
"INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
# Setup structure on subscriber
-$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
-"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
);
$node_publisher->wait_for_catchup($appname);
# Also wait for initial table sync to finish
my $synced_query =
-"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
my $result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(2|2|2), 'check initial data was copied to subscriber');
# Insert, update and delete enough rows to exceed 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series( 3, 500) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(1667|1667|1667), 'check data was copied to subscriber in streaming mode and extra columns contain local defaults');
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(1667|1667|1667),
+ 'check data was copied to subscriber in streaming mode and extra columns contain local defaults'
+);
$node_subscriber->stop;
$node_publisher->stop;
# Create publisher node
my $node_publisher = get_new_node('publisher');
$node_publisher->init(allows_streaming => 'logical');
-$node_publisher->append_conf('postgresql.conf', 'logical_decoding_work_mem = 64kB');
+$node_publisher->append_conf('postgresql.conf',
+ 'logical_decoding_work_mem = 64kB');
$node_publisher->start;
# Create subscriber node
"INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
# Setup structure on subscriber
-$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b text, c INT, d INT, e INT, f INT)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c INT, d INT, e INT, f INT)"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
-"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
);
$node_publisher->wait_for_catchup($appname);
# Also wait for initial table sync to finish
my $synced_query =
-"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
my $result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(2|0|0), 'check initial data was copied to subscriber');
# a small (non-streamed) transaction with DDL and DML
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab VALUES (3, md5(3::text));
ALTER TABLE test_tab ADD COLUMN c INT;
});
# large (streamed) transaction with DDL and DML
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text), -i FROM generate_series(5, 1000) s(i);
ALTER TABLE test_tab ADD COLUMN d INT;
});
# a small (non-streamed) transaction with DDL and DML
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab VALUES (2001, md5(2001::text), -2001, 2*2001);
ALTER TABLE test_tab ADD COLUMN e INT;
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d), count(e) FROM test_tab");
-is($result, qq(2002|1999|1002|1), 'check data was copied to subscriber in streaming mode and extra columns contain local defaults');
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d), count(e) FROM test_tab");
+is($result, qq(2002|1999|1002|1),
+ 'check data was copied to subscriber in streaming mode and extra columns contain local defaults'
+);
# A large (streamed) transaction with DDL and DML. One of the DDL statements
# is performed after the DML to ensure that we invalidate the schema sent for
# test_tab so that the next transaction has to send the schema again.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text), -i, 2*i, -3*i FROM generate_series(2003,5000) s(i);
ALTER TABLE test_tab ADD COLUMN f INT;
# A small transaction that won't get streamed. This is just to ensure that we
# send the schema again to reflect the last column added in the previous test.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text), -i, 2*i, -3*i, 4*i FROM generate_series(5001,5005) s(i);
COMMIT;
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d), count(e), count(f) FROM test_tab");
-is($result, qq(5005|5002|4005|3004|5), 'check data was copied to subscriber for both streaming and non-streaming transactions');
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d), count(e), count(f) FROM test_tab");
+is($result, qq(5005|5002|4005|3004|5),
+ 'check data was copied to subscriber for both streaming and non-streaming transactions'
+);
$node_subscriber->stop;
$node_publisher->stop;
# Create publisher node
my $node_publisher = get_new_node('publisher');
$node_publisher->init(allows_streaming => 'logical');
-$node_publisher->append_conf('postgresql.conf', 'logical_decoding_work_mem = 64kB');
+$node_publisher->append_conf('postgresql.conf',
+ 'logical_decoding_work_mem = 64kB');
$node_publisher->start;
# Create subscriber node
"INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
# Setup structure on subscriber
-$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b text, c INT, d INT, e INT)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c INT, d INT, e INT)");
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
-"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
);
$node_publisher->wait_for_catchup($appname);
# Also wait for initial table sync to finish
my $synced_query =
-"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
my $result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c) FROM test_tab");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
is($result, qq(2|0), 'check initial data was copied to subscriber');
# large (streamed) transaction with DDL, DML and ROLLBACKs
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3,500) s(i);
SAVEPOINT s1;
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c) FROM test_tab");
-is($result, qq(2000|0), 'check rollback to savepoint was reflected on subscriber and extra columns contain local defaults');
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
+is($result, qq(2000|0),
+ 'check rollback to savepoint was reflected on subscriber and extra columns contain local defaults'
+);
# large (streamed) transaction with subscriber receiving out of order
# subtransaction ROLLBACKs
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(4001,4500) s(i);
SAVEPOINT s1;
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c) FROM test_tab");
-is($result, qq(2500|0), 'check rollback to savepoint was reflected on subscriber');
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
+is($result, qq(2500|0),
+ 'check rollback to savepoint was reflected on subscriber');
# large (streamed) transaction with subscriber receiving rollback
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(8501,9000) s(i);
SAVEPOINT s1;
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c) FROM test_tab");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
is($result, qq(2500|0), 'check rollback was reflected on subscriber');
$node_subscriber->stop;
# Create publisher node
my $node_publisher = get_new_node('publisher');
$node_publisher->init(allows_streaming => 'logical');
-$node_publisher->append_conf('postgresql.conf', 'logical_decoding_work_mem = 64kB');
+$node_publisher->append_conf('postgresql.conf',
+ 'logical_decoding_work_mem = 64kB');
$node_publisher->start;
# Create subscriber node
"INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
# Setup structure on subscriber
-$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b text, c INT, d INT, e INT)");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b text, c INT, d INT, e INT)");
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
-"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
+ "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (streaming = on)"
);
$node_publisher->wait_for_catchup($appname);
# Also wait for initial table sync to finish
my $synced_query =
-"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
my $result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c) FROM test_tab");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
is($result, qq(2|0), 'check initial data was copied to subscriber');
# large (streamed) transaction with DDL, DML and ROLLBACKs
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3,500) s(i);
ALTER TABLE test_tab ADD COLUMN c INT;
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c) FROM test_tab");
-is($result, qq(1000|500), 'check rollback to savepoint was reflected on subscriber and extra columns contain local defaults');
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c) FROM test_tab");
+is($result, qq(1000|500),
+ 'check rollback to savepoint was reflected on subscriber and extra columns contain local defaults'
+);
$node_subscriber->stop;
$node_publisher->stop;
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE tab_test");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE tab_test");
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub"
));
# 66 77 67 == B M C == BEGIN MESSAGE COMMIT
-is($result, qq(66
+is( $result, qq(66
77
67),
'messages on slot are B M C with message option');
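Those byte values are the first byte of each pgoutput protocol message, peeked off the replication slot in binary form. A sketch of the kind of query that yields them, written here as an assumption rather than the elided test code (slot and publication names taken from the setup above; proto_version '1' is assumed):

my $result = $node_publisher->safe_psql(
	'postgres', qq(
	SELECT get_byte(data, 0)
	FROM pg_logical_slot_peek_binary_changes('tap_sub', NULL, NULL,
		'proto_version', '1',
		'publication_names', 'tap_pub',
		'messages', 'true')
));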
));
# 66 67 == B C == BEGIN COMMIT
-is($result, qq(66
+is( $result, qq(66
67),
- 'option messages defaults to false so message (M) is not available on slot');
+ 'option messages defaults to false so message (M) is not available on slot'
+);
$node_subscriber->safe_psql('postgres', "ALTER SUBSCRIPTION tap_sub ENABLE");
$node_publisher->wait_for_catchup('tap_sub');
$node_publisher->safe_psql('postgres', "INSERT INTO tab_test VALUES (1)");
my $message_lsn = $node_publisher->safe_psql('postgres',
- "SELECT pg_logical_emit_message(false, 'pgoutput', 'a non-transactional message')");
+ "SELECT pg_logical_emit_message(false, 'pgoutput', 'a non-transactional message')"
+);
$node_publisher->safe_psql('postgres', "INSERT INTO tab_test VALUES (2)");
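pg_logical_emit_message() returns the LSN at which the message was written; its first argument chooses transactional (true) versus non-transactional (false) emission, and a non-transactional message is decoded regardless of whether the surrounding transaction commits or aborts. A minimal sketch of the transactional variant, for contrast (not part of this diff):

my $txn_message_lsn = $node_publisher->safe_psql('postgres',
	"SELECT pg_logical_emit_message(true, 'pgoutput', 'a transactional message')");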
'messages', 'true')
));
-is($result, qq(77|0
+is( $result, qq(77|0
77|0),
'non-transactional message on slot from aborted transaction is M');
INSERT INTO t SELECT * FROM generate_series(1, $rows);
INSERT INTO t2 SELECT * FROM generate_series(1, $rows);
});
-$node_twoways->safe_psql(
- 'd1', 'ALTER PUBLICATION testpub ADD TABLE t2');
-$node_twoways->safe_psql(
- 'd2', 'ALTER SUBSCRIPTION testsub REFRESH PUBLICATION');
+$node_twoways->safe_psql('d1', 'ALTER PUBLICATION testpub ADD TABLE t2');
+$node_twoways->safe_psql('d2',
+ 'ALTER SUBSCRIPTION testsub REFRESH PUBLICATION');
# We cannot rely solely on wait_for_catchup() here; it isn't sufficient
# when tablesync workers might still be running. So in addition to that,
print "Generating timezone files...";
- my @args = (
- "$conf/zic/zic", '-d', "$target/share/timezone");
+ my @args = ("$conf/zic/zic", '-d', "$target/share/timezone");
foreach (@tzfiles)
{
my $tzfile = $_;
('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo', 'libpq_pipeline');
my @contrib_uselibpgport = ('libpq_pipeline', 'oid2name', 'vacuumlo');
my @contrib_uselibpgcommon = ('libpq_pipeline', 'oid2name', 'vacuumlo');
-my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] };
-my $contrib_extraincludes = { 'dblink' => ['src/backend'] };
-my $contrib_extrasource = {
+my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] };
+my $contrib_extraincludes = { 'dblink' => ['src/backend'] };
+my $contrib_extrasource = {
'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ],
'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ],
};
# Set of variables for frontend modules
my $frontend_defines = { 'initdb' => 'FRONTEND' };
-my @frontend_uselibpq = ('pg_amcheck', 'pg_ctl', 'pg_upgrade', 'pgbench', 'psql', 'initdb');
+my @frontend_uselibpq =
+ ('pg_amcheck', 'pg_ctl', 'pg_upgrade', 'pgbench', 'psql', 'initdb');
my @frontend_uselibpgport = (
- 'pg_amcheck', 'pg_archivecleanup', 'pg_test_fsync',
- 'pg_test_timing', 'pg_upgrade',
- 'pg_waldump', 'pgbench');
+ 'pg_amcheck', 'pg_archivecleanup',
+ 'pg_test_fsync', 'pg_test_timing',
+ 'pg_upgrade', 'pg_waldump',
+ 'pgbench');
my @frontend_uselibpgcommon = (
- 'pg_amcheck', 'pg_archivecleanup', 'pg_test_fsync',
- 'pg_test_timing', 'pg_upgrade',
- 'pg_waldump', 'pgbench');
+ 'pg_amcheck', 'pg_archivecleanup',
+ 'pg_test_fsync', 'pg_test_timing',
+ 'pg_upgrade', 'pg_waldump',
+ 'pgbench');
my $frontend_extralibs = {
'initdb' => ['ws2_32.lib'],
'pg_amcheck' => ['ws2_32.lib'],
elsif (/\bAC_DEFINE\(OPENSSL_API_COMPAT, \[([0-9xL]+)\]/)
{
$ac_define_openssl_api_compat_found = 1;
- $openssl_api_compat = $1;
+ $openssl_api_compat = $1;
}
}
close($c);
# for backwards compatibility, "serial" runs the tests in
# parallel_schedule one by one.
my $maxconn = $maxconn;
- $maxconn = "--max-connections=1" if $schedule eq 'serial';
- $schedule = 'parallel' if $schedule eq 'serial';
+ $maxconn = "--max-connections=1" if $schedule eq 'serial';
+ $schedule = 'parallel' if $schedule eq 'serial';
my @args = (
"../../../$Config/pg_regress/pg_regress",
# for backwards compatibility, "serial" runs the tests in
# parallel_schedule one by one.
my $maxconn = $maxconn;
- $maxconn = "--max-connections=1" if $schedule eq 'serial';
- $schedule = 'parallel' if $schedule eq 'serial';
+ $maxconn = "--max-connections=1" if $schedule eq 'serial';
+ $schedule = 'parallel' if $schedule eq 'serial';
InstallTemp();
chdir "${topdir}/src/test/regress";
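The "my $maxconn = $maxconn;" seen above leans on a Perl scoping rule worth spelling out: a lexical declared with my is not visible until the statement declaring it ends, so the right-hand $maxconn still refers to the enclosing variable, and the sub gets a private copy it can overwrite for the serial case. A minimal illustration with hypothetical names (not from this diff):

my $maxconn = "";    # file-scope setting, e.g. parsed from the command line
sub schedule_sketch
{
	my $maxconn = $maxconn;    # initializer still sees the outer variable
	$maxconn = "--max-connections=1";    # local override; outer value unchanged
}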
	# Fetch and adjust PROVE_TESTS, applying glob() to each element
	# defined, to build a list of all the tests matching the patterns.
- my $prove_tests_val = $ENV{PROVE_TESTS} || "t/*.pl";
+ my $prove_tests_val = $ENV{PROVE_TESTS} || "t/*.pl";
my @prove_tests_array = split(/\s+/, $prove_tests_val);
- my @prove_tests = ();
+ my @prove_tests = ();
foreach (@prove_tests_array)
{
push(@prove_tests, glob($_));
# Fetch and adjust PROVE_FLAGS, handling multiple arguments.
my $prove_flags_val = $ENV{PROVE_FLAGS} || "";
- my @prove_flags = split(/\s+/, $prove_flags_val);
+ my @prove_flags = split(/\s+/, $prove_flags_val);
my @args = ("prove", @flags, @prove_tests, @prove_flags);
$ENV{PGDATA} = "$data.old";
my $outputdir = "$tmp_root/regress";
my @EXTRA_REGRESS_OPTS = ("--outputdir=$outputdir");
- mkdir "$outputdir" || die $!;
+ mkdir "$outputdir" || die $!;
my $logdir = "$topdir/src/bin/pg_upgrade/log";
rmtree($logdir);
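An aside on the mkdir call above, which this reindent touches only for whitespace: mkdir "$outputdir" || die $!; parses as mkdir("$outputdir" || die $!), because everything to the right of a list operator like mkdir belongs to its argument list. The string is always true, so the die is dead code and a failed mkdir passes silently. A sketch of the conventional spelling that keeps the error check live (a suggestion, not part of this diff):

mkdir($outputdir) or die "could not create $outputdir: $!";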
AggStatePerPhase
AggStatePerTrans
AggStrategy
+AggTransInfo
Aggref
+AggregateInstrumentation
AlenState
Alias
AllocBlock
AlterUserMappingStmt
AlteredTableInfo
AlternativeSubPlan
-AlternativeSubPlanState
+AmcheckOptions
AnalyzeAttrComputeStatsFunc
AnalyzeAttrFetchFunc
AnalyzeForeignTable_function
+AnlExprData
AnlIndexData
AnyArrayType
Append
ArrayMapState
ArrayMetaState
ArrayParseState
+ArraySubWorkspace
ArrayType
AsyncQueueControl
AsyncQueueEntry
+AsyncRequest
AttInMetadata
AttStatsSlot
AttoptCacheEntry
BTDedupInterval
BTDedupState
BTDedupStateData
+BTDeletedPageData
BTIndexStat
BTInsertState
BTInsertStateData
BTPageStat
BTPageState
BTParallelScanDesc
+BTPendingFSM
BTScanInsert
BTScanInsertData
BTScanOpaque
BTVacuumPosting
BTVacuumPostingData
BTWriteState
+BUF_MEM
BYTE
+BY_HANDLE_FILE_INFORMATION
Backend
BackendId
BackendParameters
BgWorkerStartTime
BgwHandleStatus
BinaryArithmFunc
+BindParamCbData
BipartiteMatchState
BitmapAnd
BitmapAndPath
BlockedProcData
BlockedProcsData
BloomBuildState
+BloomFilter
BloomMetaPageData
+BloomOpaque
BloomOptions
BloomPageOpaque
BloomPageOpaqueData
CRITICAL_SECTION
CRSSnapshotAction
CState
+CTECycleClause
CTEMaterialize
+CTESearchClause
CV
CachedExpression
CachedPlan
CkptTsStatus
ClientAuthentication_hook_type
ClientCertMode
+ClientCertName
ClientData
ClonePtrType
ClosePortalStmt
ConnStatusType
ConnType
ConnectionStateEnum
+ConnsAllowedState
ConsiderSplitContext
Const
ConstrCheck
ConvertRowtypeExpr
CookedConstraint
CopyDest
+CopyFormatOptions
+CopyFromState
+CopyFromStateData
CopyInsertMethod
CopyMultiInsertBuffer
CopyMultiInsertInfo
-CopyState
-CopyStateData
+CopySource
CopyStmt
+CopyToState
+CopyToStateData
Cost
CostSelector
Counters
DWORD
DataDumperPtr
DataPageDeleteStack
+DatabaseInfo
DateADT
Datum
DatumTupleFields
DisableTimeoutParams
DiscardMode
DiscardStmt
+DistanceValue
DistinctExpr
DoStmt
DocRepresentation
ErrorContextCallback
ErrorData
EstimateDSMForeignScan_function
+EstimationInfo
EventTriggerCacheEntry
EventTriggerCacheItem
EventTriggerCacheStateType
ExceptionMap
ExclusiveBackupState
ExecAuxRowMark
+ExecEvalBoolSubroutine
ExecEvalSubroutine
+ExecForeignBatchInsert_function
ExecForeignDelete_function
ExecForeignInsert_function
+ExecForeignTruncate_function
ExecForeignUpdate_function
ExecParallelEstimateContext
ExecParallelInitializeDSMContext
ExpandedArrayHeader
ExpandedObjectHeader
ExpandedObjectMethods
+ExpandedRange
ExpandedRecordFieldInfo
ExpandedRecordHeader
ExplainDirectModify_function
ExprDoneCond
ExprEvalOp
ExprEvalOpLookup
+ExprEvalRowtypeCache
ExprEvalStep
ExprState
ExprStateEvalFunc
FD_SET
FILE
FILETIME
+FILE_INFORMATION_CLASS
+FILE_STANDARD_INFORMATION
FSMAddress
FSMPage
FSMPageData
FileNameMap
FileTag
FinalPathExtraData
+FindColsContext
FindSplitData
FindSplitStrat
FixedParallelExecutorState
ForFiveState
ForFourState
ForThreeState
+ForeignAsyncConfigureWait_function
+ForeignAsyncNotify_function
+ForeignAsyncRequest_function
ForeignDataWrapper
ForeignKeyCacheInfo
ForeignKeyOptInfo
FuncExpr
FuncInfo
FuncLookupError
-Function
FunctionCallInfo
FunctionCallInfoBaseData
FunctionParameter
GenericXLogState
GeqoPrivateData
GetForeignJoinPaths_function
+GetForeignModifyBatchSize_function
GetForeignPaths_function
GetForeignPlan_function
GetForeignRelSize_function
GinTernaryValue
GinTupleCollector
GinVacuumState
-GistBufferingMode
+GistBuildMode
GistEntryVector
GistHstoreOptions
GistInetKey
GistNSN
GistOptBufferingMode
+GistSortedBuildPageState
GistSplitUnion
GistSplitVector
GistTsVectorOptions
GrantStmt
GrantTargetType
Group
+GroupClause
GroupPath
GroupPathExtraData
GroupResultPath
HIST_ENTRY
HKEY
HLOCAL
+HMAC_CTX
HMODULE
HOldEntry
HRESULT
INT128
INTERFACE_INFO
IOFuncSelector
+IO_STATUS_BLOCK
IPCompareMethod
ITEM
IV
IndexBulkDeleteResult
IndexClause
IndexClauseSet
+IndexDeleteCounts
+IndexDeletePrefetchState
IndexElem
IndexFetchHeapData
IndexFetchTableData
InfoItem
InhInfo
InheritableSocket
-InheritanceKind
InitSampleScan_function
InitializeDSMForeignScan_function
InitializeWorkerForeignScan_function
IpcMemoryState
IpcSemaphoreId
IpcSemaphoreKey
+IsForeignPathAsyncCapable_function
IsForeignRelUpdatable_function
IsForeignScanParallelSafe_function
IspellDict
JsonbSubWorkspace
JsonbTypeCategory
JsonbValue
+JumbleState
JunkFilter
KeyArray
KeySuffix
LLVMMemoryBufferRef
LLVMModuleRef
LLVMOrcJITStackRef
-LLVMOrcLookupStateRef
LLVMOrcModuleHandle
LLVMOrcTargetAddress
LLVMPassManagerBuilderRef
LPSECURITY_ATTRIBUTES
LPSERVICE_STATUS
LPSTR
+LPTHREAD_START_ROUTINE
LPTSTR
LPVOID
LPWSTR
LSEG
LUID
LVDeadTuples
+LVPagePruneState
LVParallelState
-LVRelStats
+LVRelState
LVSavedErrInfo
LVShared
LVSharedIndStats
LWLock
LWLockHandle
-LWLockMinimallyPadded
LWLockMode
LWLockPadded
LabelProvider
LocalPgBackendStatus
LocalTransactionId
LocationIndex
+LocationLen
LockAcquireResult
LockClauseStrength
LockData
LogOpts
LogStmtLevel
LogicalDecodeBeginCB
+LogicalDecodeBeginPrepareCB
LogicalDecodeChangeCB
LogicalDecodeCommitCB
-LogicalDecodeFilterPrepareCB
-LogicalDecodeBeginPrepareCB
-LogicalDecodePrepareCB
LogicalDecodeCommitPreparedCB
-LogicalDecodeRollbackPreparedCB
LogicalDecodeFilterByOriginCB
+LogicalDecodeFilterPrepareCB
LogicalDecodeMessageCB
+LogicalDecodePrepareCB
+LogicalDecodeRollbackPreparedCB
LogicalDecodeShutdownCB
-LogicalDecodeStreamStartCB
-LogicalDecodeStreamStopCB
+LogicalDecodeStartupCB
LogicalDecodeStreamAbortCB
-LogicalDecodeStreamPrepareCB
-LogicalDecodeStreamCommitCB
LogicalDecodeStreamChangeCB
+LogicalDecodeStreamCommitCB
LogicalDecodeStreamMessageCB
-LogicalDecodeStartupCB
+LogicalDecodeStreamPrepareCB
+LogicalDecodeStreamStartCB
+LogicalDecodeStreamStopCB
+LogicalDecodeStreamTruncateCB
LogicalDecodeTruncateCB
LogicalDecodingContext
LogicalErrorCallbackState
LogicalRepBeginData
LogicalRepCommitData
LogicalRepCtxStruct
+LogicalRepMsgType
LogicalRepPartMapEntry
LogicalRepRelId
LogicalRepRelMapEntry
LogicalRepTupleData
LogicalRepTyp
LogicalRepWorker
-LogicalRepWorkerId
LogicalRewriteMappingData
LogicalTape
LogicalTapeSet
MINIDUMPWRITEDUMP
MINIDUMP_TYPE
MJEvalResult
+MTTargetRelLookup
MVDependencies
MVDependency
MVNDistinct
MinMaxAggInfo
MinMaxAggPath
MinMaxExpr
+MinMaxMultiOptions
MinMaxOp
MinimalTuple
MinimalTupleData
MinimalTupleTableSlot
+MinmaxMultiOpaque
MinmaxOpaque
ModifyTable
ModifyTablePath
MultirangeType
NDBOX
NODE
+NTSTATUS
NUMCacheEntry
NUMDesc
NUMProc
OidOptions
OkeysState
OldSnapshotControlData
+OldSnapshotTimeMapping
OldToNewMapping
OldToNewMappingData
OnCommitAction
PBOOL
PCtxtHandle
PFN
+PFN_NTQUERYINFORMATIONFILE
PGAlignedBlock
PGAlignedXLogBlock
PGAsyncStatusType
PGRUsage
PGSemaphore
PGSemaphoreData
-PGSetenvStatusType
PGShmemHeader
+PGTargetServerType
+PGTernaryBool
PGTransactionStatusType
PGVerbosity
PG_Locale_Strategy
PGresParamDesc
PGresult
PGresult_data
-PgArchData
PHANDLE
+PIO_STATUS_BLOCK
PLAINTREE
+PLAssignStmt
PLUID_AND_ATTRIBUTES
PLcword
-PLpgSQL_arrayelem
PLpgSQL_case_when
PLpgSQL_condition
PLpgSQL_datum
ParallelAppendState
ParallelBitmapHeapState
ParallelBlockTableScanDesc
+ParallelBlockTableScanWorker
+ParallelBlockTableScanWorkerData
ParallelCompletionPtr
ParallelContext
ParallelExecutorInfo
ParallelReadyList
ParallelSlot
ParallelSlotArray
+ParallelSlotResultHandler
ParallelState
ParallelTableScanDesc
ParallelTableScanDescData
PartitionRangeBound
PartitionRangeDatum
PartitionRangeDatumKind
-PartitionRoutingInfo
PartitionScheme
PartitionSpec
PartitionTupleRouting
PathKey
PathKeysComparison
PathTarget
+PatternInfo
+PatternInfoArray
Pattern_Prefix_Status
Pattern_Type
PendingFsyncEntry
Perl_check_t
Perl_ppaddr_t
Permutation
+PgArchData
PgBackendGSSStatus
PgBackendSSLStatus
PgBackendStatus
PgBenchValueType
PgChecksumMode
PgFdwAnalyzeState
+PgFdwConnState
PgFdwDirectModifyState
PgFdwModifyState
PgFdwOption
PgStat_GlobalStats
PgStat_Msg
PgStat_MsgAnalyze
+PgStat_MsgAnlAncestors
PgStat_MsgArchiver
PgStat_MsgAutovacStart
PgStat_MsgBgWriter
PgStat_MsgChecksumFailure
+PgStat_MsgConn
PgStat_MsgDeadlock
PgStat_MsgDropdb
PgStat_MsgDummy
PrefetchBufferResult
PrepParallelRestorePtrType
PrepareStmt
-PreparedParamsData
PreparedStatement
PresortedKeyData
PrewarmType
ProcSignalReason
ProcSignalSlot
ProcState
+ProcWaitStatus
ProcessUtilityContext
ProcessUtility_hook_type
ProcessingMode
QuerySource
QueueBackendStatus
QueuePosition
+QuitSignalReason
RBTNode
RBTOrderControl
RBTree
RangeType
RangeVar
RangeVarGetRelidCallback
+Ranges
RawColumnDefault
+RawParseMode
RawStmt
ReInitializeDSMForeignScan_function
ReScanForeignScan_function
RecordCompareData
RecordIOData
RecoveryLockListsEntry
+RecoveryPauseState
RecoveryState
RecoveryTargetTimeLineGoal
RecoveryTargetType
RelabelType
Relation
RelationData
+RelationInfo
RelationPtr
RelationSyncEntry
RelcacheCallbackFunction
ReorderBufferBeginCB
ReorderBufferChange
ReorderBufferCommitCB
+ReorderBufferCommitPreparedCB
ReorderBufferDiskChange
ReorderBufferIterTXNEntry
ReorderBufferIterTXNState
ReorderBufferMessageCB
+ReorderBufferPrepareCB
+ReorderBufferRollbackPreparedCB
+ReorderBufferStreamAbortCB
+ReorderBufferStreamChangeCB
+ReorderBufferStreamCommitCB
+ReorderBufferStreamMessageCB
+ReorderBufferStreamPrepareCB
+ReorderBufferStreamStartCB
+ReorderBufferStreamStopCB
+ReorderBufferStreamTruncateCB
ReorderBufferTXN
ReorderBufferTXNByIdEnt
ReorderBufferToastEnt
RestorePass
RestrictInfo
Result
+ResultCache
+ResultCacheEntry
+ResultCacheInstrumentation
+ResultCacheKey
+ResultCachePath
+ResultCacheState
+ResultCacheTuple
ResultRelInfo
ResultState
ReturnSetInfo
+ReturnStmt
RevmapContents
RewriteMappingDataEntry
RewriteMappingFile
RowCompareExpr
RowCompareType
RowExpr
+RowIdentityVarInfo
RowMarkClause
RowMarkType
RowSecurityDesc
SERVICE_STATUS
SERVICE_STATUS_HANDLE
SERVICE_TABLE_ENTRY
-SHA1_CTX
-SHA256_CTX
-SHA512_CTX
SHM_QUEUE
SID_AND_ATTRIBUTES
SID_IDENTIFIER_AUTHORITY
SOCKADDR
SOCKET
SPELL
+SPICallbackArg
+SPIExecuteOptions
+SPIParseOpenOptions
SPIPlanPtr
+SPIPrepareOptions
SPITupleTable
SPLITCOST
SPNode
SQLDropObject
SQLFunctionCache
SQLFunctionCachePtr
+SQLFunctionParseInfo
SQLFunctionParseInfoPtr
SQLValueFunction
SQLValueFunctionOp
SSL_CTX
STARTUPINFO
STRLEN
-ST_ELEMENT_TYPE
-ST_POINTER_TYPE
SV
+SYNCHRONIZATION_BARRIER
SampleScan
SampleScanGetSampleSize_function
SampleScanState
SamplerRandomState
ScalarArrayOpExpr
+ScalarArrayOpExprHashEntry
+ScalarArrayOpExprHashTable
ScalarIOData
ScalarItem
ScalarMCVItem
SerialControl
SerializableXactHandle
SerializedActiveRelMaps
+SerializedRanges
SerializedReindexState
SerializedSnapshotData
SerializedTransactionState
Session
SessionBackupState
+SessionEndType
SetConstraintState
SetConstraintStateData
SetConstraintTriggerData
SetOpStrategy
SetOperation
SetOperationStmt
+SetQuantifier
SetToDefault
SetupWorkerPtrType
ShDependObjectInfo
+SharedAggInfo
SharedBitmapState
SharedDependencyObjectType
SharedDependencyType
SharedRecordTableEntry
SharedRecordTableKey
SharedRecordTypmodRegistry
+SharedResultCacheInfo
SharedSortInfo
SharedTuplestore
SharedTuplestoreAccessor
SplitInterval
SplitLR
SplitPoint
+SplitTextOutputData
SplitVar
SplitedPageLayout
StackElem
StartBlobsPtrType
StartDataPtrType
StartReplicationCmd
-StartupPacket
StartupStatusEnum
StatEntry
StatExtEntry
StateFileChunk
StatisticExtInfo
Stats
+StatsBuildData
StatsData
+StatsElem
StatsExtInfo
StdAnalyzeData
StdRdOptions
SubXactCallbackItem
SubXactEvent
SubXactInfo
-SubplanResultRelHashElem
SubqueryScan
SubqueryScanPath
SubqueryScanState
+SubscriptExecSetup
+SubscriptExecSteps
+SubscriptRoutines
+SubscriptTransform
SubscriptingRef
SubscriptingRefState
Subscription
SyncRepStandbyData
SyncRequestHandler
SyncRequestType
+SysFKRelationship
SysScanDesc
SyscacheCallbackFunction
SystemRowsSamplerData
TBlockState
TIDBitmap
TM_FailureData
+TM_IndexDelete
+TM_IndexDeleteOp
+TM_IndexStatus
TM_Result
TOKEN_DEFAULT_DACL
TOKEN_INFORMATION_CLASS
TabStatHashEntry
TabStatusArray
TableAmRoutine
+TableAttachInfo
TableDataInfo
TableFunc
TableFuncRoutine
TheLexeme
TheSubstitute
TidExpr
+TidExprType
TidHashKey
+TidOpExpr
TidPath
+TidRangePath
+TidRangeScan
+TidRangeScanState
TidScan
TidScanState
TimeADT
TmFromChar
TmToChar
ToastAttrInfo
+ToastCompressionId
ToastTupleContext
ToastedAttribute
TocEntry
ViewStmt
VirtualTransactionId
VirtualTupleTableSlot
+VolatileFunctionStatus
Vsrt
WAIT_ORDER
WALAvailability
WCOKind
WFW_WaitOption
WIDGET
-WIN32_FILE_ATTRIBUTE_DATA
WORD
WORKSTATE
WSABUF
XactEvent
XactLockTableWaitInfo
XidBoundsViolation
+XidCacheStatus
XidCommitStatus
-XidHorizonPrefetchState
XidStatus
XmlExpr
XmlExprOp
__IsProcessInJob
__QueryInformationJobObject
__SetInformationJobObject
+__time64_t
+_dev_t
+_ino_t
_resultmap
_stringlist
-abs
acquireLocksOnSubLinks_context
adjust_appendrel_attrs_context
+aff_regex_struct
allocfunc
+amadjustmembers_function
ambeginscan_function
ambuild_function
ambuildempty_function
bits8
bloom_filter
brin_column_state
+brin_serialize_callback_type
bytea
cached_re_str
cashKEY
collation_cache_entry
color
colormaprange
+compare_context
config_var_value
contain_aggs_of_level_context
convert_testexpr_context
core_yyscan_t
corrupt_items
cost_qual_eval_context
+cp_hash_func
create_upper_paths_hook_type
createdb_failure_params
crosstab_HashEnt
file_entry_t
file_type_t
filehash_hash
+filehash_iterator
filemap_t
fill_string_relopt
finalize_primnode_context
generate_series_timestamp_fctx
generate_series_timestamptz_fctx
generate_subscripts_fctx
-get_agg_clause_costs_context
get_attavgwidth_hook_type
get_index_stats_hook_type
get_relation_info_hook_type
intset_leaf_node
intset_node
intvKEY
-itemIdSort
-itemIdSortData
+itemIdCompact
+itemIdCompactData
iterator
jmp_buf
join_search_hook_type
map_variable_attnos_context
max_parallel_hazard_context
mb2wchar_with_len_converter
+mbchar_verifier
mbcharacter_incrementer
mbdisplaylen_converter
mblen_converter
-mbverifier
-md5_ctxt
+mbstr_verifier
metastring
mix_data_t
mixedStruct
pg_enc2gettext
pg_enc2name
pg_encname
+pg_funcptr_t
pg_gssinfo
pg_hmac_ctx
pg_int64
pg_tz
pg_tz_cache
pg_tzenum
+pg_unicode_decompinfo
pg_unicode_decomposition
pg_unicode_norminfo
pg_unicode_normprops
+pg_unicode_recompinfo
pg_utf_to_local_combined
pg_uuid_t
pg_wc_probefunc
pgssEntry
pgssGlobalStats
pgssHashKey
-pgssJumbleState
-pgssLocationLen
pgssSharedState
pgssStoreKind
pgssVersion
polymorphic_actuals
pos_trgm
post_parse_analyze_hook_type
+postprocess_result_function
pqbool
pqsigfunc
printQueryOpt
proclist_mutable_iter
proclist_node
promptStatus_t
-pthread_attr_t
pthread_barrier_t
+pthread_cond_t
pthread_key_t
pthread_mutex_t
pthread_once_t
pull_vars_context
pullup_replace_vars_context
pushdown_safety_info
+qc_hash_func
qsort_arg_comparator
qsort_comparator
query_pathkeys_callback
rendezvousHashEntry
replace_rte_variables_callback
replace_rte_variables_context
+resultcache_hash
+resultcache_iterator
ret_type
rewind_source
rewrite_event
role_auth_extra
row_security_policy_hook_type
rsv_callback
+saophash_hash
save_buffer
scram_state
scram_state_enum
svtype
symbol
tablespaceinfo
-teReqs
teSection
temp_tablespaces_extra
-test_function
+test_re_flags
+test_regex_ctx
test_shm_mq_header
test_spec
+test_start_function
text
timeKEY
time_t
wchar2mb_with_len_converter
wchar_t
win32_deadchild_waitinfo
-win32_pthread
wint_t
worker_state
worktable
xl_btree_update
xl_btree_vacuum
xl_clog_truncate
-xl_commit_ts_set
xl_commit_ts_truncate
xl_dbase_create_rec
xl_dbase_drop_rec