perltidy run not included.
BloomFillMetapage(index, metapage);
/*
- * Write the page and log it. It might seem that an immediate sync
- * would be sufficient to guarantee that the file exists on disk, but
- * recovery itself might remove it while replaying, for example, an
- * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we
- * need this even when wal_level=minimal.
+ * Write the page and log it. It might seem that an immediate sync would
+ * be sufficient to guarantee that the file exists on disk, but recovery
+ * itself might remove it while replaying, for example, an
+ * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we need
+ * this even when wal_level=minimal.
*/
PageSetChecksumInplace(metapage, BLOOM_METAPAGE_BLKNO);
smgrwrite(index->rd_smgr, INIT_FORKNUM, BLOOM_METAPAGE_BLKNO,
bl_relopt_tab[i + 1].optname = MemoryContextStrdup(TopMemoryContext,
buf);
bl_relopt_tab[i + 1].opttype = RELOPT_TYPE_INT;
- bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) + sizeof(int) * i;
+ bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) +sizeof(int) * i;
}
}
cmp;
cmp = DatumGetInt32(CallerFInfoFunctionCall2(
- data->typecmp,
- fcinfo->flinfo,
- PG_GET_COLLATION(),
- (data->strategy == BTLessStrategyNumber ||
- data->strategy == BTLessEqualStrategyNumber)
- ? data->datum : a,
- b));
+ data->typecmp,
+ fcinfo->flinfo,
+ PG_GET_COLLATION(),
+ (data->strategy == BTLessStrategyNumber ||
+ data->strategy == BTLessEqualStrategyNumber)
+ ? data->datum : a,
+ b));
switch (data->strategy)
{
*/
-#define ENUM_IS_LEFTMOST(x) ((x) == InvalidOid)
+#define ENUM_IS_LEFTMOST(x) ((x) == InvalidOid)
PG_FUNCTION_INFO_V1(gin_enum_cmp);
Datum
gin_enum_cmp(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
- int res = 0;
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
+ int res = 0;
if (ENUM_IS_LEFTMOST(a))
{
else
{
res = DatumGetInt32(CallerFInfoFunctionCall2(
- enum_cmp,
- fcinfo->flinfo,
- PG_GET_COLLATION(),
- ObjectIdGetDatum(a),
- ObjectIdGetDatum(b)));
+ enum_cmp,
+ fcinfo->flinfo,
+ PG_GET_COLLATION(),
+ ObjectIdGetDatum(a),
+ ObjectIdGetDatum(b)));
}
PG_RETURN_INT32(res);
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
gbt_enumgt(const void *a, const void *b, FmgrInfo *flinfo)
{
return DatumGetBool(
- CallerFInfoFunctionCall2(enum_gt, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
+ CallerFInfoFunctionCall2(enum_gt, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
);
}
static bool
gbt_enumge(const void *a, const void *b, FmgrInfo *flinfo)
{
return DatumGetBool(
- CallerFInfoFunctionCall2(enum_ge, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
+ CallerFInfoFunctionCall2(enum_ge, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
);
}
static bool
return 0;
return DatumGetInt32(
- CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->upper), ObjectIdGetDatum(ib->upper))
+ CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->upper), ObjectIdGetDatum(ib->upper))
);
}
return DatumGetInt32(
- CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->lower), ObjectIdGetDatum(ib->lower))
+ CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->lower), ObjectIdGetDatum(ib->lower))
);
}
gbt_enumle,
gbt_enumlt,
gbt_enumkey_cmp,
- NULL /* no KNN support at least for now */
+ NULL /* no KNN support at least for now */
};
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_BOOL(gbt_num_consistent(&key, (void *) &query,
- &strategy, GIST_LEAF(entry), &tinfo, fcinfo->flinfo));
+ &strategy, GIST_LEAF(entry), &tinfo, fcinfo->flinfo));
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
qqq = tstz_to_ts_gmt(query);
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
/* Methods */
- bool (*f_gt) (const void *, const void *, FmgrInfo *); /* greater than */
- bool (*f_ge) (const void *, const void *, FmgrInfo *); /* greater or equal */
- bool (*f_eq) (const void *, const void *, FmgrInfo *); /* equal */
- bool (*f_le) (const void *, const void *, FmgrInfo *); /* less or equal */
- bool (*f_lt) (const void *, const void *, FmgrInfo *); /* less than */
- int (*f_cmp) (const void *, const void *, FmgrInfo *); /* key compare function */
- float8 (*f_dist) (const void *, const void *, FmgrInfo *); /* key distance function */
+ bool (*f_gt) (const void *, const void *, FmgrInfo *); /* greater than */
+ bool (*f_ge) (const void *, const void *, FmgrInfo *); /* greater or equal */
+ bool (*f_eq) (const void *, const void *, FmgrInfo *); /* equal */
+ bool (*f_le) (const void *, const void *, FmgrInfo *); /* less or equal */
+ bool (*f_lt) (const void *, const void *, FmgrInfo *); /* less than */
+ int (*f_cmp) (const void *, const void *, FmgrInfo *); /* key compare function */
+ float8 (*f_dist) (const void *, const void *, FmgrInfo *); /* key distance function */
} gbtree_ninfo;
{
const gbtree_vinfo *tinfo;
Oid collation;
- FmgrInfo *flinfo;
+ FmgrInfo *flinfo;
} gbt_vsrt_arg;
*res = 0.0;
else if (!(((*tinfo->f_cmp) (nk.lower, ok.lower, collation, flinfo) >= 0 ||
gbt_bytea_pf_match(ok.lower, nk.lower, tinfo)) &&
- ((*tinfo->f_cmp) (nk.upper, ok.upper, collation, flinfo) <= 0 ||
- gbt_bytea_pf_match(ok.upper, nk.upper, tinfo))))
+ ((*tinfo->f_cmp) (nk.upper, ok.upper, collation, flinfo) <= 0 ||
+ gbt_bytea_pf_match(ok.upper, nk.upper, tinfo))))
{
Datum d = PointerGetDatum(0);
double dres;
/* Methods */
- bool (*f_gt) (const void *, const void *, Oid, FmgrInfo *); /* greater than */
- bool (*f_ge) (const void *, const void *, Oid, FmgrInfo *); /* greater equal */
- bool (*f_eq) (const void *, const void *, Oid, FmgrInfo *); /* equal */
- bool (*f_le) (const void *, const void *, Oid, FmgrInfo *); /* less equal */
- bool (*f_lt) (const void *, const void *, Oid, FmgrInfo *); /* less than */
- int32 (*f_cmp) (const void *, const void *, Oid, FmgrInfo *); /* compare */
+ bool (*f_gt) (const void *, const void *, Oid, FmgrInfo *); /* greater than */
+ bool (*f_ge) (const void *, const void *, Oid, FmgrInfo *); /* greater equal */
+ bool (*f_eq) (const void *, const void *, Oid, FmgrInfo *); /* equal */
+ bool (*f_le) (const void *, const void *, Oid, FmgrInfo *); /* less equal */
+ bool (*f_lt) (const void *, const void *, Oid, FmgrInfo *); /* less than */
+ int32 (*f_cmp) (const void *, const void *, Oid, FmgrInfo *); /* compare */
GBT_VARKEY *(*f_l2n) (GBT_VARKEY *, FmgrInfo *flinfo); /* convert leaf to node */
} gbtree_vinfo;
PG_RETURN_BOOL(
gbt_num_consistent(&key, (void *) query, &strategy,
- GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+ GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
);
}
static void dblink_connstr_check(const char *connstr);
static void dblink_security_check(PGconn *conn, remoteConn *rconn);
static void dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
- const char *dblink_context_msg, bool fail);
+ const char *dblink_context_msg, bool fail);
static char *get_connect_string(const char *servername);
static char *escape_param_str(const char *from);
static void validate_pkattnums(Relation rel,
return pstrdup(in);
}
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
dblink_res_internalerror(PGconn *conn, PGresult *res, const char *p2)
{
char *msg = pchomp(PQerrorMessage(conn));
+
if (res)
PQclear(res);
elog(ERROR, "%s: %s", p2, msg);
}
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
dblink_conn_not_avail(const char *conname)
{
if (conname)
static void
dblink_get_conn(char *conname_or_str,
- PGconn * volatile *conn_p, char **conname_p, volatile bool *freeconn_p)
+ PGconn *volatile * conn_p, char **conname_p, volatile bool *freeconn_p)
{
remoteConn *rconn = getConnectionByName(conname_or_str);
PGconn *conn;
if (PQstatus(conn) == CONNECTION_BAD)
{
char *msg = pchomp(PQerrorMessage(conn));
+
PQfinish(conn);
ereport(ERROR,
- (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
- errmsg("could not establish connection"),
- errdetail_internal("%s", msg)));
+ (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
+ errmsg("could not establish connection"),
+ errdetail_internal("%s", msg)));
}
dblink_security_check(conn, rconn);
if (PQclientEncoding(conn) != GetDatabaseEncoding())
dblink_get_named_conn(const char *conname)
{
remoteConn *rconn = getConnectionByName(conname);
+
if (rconn)
return rconn->conn;
dblink_conn_not_avail(conname);
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
}
static void
message_context = xpstrdup(pg_diag_context);
/*
- * If we don't get a message from the PGresult, try the PGconn. This
- * is needed because for connection-level failures, PQexec may just
- * return NULL, not a PGresult at all.
+ * If we don't get a message from the PGresult, try the PGconn. This is
+ * needed because for connection-level failures, PQexec may just return
+ * NULL, not a PGresult at all.
*/
if (message_primary == NULL)
message_primary = pchomp(PQerrorMessage(conn));
ForeignServer *foreign_server = NULL;
UserMapping *user_mapping;
ListCell *cell;
- StringInfoData buf;
+ StringInfoData buf;
ForeignDataWrapper *fdw;
AclResult aclresult;
char *srvname;
escape_param_str(const char *str)
{
const char *cp;
- StringInfoData buf;
+ StringInfoData buf;
initStringInfo(&buf);
todo = psprintf(
"SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
"FROM pg_catalog.pg_class c\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
" LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
" pg_catalog.pg_tablespace t\n"
"WHERE relkind IN (" CppAsString2(RELKIND_RELATION) ","
if (ItemIdIsUsed(itemId))
{
dtup = brin_deform_tuple(bdesc,
- (BrinTuple *) PageGetItem(page, itemId),
- NULL);
+ (BrinTuple *) PageGetItem(page, itemId),
+ NULL);
attno = 1;
unusedItem = false;
}
*/
typedef struct HashPageStat
{
- int live_items;
- int dead_items;
- int page_size;
- int free_size;
+ int live_items;
+ int dead_items;
+ int page_size;
+ int free_size;
/* opaque data */
BlockNumber hasho_prevblkno;
Bucket hasho_bucket;
uint16 hasho_flag;
uint16 hasho_page_id;
-} HashPageStat;
+} HashPageStat;
/*
case LH_BUCKET_PAGE | LH_OVERFLOW_PAGE:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("page is not a hash bucket or overflow page")));
+ errmsg("page is not a hash bucket or overflow page")));
case LH_OVERFLOW_PAGE:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
default:
elog(ERROR,
"hash page of type %08x not in mask %08x",
- pagetype, flags);
+ pagetype, flags);
}
}
* -------------------------------------------------
*/
static void
-GetHashPageStatistics(Page page, HashPageStat * stat)
+GetHashPageStatistics(Page page, HashPageStat *stat)
{
OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
HashPageOpaque opaque = (HashPageOpaque) PageGetSpecialPointer(page);
j;
Datum values[16];
bool nulls[16];
- Datum spares[HASH_MAX_SPLITPOINTS];
- Datum mapp[HASH_MAX_BITMAPS];
+ Datum spares[HASH_MAX_SPLITPOINTS];
+ Datum mapp[HASH_MAX_BITMAPS];
if (!superuser())
ereport(ERROR,
if (raw_page_size != BLCKSZ)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("incorrect size of input page (%d bytes)", raw_page_size)));
+ errmsg("incorrect size of input page (%d bytes)", raw_page_size)));
page = (PageHeader) VARDATA(raw_page);
- PG_RETURN_INT16(pg_checksum_page((char *)page, blkno));
+ PG_RETURN_INT16(pg_checksum_page((char *) page, blkno));
}
char *nextWALFileName; /* the file we need to get from archive */
char *restartWALFileName; /* the file from which we can restart restore */
char *priorWALFileName; /* the file we need to get from archive */
-char WALFilePath[MAXPGPATH * 2]; /* the file path including archive */
+char WALFilePath[MAXPGPATH * 2]; /* the file path including archive */
char restoreCommand[MAXPGPATH]; /* run this to restore */
char exclusiveCleanupFileName[MAXFNAMELEN]; /* the file we need to
* get from archive */
rel->rd_rel->relkind != RELKIND_TOASTVALUE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table, materialized view, or TOAST table",
- RelationGetRelationName(rel))));
+ errmsg("\"%s\" is not a table, materialized view, or TOAST table",
+ RelationGetRelationName(rel))));
}
* prototype for the EVP functions that return an algorithm, e.g.
* EVP_aes_128_cbc().
*/
-typedef const EVP_CIPHER *(*ossl_EVP_cipher_func)(void);
+typedef const EVP_CIPHER *(*ossl_EVP_cipher_func) (void);
/*
* ossl_cipher contains the static information about each cipher.
static const struct ossl_cipher ossl_aes_ecb = {
ossl_aes_ecb_init,
- NULL, /* EVP_aes_XXX_ecb(), determined in init function */
+ NULL, /* EVP_aes_XXX_ecb(), determined in init
+ * function */
128 / 8, 256 / 8
};
static const struct ossl_cipher ossl_aes_cbc = {
ossl_aes_cbc_init,
- NULL, /* EVP_aes_XXX_cbc(), determined in init function */
+ NULL, /* EVP_aes_XXX_cbc(), determined in init
+ * function */
128 / 8, 256 / 8
};
uint8 *buf = (uint8 *) palloc(UUID_LEN);
/*
- * Generate random bits. pg_backend_random() will do here, we don't
- * promis UUIDs to be cryptographically random, when built with
+ * Generate random bits. pg_backend_random() will do here, we don't promis
+ * UUIDs to be cryptographically random, when built with
* --disable-strong-random.
*/
if (!pg_backend_random((char *) buf, UUID_LEN))
relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = heap_openrv(relrv, AccessShareLock);
- /* check permissions: must have SELECT on table or be in pg_stat_scan_tables */
+ /*
+ * check permissions: must have SELECT on table or be in
+ * pg_stat_scan_tables
+ */
aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
ACL_SELECT);
if (aclresult != ACLCHECK_OK)
PG_FUNCTION_INFO_V1(pgstattuple_approx);
PG_FUNCTION_INFO_V1(pgstattuple_approx_v1_5);
-Datum pgstattuple_approx_internal(Oid relid, FunctionCallInfo fcinfo);
+Datum pgstattuple_approx_internal(Oid relid, FunctionCallInfo fcinfo);
typedef struct output_type
{
PG_FUNCTION_INFO_V1(pg_relpagesbyid_v1_5);
PG_FUNCTION_INFO_V1(pgstatginindex_v1_5);
-Datum pgstatginindex_internal(Oid relid, FunctionCallInfo fcinfo);
+Datum pgstatginindex_internal(Oid relid, FunctionCallInfo fcinfo);
#define IS_INDEX(r) ((r)->rd_rel->relkind == RELKIND_INDEX)
#define IS_BTREE(r) ((r)->rd_rel->relam == BTREE_AM_OID)
*/
typedef struct HashIndexStat
{
- int32 version;
- int32 space_per_page;
+ int32 version;
+ int32 space_per_page;
- BlockNumber bucket_pages;
+ BlockNumber bucket_pages;
BlockNumber overflow_pages;
BlockNumber bitmap_pages;
BlockNumber unused_pages;
- int64 live_items;
- int64 dead_items;
- uint64 free_space;
+ int64 live_items;
+ int64 dead_items;
+ uint64 free_space;
} HashIndexStat;
static Datum pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo);
pgstathashindex(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- BlockNumber nblocks;
- BlockNumber blkno;
+ BlockNumber nblocks;
+ BlockNumber blkno;
Relation rel;
HashIndexStat stats;
BufferAccessStrategy bstrategy;
Datum values[8];
bool nulls[8];
Buffer metabuf;
- HashMetaPage metap;
+ HashMetaPage metap;
float8 free_percent;
uint64 total_space;
MAXALIGN(sizeof(HashPageOpaqueData)))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("index \"%s\" contains corrupted page at block %u",
- RelationGetRelationName(rel),
- BufferGetBlockNumber(buf))));
+ errmsg("index \"%s\" contains corrupted page at block %u",
+ RelationGetRelationName(rel),
+ BufferGetBlockNumber(buf))));
else
{
- HashPageOpaque opaque;
- int pagetype;
+ HashPageOpaque opaque;
+ int pagetype;
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
pagetype = opaque->hasho_flag & LH_PAGE_TYPE;
else
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("unexpected page type 0x%04X in HASH index \"%s\" block %u",
+ errmsg("unexpected page type 0x%04X in HASH index \"%s\" block %u",
opaque->hasho_flag, RelationGetRelationName(rel),
- BufferGetBlockNumber(buf))));
+ BufferGetBlockNumber(buf))));
}
UnlockReleaseBuffer(buf);
}
GetHashPageStats(Page page, HashIndexStat *stats)
{
OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
- int off;
+ int off;
/* count live and dead tuples, and free space */
for (off = FirstOffsetNumber; off <= maxoff; off++)
{
- ItemId id = PageGetItemId(page, off);
+ ItemId id = PageGetItemId(page, off);
if (!ItemIdIsDead(id))
stats->live_items++;
RelOptInfo *joinrel, bool use_alias, List **params_list);
static void deparseFromExpr(List *quals, deparse_expr_cxt *context);
static void deparseRangeTblRef(StringInfo buf, PlannerInfo *root,
- RelOptInfo *foreignrel, bool make_subquery,
- List **params_list);
+ RelOptInfo *foreignrel, bool make_subquery,
+ List **params_list);
static void deparseAggref(Aggref *node, deparse_expr_cxt *context);
static void appendGroupByClause(List *tlist, deparse_expr_cxt *context);
static void appendAggOrderBy(List *orderList, List *targetList,
* Helper functions
*/
static bool is_subquery_var(Var *node, RelOptInfo *foreignrel,
- int *relno, int *colno);
+ int *relno, int *colno);
static void get_relation_column_alias_ids(Var *node, RelOptInfo *foreignrel,
- int *relno, int *colno);
+ int *relno, int *colno);
/*
{
/*
* For a relation that is deparsed as a subquery, emit expressions
- * specified in the relation's reltarget. Note that since this is
- * for the subquery, no need to care about *retrieved_attrs.
+ * specified in the relation's reltarget. Note that since this is for
+ * the subquery, no need to care about *retrieved_attrs.
*/
deparseSubqueryTargetList(context);
}
/*
* If the Var belongs to the foreign relation that is deparsed as a
- * subquery, use the relation and column alias to the Var provided
- * by the subquery, instead of the remote name.
+ * subquery, use the relation and column alias to the Var provided by the
+ * subquery, instead of the remote name.
*/
if (is_subquery_var(node, context->scanrel, &relno, &colno))
{
static void apply_server_options(PgFdwRelationInfo *fpinfo);
static void apply_table_options(PgFdwRelationInfo *fpinfo);
static void merge_fdw_options(PgFdwRelationInfo *fpinfo,
- const PgFdwRelationInfo *fpinfo_o,
- const PgFdwRelationInfo *fpinfo_i);
+ const PgFdwRelationInfo *fpinfo_o,
+ const PgFdwRelationInfo *fpinfo_i);
/*
fpinfo->jointype = jointype;
/*
- * By default, both the input relations are not required to be deparsed
- * as subqueries, but there might be some relations covered by the input
+ * By default, both the input relations are not required to be deparsed as
+ * subqueries, but there might be some relations covered by the input
* relations that are required to be deparsed as subqueries, so save the
* relids of those relations for later use by the deparser.
*/
case JOIN_FULL:
/*
- * In this case, if any of the input relations has conditions,
- * we need to deparse that relation as a subquery so that the
+ * In this case, if any of the input relations has conditions, we
+ * need to deparse that relation as a subquery so that the
* conditions can be evaluated before the join. Remember it in
* the fpinfo of this relation so that the deparser can take
* appropriate action. Also, save the relids of base relations
* Note that since this joinrel is at the end of the join_rel_list list
* when we are called, we can get the position by list_length.
*/
- Assert(fpinfo->relation_index == 0); /* shouldn't be set yet */
+ Assert(fpinfo->relation_index == 0); /* shouldn't be set yet */
fpinfo->relation_index =
list_length(root->parse->rtable) + list_length(root->join_rel_list);
static void
apply_server_options(PgFdwRelationInfo *fpinfo)
{
- ListCell *lc;
+ ListCell *lc;
foreach(lc, fpinfo->server->options)
{
static void
apply_table_options(PgFdwRelationInfo *fpinfo)
{
- ListCell *lc;
+ ListCell *lc;
foreach(lc, fpinfo->table->options)
{
* best.
*/
fpinfo->use_remote_estimate = fpinfo_o->use_remote_estimate ||
- fpinfo_i->use_remote_estimate;
+ fpinfo_i->use_remote_estimate;
/*
* Set fetch size to maximum of the joining sides, since we are
fpinfo->table = ifpinfo->table;
fpinfo->server = ifpinfo->server;
fpinfo->user = ifpinfo->user;
- merge_fdw_options(fpinfo, ifpinfo , NULL);
+ merge_fdw_options(fpinfo, ifpinfo, NULL);
/* Assess if it is safe to push down aggregation and grouping. */
if (!foreign_grouping_ok(root, grouped_rel))
List *grouped_tlist;
/* Subquery information */
- bool make_outerrel_subquery; /* do we deparse outerrel as a
+ bool make_outerrel_subquery; /* do we deparse outerrel as a
* subquery? */
- bool make_innerrel_subquery; /* do we deparse innerrel as a
+ bool make_innerrel_subquery; /* do we deparse innerrel as a
* subquery? */
Relids lower_subquery_rels; /* all relids appearing in lower
* subqueries */
MemoryContext oldcxt;
MemoryContext perRangeCxt;
BrinMemTuple *dtup;
- BrinTuple *btup = NULL;
+ BrinTuple *btup = NULL;
Size btupsz = 0;
opaque = (BrinOpaque *) scan->opaque;
Datum
brin_desummarize_range(PG_FUNCTION_ARGS)
{
- Oid indexoid = PG_GETARG_OID(0);
- int64 heapBlk64 = PG_GETARG_INT64(1);
+ Oid indexoid = PG_GETARG_OID(0);
+ int64 heapBlk64 = PG_GETARG_INT64(1);
BlockNumber heapBlk;
- Oid heapoid;
- Relation heapRel;
- Relation indexRel;
- bool done;
+ Oid heapoid;
+ Relation heapRel;
+ Relation indexRel;
+ bool done;
if (heapBlk64 > MaxBlockNumber || heapBlk64 < 0)
{
RelationGetRelationName(indexRel))));
/* the revmap does the hard work */
- do {
+ do
+ {
done = brinRevmapDesummarizeRange(indexRel, heapBlk);
}
while (!done);
brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
{
BrinRevmap *revmap;
- BlockNumber pagesPerRange;
+ BlockNumber pagesPerRange;
RevmapContents *contents;
ItemPointerData *iptr;
- ItemPointerData invalidIptr;
- BlockNumber revmapBlk;
+ ItemPointerData invalidIptr;
+ BlockNumber revmapBlk;
Buffer revmapBuf;
Buffer regBuf;
Page revmapPg;
if (RelationNeedsWAL(idxrel))
{
xl_brin_desummarize xlrec;
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
xlrec.pagesPerRange = revmap->rm_pagesPerRange;
xlrec.heapBlk = heapBlk;
action = XLogReadBufferForRedo(record, 0, &buffer);
if (action == BLK_NEEDS_REDO)
{
- ItemPointerData iptr;
+ ItemPointerData iptr;
ItemPointerSetInvalid(&iptr);
brinSetHeapBlockItemptr(buffer, xlrec->pagesPerRange, xlrec->heapBlk, iptr);
action = XLogReadBufferForRedo(record, 1, &buffer);
if (action == BLK_NEEDS_REDO)
{
- Page regPg = BufferGetPage(buffer);
+ Page regPg = BufferGetPage(buffer);
PageIndexTupleDeleteNoCompact(regPg, xlrec->regOffset);
case INT4OID:
{
- int32 num = DatumGetInt32(value);
- char str[12]; /* sign, 10 digits and '\0' */
+ int32 num = DatumGetInt32(value);
+ char str[12]; /* sign, 10 digits and '\0' */
pg_ltoa(num, str);
pq_sendcountedtext(&buf, str, strlen(str), false);
case INT8OID:
{
- int64 num = DatumGetInt64(value);
- char str[23]; /* sign, 21 digits and '\0' */
+ int64 num = DatumGetInt64(value);
+ char str[23]; /* sign, 21 digits and '\0' */
pg_lltoa(num, str);
pq_sendcountedtext(&buf, str, strlen(str), false);
* exclusive cleanup lock. This guarantees that no insertions currently
* happen in this subtree. Caller also acquire Exclusive lock on deletable
* page and is acquiring and releasing exclusive lock on left page before.
- * Left page was locked and released. Then parent and this page are locked.
- * We acquire left page lock here only to mark page dirty after changing
- * right pointer.
+ * Left page was locked and released. Then parent and this page are
+ * locked. We acquire left page lock here only to mark page dirty after
+ * changing right pointer.
*/
lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
RBM_NORMAL, gvs->strategy);
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
RBM_NORMAL, gvs->strategy);
- if(!isRoot)
+ if (!isRoot)
LockBuffer(buffer, GIN_EXCLUSIVE);
page = BufferGetPage(buffer);
}
}
- if(!isRoot)
- LockBuffer(buffer, GIN_UNLOCK);
+ if (!isRoot)
+ LockBuffer(buffer, GIN_UNLOCK);
ReleaseBuffer(buffer);
RBM_NORMAL, gvs->strategy);
page = BufferGetPage(buffer);
- ginTraverseLock(buffer,false);
+ ginTraverseLock(buffer, false);
Assert(GinPageIsData(page));
}
else
{
- OffsetNumber i;
- bool hasEmptyChild = FALSE;
- bool hasNonEmptyChild = FALSE;
- OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
- BlockNumber* children = palloc(sizeof(BlockNumber) * (maxoff + 1));
+ OffsetNumber i;
+ bool hasEmptyChild = FALSE;
+ bool hasNonEmptyChild = FALSE;
+ OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
+ BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1));
/*
- * Read all children BlockNumbers.
- * Not sure it is safe if there are many concurrent vacuums.
+ * Read all children BlockNumbers. Not sure it is safe if there are
+ * many concurrent vacuums.
*/
for (i = FirstOffsetNumber; i <= maxoff; i++)
vacuum_delay_point();
/*
- * All subtree is empty - just return TRUE to indicate that parent must
- * do a cleanup. Unless we are ROOT an there is way to go upper.
+ * All subtree is empty - just return TRUE to indicate that parent
+ * must do a cleanup. Unless we are ROOT an there is way to go upper.
*/
- if(hasEmptyChild && !hasNonEmptyChild && !isRoot)
+ if (hasEmptyChild && !hasNonEmptyChild && !isRoot)
return TRUE;
- if(hasEmptyChild)
+ if (hasEmptyChild)
{
DataPageDeleteStack root,
*ptr,
*tmp;
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
- RBM_NORMAL, gvs->strategy);
+ RBM_NORMAL, gvs->strategy);
LockBufferForCleanup(buffer);
memset(&root, 0, sizeof(DataPageDeleteStack));
- root.leftBlkno = InvalidBlockNumber;
- root.isRoot = TRUE;
+ root.leftBlkno = InvalidBlockNumber;
+ root.isRoot = TRUE;
ginScanToDelete(gvs, blkno, TRUE, &root, InvalidOffsetNumber);
if (scan->kill_prior_tuple)
{
/*
- * Yes, so remember it for later. (We'll deal with all such
- * tuples at once right after leaving the index page or at
- * end of scan.) In case if caller reverses the indexscan
- * direction it is quite possible that the same item might
- * get entered multiple times. But, we don't detect that;
- * instead, we just forget any excess entries.
+ * Yes, so remember it for later. (We'll deal with all such tuples
+ * at once right after leaving the index page or at end of scan.)
+ * In case if caller reverses the indexscan direction it is quite
+ * possible that the same item might get entered multiple times.
+ * But, we don't detect that; instead, we just forget any excess
+ * entries.
*/
if (so->killedItems == NULL)
so->killedItems = palloc(MaxIndexTuplesPerPage *
{
so->killedItems[so->numKilled].heapTid = so->hashso_heappos;
so->killedItems[so->numKilled].indexOffset =
- ItemPointerGetOffsetNumber(&(so->hashso_curpos));
+ ItemPointerGetOffsetNumber(&(so->hashso_curpos));
so->numKilled++;
}
}
Relation rel = scan->indexRelation;
/*
- * Before leaving current page, deal with any killed items.
- * Also, ensure that we acquire lock on current page before
- * calling _hash_kill_items.
+ * Before leaving current page, deal with any killed items. Also, ensure
+ * that we acquire lock on current page before calling _hash_kill_items.
*/
if (so->numKilled > 0)
{
Relation rel = scan->indexRelation;
/*
- * Before leaving current page, deal with any killed items.
- * Also, ensure that we acquire lock on current page before
- * calling _hash_kill_items.
+ * Before leaving current page, deal with any killed items. Also, ensure
+ * that we acquire lock on current page before calling _hash_kill_items.
*/
if (so->numKilled > 0)
{
/*
* Let us mark the page as clean if vacuum removes the DEAD tuples
- * from an index page. We do this by clearing LH_PAGE_HAS_DEAD_TUPLES
- * flag.
+ * from an index page. We do this by clearing
+ * LH_PAGE_HAS_DEAD_TUPLES flag.
*/
if (tuples_removed && *tuples_removed > 0 &&
H_HAS_DEAD_TUPLES(opaque))
static TransactionId
hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
{
- xl_hash_vacuum_one_page *xlrec;
- OffsetNumber *unused;
+ xl_hash_vacuum_one_page *xlrec;
+ OffsetNumber *unused;
Buffer ibuffer,
hbuffer;
Page ipage,
hpage;
- RelFileNode rnode;
- BlockNumber blkno;
+ RelFileNode rnode;
+ BlockNumber blkno;
ItemId iitemid,
hitemid;
IndexTuple itup;
- HeapTupleHeader htuphdr;
- BlockNumber hblkno;
- OffsetNumber hoffnum;
- TransactionId latestRemovedXid = InvalidTransactionId;
- int i;
+ HeapTupleHeader htuphdr;
+ BlockNumber hblkno;
+ OffsetNumber hoffnum;
+ TransactionId latestRemovedXid = InvalidTransactionId;
+ int i;
xlrec = (xl_hash_vacuum_one_page *) XLogRecGetData(record);
return latestRemovedXid;
/*
- * Check if WAL replay has reached a consistent database state. If not,
- * we must PANIC. See the definition of btree_xlog_delete_get_latestRemovedXid
- * for more details.
+ * Check if WAL replay has reached a consistent database state. If not, we
+ * must PANIC. See the definition of
+ * btree_xlog_delete_get_latestRemovedXid for more details.
*/
if (!reachedConsistency)
elog(PANIC, "hash_xlog_vacuum_get_latestRemovedXid: cannot operate with inconsistent data");
static void
hash_xlog_vacuum_one_page(XLogReaderState *record)
{
- XLogRecPtr lsn = record->EndRecPtr;
+ XLogRecPtr lsn = record->EndRecPtr;
xl_hash_vacuum_one_page *xldata;
- Buffer buffer;
- Buffer metabuf;
- Page page;
+ Buffer buffer;
+ Buffer metabuf;
+ Page page;
XLogRedoAction action;
HashPageOpaque pageopaque;
if (InHotStandby)
{
TransactionId latestRemovedXid =
- hash_xlog_vacuum_get_latestRemovedXid(record);
+ hash_xlog_vacuum_get_latestRemovedXid(record);
RelFileNode rnode;
XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
}
/*
- * Mark the page as not containing any LP_DEAD items. See comments
- * in _hash_vacuum_one_page() for details.
+ * Mark the page as not containing any LP_DEAD items. See comments in
+ * _hash_vacuum_one_page() for details.
*/
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
{
- Page metapage;
+ Page metapage;
HashMetaPage metap;
metapage = BufferGetPage(metabuf);
#include "storage/buf_internals.h"
static void _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
- RelFileNode hnode);
+ RelFileNode hnode);
/*
* _hash_doinsert() -- Handle insertion of a single index tuple.
/*
* Read the metapage. We don't lock it yet; HashMaxItemSize() will
- * examine pd_pagesize_version, but that can't change so we can examine
- * it without a lock.
+ * examine pd_pagesize_version, but that can't change so we can examine it
+ * without a lock.
*/
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
metapage = BufferGetPage(metabuf);
BlockNumber nextblkno;
/*
- * Check if current page has any DEAD tuples. If yes,
- * delete these tuples and see if we can get a space for
- * the new item to be inserted before moving to the next
- * page in the bucket chain.
+ * Check if current page has any DEAD tuples. If yes, delete these
+ * tuples and see if we can get a space for the new item to be
+ * inserted before moving to the next page in the bucket chain.
*/
if (H_HAS_DEAD_TUPLES(pageopaque))
{
_hash_vacuum_one_page(rel, metabuf, buf, heapRel->rd_node);
if (PageGetFreeSpace(page) >= itemsz)
- break; /* OK, now we have enough space */
+ break; /* OK, now we have enough space */
}
}
_hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
RelFileNode hnode)
{
- OffsetNumber deletable[MaxOffsetNumber];
- int ndeletable = 0;
+ OffsetNumber deletable[MaxOffsetNumber];
+ int ndeletable = 0;
OffsetNumber offnum,
- maxoff;
- Page page = BufferGetPage(buf);
- HashPageOpaque pageopaque;
- HashMetaPage metap;
+ maxoff;
+ Page page = BufferGetPage(buf);
+ HashPageOpaque pageopaque;
+ HashMetaPage metap;
/* Scan each tuple in page to see if it is marked as LP_DEAD */
maxoff = PageGetMaxOffsetNumber(page);
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- ItemId itemId = PageGetItemId(page, offnum);
+ ItemId itemId = PageGetItemId(page, offnum);
if (ItemIdIsDead(itemId))
deletable[ndeletable++] = offnum;
if (ndeletable > 0)
{
/*
- * Write-lock the meta page so that we can decrement
- * tuple count.
+ * Write-lock the meta page so that we can decrement tuple count.
*/
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
* Mark the page as not containing any LP_DEAD items. This is not
* certainly true (there might be some that have recently been marked,
* but weren't included in our target-item list), but it will almost
- * always be true and it doesn't seem worth an additional page scan
- * to check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
+ * always be true and it doesn't seem worth an additional page scan to
+ * check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
* anyway.
*/
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
/* XLOG stuff */
if (RelationNeedsWAL(rel))
{
- xl_hash_vacuum_one_page xlrec;
+ xl_hash_vacuum_one_page xlrec;
XLogRecPtr recptr;
xlrec.hnode = hnode;
XLogRegisterData((char *) &xlrec, SizeOfHashVacuumOnePage);
/*
- * We need the target-offsets array whether or not we store the whole
- * buffer, to allow us to find the latestRemovedXid on a standby
- * server.
+ * We need the target-offsets array whether or not we store the
+ * whole buffer, to allow us to find the latestRemovedXid on a
+ * standby server.
*/
XLogRegisterData((char *) deletable,
- ndeletable * sizeof(OffsetNumber));
+ ndeletable * sizeof(OffsetNumber));
XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
}
END_CRIT_SECTION();
+
/*
- * Releasing write lock on meta page as we have updated
- * the tuple count.
+ * Releasing write lock on meta page as we have updated the tuple
+ * count.
*/
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
}
pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
/*
- * Set hasho_prevblkno with current hashm_maxbucket. This value will
- * be used to validate cached HashMetaPageData. See
+ * Set hasho_prevblkno with current hashm_maxbucket. This value will be
+ * used to validate cached HashMetaPageData. See
* _hash_getbucketbuf_from_hashkey().
*/
pageopaque->hasho_prevblkno = max_bucket;
* Choose the number of initial bucket pages to match the fill factor
* given the estimated number of tuples. We round up the result to the
* total number of buckets which has to be allocated before using its
- * _hashm_spare element. However always force at least 2 bucket pages.
- * The upper limit is determined by considerations explained in
+ * _hashm_spare element. However always force at least 2 bucket pages. The
+ * upper limit is determined by considerations explained in
* _hash_expandtable().
*/
dnumbuckets = num_tuples / ffactor;
metap->hashm_maxbucket = num_buckets - 1;
/*
- * Set highmask as next immediate ((2 ^ x) - 1), which should be sufficient
- * to cover num_buckets.
+ * Set highmask as next immediate ((2 ^ x) - 1), which should be
+ * sufficient to cover num_buckets.
*/
metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
metap->hashm_lowmask = (metap->hashm_highmask >> 1);
{
/*
* Copy bucket mapping info now; refer to the comment in code below
- * where we copy this information before calling _hash_splitbucket
- * to see why this is okay.
+ * where we copy this information before calling _hash_splitbucket to
+ * see why this is okay.
*/
maxbucket = metap->hashm_maxbucket;
highmask = metap->hashm_highmask;
* We treat allocation of buckets as a separate WAL-logged action.
* Even if we fail after this operation, won't leak bucket pages;
* rather, the next split will consume this space. In any case, even
- * without failure we don't use all the space in one split
- * operation.
+ * without failure we don't use all the space in one split operation.
*/
buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
/*
* Mark the old bucket to indicate that split is in progress. (At
- * operation end, we will clear the split-in-progress flag.) Also,
- * for a primary bucket page, hasho_prevblkno stores the number of
- * buckets that existed as of the last split, so we must update that
- * value here.
+ * operation end, we will clear the split-in-progress flag.) Also, for a
+ * primary bucket page, hasho_prevblkno stores the number of buckets that
+ * existed as of the last split, so we must update that value here.
*/
oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
oopaque->hasho_prevblkno = maxbucket;
/*
* Initialize the page. Just zeroing the page won't work; see
- * _hash_freeovflpage for similar usage. We take care to make the
- * special space valid for the benefit of tools such as pageinspect.
+ * _hash_freeovflpage for similar usage. We take care to make the special
+ * space valid for the benefit of tools such as pageinspect.
*/
_hash_pageinit(page, BLCKSZ);
* _hash_getcachedmetap() -- Returns cached metapage data.
*
* If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on
- * the metapage. If not set, we'll set it before returning if we have to
- * refresh the cache, and return with a pin but no lock on it; caller is
- * responsible for releasing the pin.
+ * the metapage. If not set, we'll set it before returning if we have to
+ * refresh the cache, and return with a pin but no lock on it; caller is
+ * responsible for releasing the pin.
*
- * We refresh the cache if it's not initialized yet or force_refresh is true.
+ * We refresh the cache if it's not initialized yet or force_refresh is true.
*/
HashMetaPage
_hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
Assert(metabuf);
if (force_refresh || rel->rd_amcache == NULL)
{
- char *cache = NULL;
+ char *cache = NULL;
/*
- * It's important that we don't set rd_amcache to an invalid
- * value. Either MemoryContextAlloc or _hash_getbuf could fail,
- * so don't install a pointer to the newly-allocated storage in the
- * actual relcache entry until both have succeeeded.
+ * It's important that we don't set rd_amcache to an invalid value.
+ * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
+ * install a pointer to the newly-allocated storage in the actual
+ * relcache entry until both have succeeeded.
*/
if (rel->rd_amcache == NULL)
cache = MemoryContextAlloc(rel->rd_indexcxt,
* us an opportunity to use the previously saved metapage contents to reach
* the target bucket buffer, instead of reading from the metapage every time.
* This saves one buffer access every time we want to reach the target bucket
- * buffer, which is very helpful savings in bufmgr traffic and contention.
+ * buffer, which is very helpful savings in bufmgr traffic and contention.
*
* The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
* bucket buffer has to be locked for reading or writing.
void
_hash_kill_items(IndexScanDesc scan)
{
- HashScanOpaque so = (HashScanOpaque) scan->opaque;
- Page page;
- HashPageOpaque opaque;
- OffsetNumber offnum, maxoff;
- int numKilled = so->numKilled;
- int i;
- bool killedsomething = false;
+ HashScanOpaque so = (HashScanOpaque) scan->opaque;
+ Page page;
+ HashPageOpaque opaque;
+ OffsetNumber offnum,
+ maxoff;
+ int numKilled = so->numKilled;
+ int i;
+ bool killedsomething = false;
Assert(so->numKilled > 0);
Assert(so->killedItems != NULL);
/*
- * Always reset the scan state, so we don't look for same
- * items on other pages.
+ * Always reset the scan state, so we don't look for same items on other
+ * pages.
*/
so->numKilled = 0;
while (offnum <= maxoff)
{
- ItemId iid = PageGetItemId(page, offnum);
+ ItemId iid = PageGetItemId(page, offnum);
IndexTuple ituple = (IndexTuple) PageGetItem(page, iid);
if (ItemPointerEquals(&ituple->t_tid, &so->killedItems[i].heapTid))
/* found the item */
ItemIdMarkDead(iid);
killedsomething = true;
- break; /* out of inner search loop */
+ break; /* out of inner search loop */
}
offnum = OffsetNumberNext(offnum);
}
}
/*
- * Since this can be redone later if needed, mark as dirty hint.
- * Whenever we mark anything LP_DEAD, we also set the page's
+ * Since this can be redone later if needed, mark as dirty hint. Whenever
+ * we mark anything LP_DEAD, we also set the page's
* LH_PAGE_HAS_DEAD_TUPLES flag, which is likewise just a hint.
*/
if (killedsomething)
*
* For HOT considerations, this is wasted effort if we fail to update or
* have to put the new tuple on a different page. But we must compute the
- * list before obtaining buffer lock --- in the worst case, if we are doing
- * an update on one of the relevant system catalogs, we could deadlock if
- * we try to fetch the list later. In any case, the relcache caches the
- * data so this is usually pretty cheap.
+ * list before obtaining buffer lock --- in the worst case, if we are
+ * doing an update on one of the relevant system catalogs, we could
+ * deadlock if we try to fetch the list later. In any case, the relcache
+ * caches the data so this is usually pretty cheap.
*
* We also need columns used by the replica identity and columns that are
* considered the "key" of rows in the table.
page = BufferGetPage(buffer);
interesting_attrs = NULL;
+
/*
* If the page is already full, there is hardly any chance of doing a HOT
* update on this page. It might be wasteful effort to look for index
- * column updates only to later reject HOT updates for lack of space in the
- * same page. So we be conservative and only fetch hot_attrs if the page is
- * not already full. Since we are already holding a pin on the buffer,
- * there is no chance that the buffer can get cleaned up concurrently and
- * even if that was possible, in the worst case we lose a chance to do a
- * HOT update.
+ * column updates only to later reject HOT updates for lack of space in
+ * the same page. So we be conservative and only fetch hot_attrs if the
+ * page is not already full. Since we are already holding a pin on the
+ * buffer, there is no chance that the buffer can get cleaned up
+ * concurrently and even if that was possible, in the worst case we lose a
+ * chance to do a HOT update.
*/
if (!PageIsFull(page))
{
* logged.
*/
old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
- bms_overlap(modified_attrs, id_attrs),
+ bms_overlap(modified_attrs, id_attrs),
&old_key_copied);
/* NO EREPORT(ERROR) from here till changes are logged */
HeapDetermineModifiedColumns(Relation relation, Bitmapset *interesting_cols,
HeapTuple oldtup, HeapTuple newtup)
{
- int attnum;
- Bitmapset *modified = NULL;
+ int attnum;
+ Bitmapset *modified = NULL;
while ((attnum = bms_first_member(interesting_cols)) >= 0)
{
attnum += FirstLowInvalidHeapAttributeNumber;
if (!heap_tuple_attr_equals(RelationGetDescr(relation),
- attnum, oldtup, newtup))
+ attnum, oldtup, newtup))
modified = bms_add_member(modified,
- attnum - FirstLowInvalidHeapAttributeNumber);
+ attnum - FirstLowInvalidHeapAttributeNumber);
}
return modified;
* scan */
slock_t btps_mutex; /* protects above variables */
ConditionVariable btps_cv; /* used to synchronize parallel scan */
-} BTParallelScanDescData;
+} BTParallelScanDescData;
typedef struct BTParallelScanDescData *BTParallelScanDesc;
_bt_initmetapage(metapage, P_NONE, 0);
/*
- * Write the page and log it. It might seem that an immediate sync
- * would be sufficient to guarantee that the file exists on disk, but
- * recovery itself might remove it while replaying, for example, an
- * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we
- * need this even when wal_level=minimal.
+ * Write the page and log it. It might seem that an immediate sync would
+ * be sufficient to guarantee that the file exists on disk, but recovery
+ * itself might remove it while replaying, for example, an
+ * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we need
+ * this even when wal_level=minimal.
*/
PageSetChecksumInplace(metapage, BTREE_METAPAGE);
smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
xl_brin_desummarize *xlrec = (xl_brin_desummarize *) rec;
appendStringInfo(buf, "pagesPerRange %u, heapBlk %u, page offset %u",
- xlrec->pagesPerRange, xlrec->heapBlk, xlrec->regOffset);
+ xlrec->pagesPerRange, xlrec->heapBlk, xlrec->regOffset);
}
}
memcpy(&xlrec, rec, sizeof(xl_clog_truncate));
appendStringInfo(buf, "page %d; oldestXact %u",
- xlrec.pageno, xlrec.oldestXact);
+ xlrec.pageno, xlrec.oldestXact);
}
}
if (!(xlrec->flags & GIN_INSERT_ISDATA))
appendStringInfo(buf, " isdelete: %c",
- (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F');
+ (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F');
else if (xlrec->flags & GIN_INSERT_ISLEAF)
desc_recompress_leaf(buf, (ginxlogRecompressDataLeaf *) payload);
else
{
ginxlogInsertDataInternal *insertData =
- (ginxlogInsertDataInternal *) payload;
+ (ginxlogInsertDataInternal *) payload;
appendStringInfo(buf, " pitem: %u-%u/%u",
- PostingItemGetBlockNumber(&insertData->newitem),
- ItemPointerGetBlockNumber(&insertData->newitem.key),
- ItemPointerGetOffsetNumber(&insertData->newitem.key));
+ PostingItemGetBlockNumber(&insertData->newitem),
+ ItemPointerGetBlockNumber(&insertData->newitem.key),
+ ItemPointerGetOffsetNumber(&insertData->newitem.key));
}
}
}
else
{
ginxlogVacuumDataLeafPage *xlrec =
- (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
+ (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
desc_recompress_leaf(buf, &xlrec->data);
}
/*
* Write the page and log it unconditionally. This is important
- * particularly for indexes created on tablespaces and databases
- * whose creation happened after the last redo pointer as recovery
- * removes any of their existing content when the corresponding
- * create records are replayed.
+ * particularly for indexes created on tablespaces and databases whose
+ * creation happened after the last redo pointer as recovery removes any
+ * of their existing content when the corresponding create records are
+ * replayed.
*/
PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
static bool CLOGPagePrecedes(int page1, int page2);
static void WriteZeroPageXlogRec(int pageno);
static void WriteTruncateXlogRec(int pageno, TransactionId oldestXact,
- Oid oldestXidDb);
+ Oid oldestXidDb);
static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
TransactionId *subxids, XidStatus status,
XLogRecPtr lsn, int pageno);
/* vac_truncate_clog already advanced oldestXid */
Assert(TransactionIdPrecedesOrEquals(oldestXact,
- ShmemVariableCache->oldestXid));
+ ShmemVariableCache->oldestXid));
/*
- * Write XLOG record and flush XLOG to disk. We record the oldest xid we're
- * keeping information about here so we can ensure that it's always ahead
- * of clog truncation in case we crash, and so a standby finds out the new
- * valid xid before the next checkpoint.
+ * Write XLOG record and flush XLOG to disk. We record the oldest xid
+ * we're keeping information about here so we can ensure that it's always
+ * ahead of clog truncation in case we crash, and so a standby finds out
+ * the new valid xid before the next checkpoint.
*/
WriteTruncateXlogRec(cutoffPage, oldestXact, oldestxid_datoid);
SimpleLruFlush(CommitTsCtl, false);
/*
- * fsync pg_commit_ts to ensure that any files flushed previously are durably
- * on disk.
+ * fsync pg_commit_ts to ensure that any files flushed previously are
+ * durably on disk.
*/
fsync_fname("pg_commit_ts", true);
}
SimpleLruFlush(CommitTsCtl, true);
/*
- * fsync pg_commit_ts to ensure that any files flushed previously are durably
- * on disk.
+ * fsync pg_commit_ts to ensure that any files flushed previously are
+ * durably on disk.
*/
fsync_fname("pg_commit_ts", true);
}
ptr += entryno;
/*
- * It's possible we'll try to set the parent xid multiple times
- * but we shouldn't ever be changing the xid from one valid xid
- * to another valid xid, which would corrupt the data structure.
+ * It's possible we'll try to set the parent xid multiple times but we
+ * shouldn't ever be changing the xid from one valid xid to another valid
+ * xid, which would corrupt the data structure.
*/
if (*ptr != parent)
{
parentXid = SubTransGetParent(parentXid);
/*
- * By convention the parent xid gets allocated first, so should
- * always precede the child xid. Anything else points to a corrupted
- * data structure that could lead to an infinite loop, so exit.
+ * By convention the parent xid gets allocated first, so should always
+ * precede the child xid. Anything else points to a corrupted data
+ * structure that could lead to an infinite loop, so exit.
*/
if (!TransactionIdPrecedes(parentXid, previousXid))
elog(ERROR, "pg_subtrans contains invalid entry: xid %u points to parent xid %u",
- previousXid, parentXid);
+ previousXid, parentXid);
}
Assert(TransactionIdIsValid(previousXid));
*/
XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */
- TransactionId xid; /* The GXACT id */
+ TransactionId xid; /* The GXACT id */
Oid owner; /* ID of user that executed the xact */
BackendId locking_backend; /* backend currently working on the xact */
static void XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len);
static char *ProcessTwoPhaseBuffer(TransactionId xid,
- XLogRecPtr prepare_start_lsn,
- bool fromdisk, bool setParent, bool setNextXid);
+ XLogRecPtr prepare_start_lsn,
+ bool fromdisk, bool setParent, bool setNextXid);
static void MarkAsPreparingGuts(GlobalTransaction gxact, TransactionId xid,
- const char *gid, TimestampTz prepared_at, Oid owner,
- Oid databaseid);
+ const char *gid, TimestampTz prepared_at, Oid owner,
+ Oid databaseid);
static void RemoveTwoPhaseFile(TransactionId xid, bool giveWarning);
static void RecreateTwoPhaseFile(TransactionId xid, void *content, int len);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while allocating a WAL reading processor.")));
+ errdetail("Failed while allocating a WAL reading processor.")));
record = XLogReadRecord(xlogreader, lsn, &errormsg);
if (record == NULL)
(XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("expected two-phase state data is not present in WAL at %X/%X",
- (uint32) (lsn >> 32),
- (uint32) lsn)));
+ errmsg("expected two-phase state data is not present in WAL at %X/%X",
+ (uint32) (lsn >> 32),
+ (uint32) lsn)));
if (len != NULL)
*len = XLogRecGetDataLen(xlogreader);
LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
- /* Note that we are using gxact not pgxact so this works in recovery also */
+ /*
+ * Note that we are using gxact not pgxact so this works in recovery
+ * also
+ */
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
if ((gxact->valid || gxact->inredo) &&
void
restoreTwoPhaseData(void)
{
- DIR *cldir;
- struct dirent *clde;
+ DIR *cldir;
+ struct dirent *clde;
cldir = AllocateDir(TWOPHASE_DIR);
while ((clde = ReadDir(cldir, TWOPHASE_DIR)) != NULL)
xid = gxact->xid;
buf = ProcessTwoPhaseBuffer(xid,
- gxact->prepare_start_lsn,
- gxact->ondisk, false, true);
+ gxact->prepare_start_lsn,
+ gxact->ondisk, false, true);
if (buf == NULL)
continue;
xid = gxact->xid;
buf = ProcessTwoPhaseBuffer(xid,
- gxact->prepare_start_lsn,
- gxact->ondisk, false, false);
+ gxact->prepare_start_lsn,
+ gxact->ondisk, false, false);
if (buf != NULL)
pfree(buf);
}
xid = gxact->xid;
/*
- * Reconstruct subtrans state for the transaction --- needed
- * because pg_subtrans is not preserved over a restart. Note that
- * we are linking all the subtransactions directly to the
- * top-level XID; there may originally have been a more complex
- * hierarchy, but there's no need to restore that exactly.
- * It's possible that SubTransSetParent has been set before, if
- * the prepared transaction generated xid assignment records.
+ * Reconstruct subtrans state for the transaction --- needed because
+ * pg_subtrans is not preserved over a restart. Note that we are
+ * linking all the subtransactions directly to the top-level XID;
+ * there may originally have been a more complex hierarchy, but
+ * there's no need to restore that exactly. It's possible that
+ * SubTransSetParent has been set before, if the prepared transaction
+ * generated xid assignment records.
*/
buf = ProcessTwoPhaseBuffer(xid,
- gxact->prepare_start_lsn,
- gxact->ondisk, true, false);
+ gxact->prepare_start_lsn,
+ gxact->ondisk, true, false);
if (buf == NULL)
continue;
bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
/*
- * Recreate its GXACT and dummy PGPROC. But, check whether
- * it was added in redo and already has a shmem entry for
- * it.
+ * Recreate its GXACT and dummy PGPROC. But, check whether it was
+ * added in redo and already has a shmem entry for it.
*/
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
MarkAsPreparingGuts(gxact, xid, gid,
StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
/*
- * We're done with recovering this transaction. Clear
- * MyLockedGxact, like we do in PrepareTransaction() during normal
- * operation.
+ * We're done with recovering this transaction. Clear MyLockedGxact,
+ * like we do in PrepareTransaction() during normal operation.
*/
PostPrepare_Twophase();
else
{
ereport(WARNING,
- (errmsg("removing future two-phase state from memory for \"%u\"",
- xid)));
+ (errmsg("removing future two-phase state from memory for \"%u\"",
+ xid)));
PrepareRedoRemove(xid, true);
}
return NULL;
if (buf == NULL)
{
ereport(WARNING,
- (errmsg("removing corrupt two-phase state file for \"%u\"",
- xid)));
+ (errmsg("removing corrupt two-phase state file for \"%u\"",
+ xid)));
RemoveTwoPhaseFile(xid, true);
return NULL;
}
if (fromdisk)
{
ereport(WARNING,
- (errmsg("removing corrupt two-phase state file for \"%u\"",
- xid)));
+ (errmsg("removing corrupt two-phase state file for \"%u\"",
+ xid)));
RemoveTwoPhaseFile(xid, true);
}
else
{
ereport(WARNING,
- (errmsg("removing corrupt two-phase state from memory for \"%u\"",
- xid)));
+ (errmsg("removing corrupt two-phase state from memory for \"%u\"",
+ xid)));
PrepareRedoRemove(xid, true);
}
pfree(buf);
}
/*
- * Examine subtransaction XIDs ... they should all follow main
- * XID, and they may force us to advance nextXid.
+ * Examine subtransaction XIDs ... they should all follow main XID, and
+ * they may force us to advance nextXid.
*/
subxids = (TransactionId *) (buf +
MAXALIGN(sizeof(TwoPhaseFileHeader)) +
*/
LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
if (TransactionIdFollowsOrEquals(subxid,
- ShmemVariableCache->nextXid))
+ ShmemVariableCache->nextXid))
{
ShmemVariableCache->nextXid = subxid;
TransactionIdAdvance(ShmemVariableCache->nextXid);
MyPgXact->delayChkpt = true;
/*
- * Emit the XLOG commit record. Note that we mark 2PC commits as potentially
- * having AccessExclusiveLocks since we don't know whether or not they do.
+ * Emit the XLOG commit record. Note that we mark 2PC commits as
+ * potentially having AccessExclusiveLocks since we don't know whether or
+ * not they do.
*/
recptr = XactLogCommitRecord(committs,
nchildren, children, nrels, rels,
ninvalmsgs, invalmsgs,
initfileinval, false,
- MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
+ MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
xid);
START_CRIT_SECTION();
/*
- * Emit the XLOG commit record. Note that we mark 2PC aborts as potentially
- * having AccessExclusiveLocks since we don't know whether or not they do.
+ * Emit the XLOG commit record. Note that we mark 2PC aborts as
+ * potentially having AccessExclusiveLocks since we don't know whether or
+ * not they do.
*/
recptr = XactLogAbortRecord(GetCurrentTimestamp(),
nchildren, children,
nrels, rels,
- MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
+ MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
xid);
/* Always flush, since we're about to remove the 2PC state file */
PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
TwoPhaseFileHeader *hdr = (TwoPhaseFileHeader *) buf;
- char *bufptr;
- const char *gid;
+ char *bufptr;
+ const char *gid;
GlobalTransaction gxact;
Assert(RecoveryInProgress());
*
* This creates a gxact struct and puts it into the active array.
*
- * In redo, this struct is mainly used to track PREPARE/COMMIT entries
- * in shared memory. Hence, we only fill up the bare minimum contents here.
+ * In redo, this struct is mainly used to track PREPARE/COMMIT entries in
+ * shared memory. Hence, we only fill up the bare minimum contents here.
* The gxact also gets marked with gxact->inredo set to true to indicate
* that it got added in the redo phase
*/
gxact->locking_backend = InvalidBackendId;
gxact->valid = false;
gxact->ondisk = XLogRecPtrIsInvalid(start_lsn);
- gxact->inredo = true; /* yes, added in redo */
+ gxact->inredo = true; /* yes, added in redo */
strcpy(gxact->gid, gid);
/* And insert it into the active array */
{
LWLockAcquire(CLogTruncationLock, LW_EXCLUSIVE);
if (TransactionIdPrecedes(ShmemVariableCache->oldestClogXid,
- oldest_datfrozenxid))
+ oldest_datfrozenxid))
{
ShmemVariableCache->oldestClogXid = oldest_datfrozenxid;
}
* globally accessible, so can be set from anywhere in the code that requires
* recording flags.
*/
-int MyXactFlags;
+int MyXactFlags;
/*
* transaction states - transaction state from server perspective
* do abort cleanup processing
*/
AtCleanup_Portals(); /* now safe to release portal memory */
- AtEOXact_Snapshot(false, true); /* and release the transaction's snapshots */
+ AtEOXact_Snapshot(false, true); /* and release the transaction's
+ * snapshots */
CurrentResourceOwner = NULL; /* and resource owner */
if (TopTransactionResourceOwner)
else if (info == XLOG_XACT_PREPARE)
{
/*
- * Store xid and start/end pointers of the WAL record in
- * TwoPhaseState gxact entry.
+ * Store xid and start/end pointers of the WAL record in TwoPhaseState
+ * gxact entry.
*/
PrepareRedoAdd(XLogRecGetData(record),
record->ReadRecPtr,
bool fullPageWrites;
/*
- * exclusiveBackupState indicates the state of an exclusive backup
- * (see comments of ExclusiveBackupState for more details).
- * nonExclusiveBackups is a counter indicating the number of streaming
- * base backups currently in progress. forcePageWrites is set to true
- * when either of these is non-zero. lastBackupStart is the latest
- * checkpoint redo location used as a starting point for an online
- * backup.
+ * exclusiveBackupState indicates the state of an exclusive backup (see
+ * comments of ExclusiveBackupState for more details). nonExclusiveBackups
+ * is a counter indicating the number of streaming base backups currently
+ * in progress. forcePageWrites is set to true when either of these is
+ * non-zero. lastBackupStart is the latest checkpoint redo location used
+ * as a starting point for an online backup.
*/
ExclusiveBackupState exclusiveBackupState;
int nonExclusiveBackups;
*/
if ((flags & XLOG_MARK_UNIMPORTANT) == 0)
{
- int lockno = holdingAllLocks ? 0 : MyLockNo;
+ int lockno = holdingAllLocks ? 0 : MyLockNo;
WALInsertLocks[lockno].l.lastImportantAt = StartPos;
}
/*
* If the block LSN is already ahead of this WAL record, we can't
- * expect contents to match. This can happen if recovery is restarted.
+ * expect contents to match. This can happen if recovery is
+ * restarted.
*/
if (PageGetLSN(replay_image_masked) > record->EndRecPtr)
continue;
sysidentifier |= getpid() & 0xFFF;
/*
- * Generate a random nonce. This is used for authentication requests
- * that will fail because the user does not exist. The nonce is used to
- * create a genuine-looking password challenge for the non-existent user,
- * in lieu of an actual stored password.
+ * Generate a random nonce. This is used for authentication requests that
+ * will fail because the user does not exist. The nonce is used to create
+ * a genuine-looking password challenge for the non-existent user, in lieu
+ * of an actual stored password.
*/
if (!pg_backend_random(mock_auth_nonce, MOCK_AUTH_NONCE_LEN))
ereport(PANIC,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("could not generate secret authorization token")));
+ (errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("could not generate secret authorization token")));
/* First timeline ID is always 1 */
ThisTimeLineID = 1;
DatumGetLSN(DirectFunctionCall3(pg_lsn_in,
CStringGetDatum(item->value),
ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(-1)));
+ Int32GetDatum(-1)));
ereport(DEBUG2,
(errmsg_internal("recovery_target_lsn = '%X/%X'",
(uint32) (recoveryTargetLSN >> 32),
recoveryStopTime = 0;
recoveryStopName[0] = '\0';
ereport(LOG,
- (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"",
- (uint32) (recoveryStopLSN >> 32),
- (uint32) recoveryStopLSN)));
+ (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"",
+ (uint32) (recoveryStopLSN >> 32),
+ (uint32) recoveryStopLSN)));
return true;
}
recoveryStopTime = 0;
recoveryStopName[0] = '\0';
ereport(LOG,
- (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"",
- (uint32) (recoveryStopLSN >> 32),
- (uint32) recoveryStopLSN)));
+ (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"",
+ (uint32) (recoveryStopLSN >> 32),
+ (uint32) recoveryStopLSN)));
return true;
}
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while allocating a WAL reading processor.")));
+ errdetail("Failed while allocating a WAL reading processor.")));
xlogreader->system_identifier = ControlFile->system_identifier;
/*
- * Allocate pages dedicated to WAL consistency checks, those had better
- * be aligned.
+ * Allocate pages dedicated to WAL consistency checks, those had better be
+ * aligned.
*/
replay_image_masked = (char *) palloc(BLCKSZ);
master_image_masked = (char *) palloc(BLCKSZ);
/*
* Copy any missing timeline history files between 'now' and the recovery
- * target timeline from archive to pg_wal. While we don't need those
- * files ourselves - the history file of the recovery target timeline
- * covers all the previous timelines in the history too - a cascading
- * standby server might be interested in them. Or, if you archive the WAL
- * from this server to a different archive than the master, it'd be good
- * for all the history files to get archived there after failover, so that
- * you can use one of the old timelines as a PITR target. Timeline history
- * files are small, so it's better to copy them unnecessarily than not
- * copy them and regret later.
+ * target timeline from archive to pg_wal. While we don't need those files
+ * ourselves - the history file of the recovery target timeline covers all
+ * the previous timelines in the history too - a cascading standby server
+ * might be interested in them. Or, if you archive the WAL from this
+ * server to a different archive than the master, it'd be good for all the
+ * history files to get archived there after failover, so that you can use
+ * one of the old timelines as a PITR target. Timeline history files are
+ * small, so it's better to copy them unnecessarily than not copy them and
+ * regret later.
*/
restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
/*
- * Before running in recovery, scan pg_twophase and fill in its status
- * to be able to work on entries generated by redo. Doing a scan before
+ * Before running in recovery, scan pg_twophase and fill in its status to
+ * be able to work on entries generated by redo. Doing a scan before
* taking any recovery action has the merit to discard any 2PC files that
* are newer than the first record to replay, saving from any conflicts at
* replay. This avoids as well any subsequent scans when doing recovery
snprintf(reason, sizeof(reason),
"%s LSN %X/%X\n",
recoveryStopAfter ? "after" : "before",
- (uint32 ) (recoveryStopLSN >> 32),
+ (uint32) (recoveryStopLSN >> 32),
(uint32) recoveryStopLSN);
else if (recoveryTarget == RECOVERY_TARGET_NAME)
snprintf(reason, sizeof(reason),
MultiXactAdvanceOldest(checkPoint.oldestMulti,
checkPoint.oldestMultiDB);
+
/*
* No need to set oldestClogXid here as well; it'll be set when we
* redo an xl_clog_truncate if it changed since initialization.
if (exclusive)
{
/*
- * At first, mark that we're now starting an exclusive backup,
- * to ensure that there are no other sessions currently running
+ * At first, mark that we're now starting an exclusive backup, to
+ * ensure that there are no other sessions currently running
* pg_start_backup() or pg_stop_backup().
*/
if (XLogCtl->Insert.exclusiveBackupState != EXCLUSIVE_BACKUP_NONE)
{
/*
* Check for existing backup label --- implies a backup is already
- * running. (XXX given that we checked exclusiveBackupState above,
- * maybe it would be OK to just unlink any such label file?)
+ * running. (XXX given that we checked exclusiveBackupState
+ * above, maybe it would be OK to just unlink any such label
+ * file?)
*/
if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
{
if (exclusive)
{
/*
- * At first, mark that we're now stopping an exclusive backup,
- * to ensure that there are no other sessions currently running
+ * At first, mark that we're now stopping an exclusive backup, to
+ * ensure that there are no other sessions currently running
* pg_start_backup() or pg_stop_backup().
*/
WALInsertLockAcquireExclusive();
durable_unlink(BACKUP_LABEL_FILE, ERROR);
/*
- * Remove tablespace_map file if present, it is created only if there
- * are tablespaces.
+ * Remove tablespace_map file if present, it is created only if
+ * there are tablespaces.
*/
durable_unlink(TABLESPACE_MAP, DEBUG1);
}
* archived before returning. If archiving isn't enabled, the required WAL
* needs to be transported via streaming replication (hopefully with
* wal_keep_segments set high enough), or some more exotic mechanism like
- * polling and copying files from pg_wal with script. We have no
- * knowledge of those mechanisms, so it's up to the user to ensure that he
- * gets all the required WAL.
+ * polling and copying files from pg_wal with script. We have no knowledge
+ * of those mechanisms, so it's up to the user to ensure that he gets all
+ * the required WAL.
*
* We wait until both the last WAL file filled during backup and the
* history file have been archived, and assume that the alphabetic sorting
* We wait forever, since archive_command is supposed to work and we
* assume the admin wanted his backup to work completely. If you don't
* wish to wait, then either waitforarchive should be passed in as false,
- * or you can set statement_timeout. Also, some notices are
- * issued to clue in anyone who might be doing this interactively.
+ * or you can set statement_timeout. Also, some notices are issued to
+ * clue in anyone who might be doing this interactively.
*/
if (waitforarchive && XLogArchivingActive())
{
* little chance that the problem will just go away, but
* PANIC is not good for availability either, especially
* in hot standby mode. So, we treat that the same as
- * disconnection, and retry from archive/pg_wal again.
- * The WAL in the archive should be identical to what was
+ * disconnection, and retry from archive/pg_wal again. The
+ * WAL in the archive should be identical to what was
* streamed, so it's unlikely that it helps, but one can
* hope...
*/
* not open already. Also read the timeline history
* file if we haven't initialized timeline history
* yet; it should be streamed over and present in
- * pg_wal by now. Use XLOG_FROM_STREAM so that
- * source info is set correctly and XLogReceiptTime
- * isn't changed.
+ * pg_wal by now. Use XLOG_FROM_STREAM so that source
+ * info is set correctly and XLogReceiptTime isn't
+ * changed.
*/
if (readFile < 0)
{
* Exclusive backups were typically started in a different connection, so
* don't try to verify that status of backup is set to
* SESSION_BACKUP_EXCLUSIVE in this function. Actual verification that an
- * exclusive backup is in fact running is handled inside do_pg_stop_backup.
+ * exclusive backup is in fact running is handled inside
+ * do_pg_stop_backup.
*/
stoppoint = do_pg_stop_backup(NULL, true, NULL);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("recovery is in progress"),
- errhint("pg_walfile_name() cannot be executed during recovery.")));
+ errhint("pg_walfile_name() cannot be executed during recovery.")));
XLByteToPrevSeg(locationpoint, xlogsegno);
XLogFileName(xlogfilename, ThisTimeLineID, xlogsegno);
*
* The flags that can be used here are:
* - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
- * included in the record.
+ * included in the record.
* - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
- * durability, which allows to avoid triggering WAL archiving and other
- * background activity.
+ * durability, which allows to avoid triggering WAL archiving and other
+ * background activity.
*/
void
XLogSetRecordFlags(uint8 flags)
hdr_rdt.data = hdr_scratch;
/*
- * Enforce consistency checks for this record if user is looking for
- * it. Do this before at the beginning of this routine to give the
- * possibility for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY
- * directly for a record.
+ * Enforce consistency checks for this record if user is looking for it.
+ * Do this before at the beginning of this routine to give the possibility
+ * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
+ * a record.
*/
if (wal_consistency_checking[rmid])
info |= XLR_CHECK_CONSISTENCY;
bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
/*
- * If needs_backup is true or WAL checking is enabled for
- * current resource manager, log a full-page write for the current
- * block.
+ * If needs_backup is true or WAL checking is enabled for current
+ * resource manager, log a full-page write for the current block.
*/
include_image = needs_backup || (info & XLR_CHECK_CONSISTENCY) != 0;
bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
/*
- * If WAL consistency checking is enabled for the resource manager of
- * this WAL record, a full-page image is included in the record
+ * If WAL consistency checking is enabled for the resource manager
+ * of this WAL record, a full-page image is included in the record
* for the block modified. During redo, the full-page is replayed
* only if BKPIMAGE_APPLY is set.
*/
* that, except when caller has explicitly specified the offset that
* falls somewhere there or when we are skipping multi-page
* continuation record. It doesn't matter though because
- * ReadPageInternal() is prepared to handle that and will read at least
- * short page-header worth of data
+ * ReadPageInternal() is prepared to handle that and will read at
+ * least short page-header worth of data
*/
targetRecOff = tmpRecPtr % XLOG_BLCKSZ;
Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
/*
- * If the desired page is currently read in and valid, we have nothing to do.
+ * If the desired page is currently read in and valid, we have nothing to
+ * do.
*
* The caller should've ensured that it didn't previously advance readOff
- * past the valid limit of this timeline, so it doesn't matter if the current
- * TLI has since become historical.
+ * past the valid limit of this timeline, so it doesn't matter if the
+ * current TLI has since become historical.
*/
if (lastReadPage == wantPage &&
state->readLen != 0 &&
- lastReadPage + state->readLen >= wantPage + Min(wantLength,XLOG_BLCKSZ-1))
+ lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
return;
/*
* If we're reading from the current timeline, it hasn't become historical
* and the page we're reading is after the last page read, we can again
- * just carry on. (Seeking backwards requires a check to make sure the older
- * page isn't on a prior timeline).
+ * just carry on. (Seeking backwards requires a check to make sure the
+ * older page isn't on a prior timeline).
*
* ThisTimeLineID might've become historical since we last looked, but the
* caller is required not to read past the flush limit it saw at the time
/*
* If we're just reading pages from a previously validated historical
- * timeline and the timeline we're reading from is valid until the
- * end of the current segment we can just keep reading.
+ * timeline and the timeline we're reading from is valid until the end of
+ * the current segment we can just keep reading.
*/
if (state->currTLIValidUntil != InvalidXLogRecPtr &&
state->currTLI != ThisTimeLineID &&
return;
/*
- * If we reach this point we're either looking up a page for random access,
- * the current timeline just became historical, or we're reading from a new
- * segment containing a timeline switch. In all cases we need to determine
- * the newest timeline on the segment.
+ * If we reach this point we're either looking up a page for random
+ * access, the current timeline just became historical, or we're reading
+ * from a new segment containing a timeline switch. In all cases we need
+ * to determine the newest timeline on the segment.
*
* If it's the current timeline we can just keep reading from here unless
* we detect a timeline switch that makes the current timeline historical.
* We need to re-read the timeline history in case it's been changed
* by a promotion or replay from a cascaded replica.
*/
- List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
+ List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
- XLogRecPtr endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1;
+ XLogRecPtr endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1;
Assert(wantPage / XLogSegSize == endOfSegment / XLogSegSize);
- /* Find the timeline of the last LSN on the segment containing wantPage. */
+ /*
+ * Find the timeline of the last LSN on the segment containing
+ * wantPage.
+ */
state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
- &state->nextTLI);
+ &state->nextTLI);
Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
- wantPage + wantLength < state->currTLIValidUntil);
+ wantPage + wantLength < state->currTLIValidUntil);
list_free_deep(timelineHistory);
elog(DEBUG3, "switched to timeline %u valid until %X/%X",
- state->currTLI,
- (uint32)(state->currTLIValidUntil >> 32),
- (uint32)(state->currTLIValidUntil));
+ state->currTLI,
+ (uint32) (state->currTLIValidUntil >> 32),
+ (uint32) (state->currTLIValidUntil));
}
}
*
* We have to do it each time through the loop because if we're in
* recovery as a cascading standby, the current timeline might've
- * become historical. We can't rely on RecoveryInProgress() because
- * in a standby configuration like
+ * become historical. We can't rely on RecoveryInProgress() because in
+ * a standby configuration like
*
- * A => B => C
+ * A => B => C
*
* if we're a logical decoding session on C, and B gets promoted, our
* timeline will change while we remain in recovery.
*
* We can't just keep reading from the old timeline as the last WAL
- * archive in the timeline will get renamed to .partial by StartupXLOG().
+ * archive in the timeline will get renamed to .partial by
+ * StartupXLOG().
*
* If that happens after our caller updated ThisTimeLineID but before
* we actually read the xlog page, we might still try to read from the
- * old (now renamed) segment and fail. There's not much we can do about
- * this, but it can only happen when we're a leaf of a cascading
+ * old (now renamed) segment and fail. There's not much we can do
+ * about this, but it can only happen when we're a leaf of a cascading
* standby whose master gets promoted while we're decoding, so a
* one-off ERROR isn't too bad.
*/
heap_drop_with_catalog(object->objectId);
}
- /* for a sequence, in addition to dropping the heap, also
- * delete pg_sequence tuple */
+ /*
+ * for a sequence, in addition to dropping the heap, also
+ * delete pg_sequence tuple
+ */
if (relKind == RELKIND_SEQUENCE)
DeleteSequenceTuple(object->objectId);
break;
}
else if (IsA(node, NextValueExpr))
{
- NextValueExpr *nve = (NextValueExpr *) node;
+ NextValueExpr *nve = (NextValueExpr *) node;
add_object_address(OCLASS_CLASS, nve->seqid, 0,
context->addrs);
/*
* To drop a partition safely, we must grab exclusive lock on its parent,
* because another backend might be about to execute a query on the parent
- * table. If it relies on previously cached partition descriptor, then
- * it could attempt to access the just-dropped relation as its partition.
- * We must therefore take a table lock strong enough to prevent all
- * queries on the table from proceeding until we commit and send out a
+ * table. If it relies on previously cached partition descriptor, then it
+ * could attempt to access the just-dropped relation as its partition. We
+ * must therefore take a table lock strong enough to prevent all queries
+ * on the table from proceeding until we commit and send out a
* shared-cache-inval notice that will make them update their index lists.
*/
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
objlist = castNode(List, object);
domaddr = get_object_address_type(OBJECT_DOMAIN,
- linitial_node(TypeName, objlist),
+ linitial_node(TypeName, objlist),
missing_ok);
constrname = strVal(lsecond(objlist));
case OBJECT_PUBLICATION:
case OBJECT_SUBSCRIPTION:
address = get_object_address_unqualified(objtype,
- (Value *) object, missing_ok);
+ (Value *) object, missing_ok);
break;
case OBJECT_TYPE:
case OBJECT_DOMAIN:
if (relation != NULL)
heap_close(relation, AccessShareLock);
- relation = NULL; /* department of accident prevention */
+ relation = NULL; /* department of accident prevention */
return address;
}
relname = linitial(object);
relation = relation_openrv_extended(makeRangeVarFromNameList(relname),
- AccessShareLock, missing_ok);
+ AccessShareLock, missing_ok);
if (!relation)
return address;
if (list_length(name) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("name list length must be exactly %d", 1)));
+ errmsg("name list length must be exactly %d", 1)));
objnode = linitial(name);
break;
case OBJECT_TYPE:
case OBJECT_FUNCTION:
case OBJECT_AGGREGATE:
case OBJECT_OPERATOR:
- {
- ObjectWithArgs *owa = makeNode(ObjectWithArgs);
+ {
+ ObjectWithArgs *owa = makeNode(ObjectWithArgs);
- owa->objname = name;
- owa->objargs = args;
- objnode = (Node *) owa;
- break;
- }
+ owa->objname = name;
+ owa->objargs = args;
+ objnode = (Node *) owa;
+ break;
+ }
case OBJECT_LARGEOBJECT:
/* already handled above */
break;
- /* no default, to let compiler warn about missing case */
+ /* no default, to let compiler warn about missing case */
}
if (objnode == NULL)
{
HeapTuple tup;
char *pubname;
- Form_pg_publication_rel prform;
+ Form_pg_publication_rel prform;
tup = SearchSysCache1(PUBLICATIONREL,
ObjectIdGetDatum(object->objectId));
{
HeapTuple tup;
char *pubname;
- Form_pg_publication_rel prform;
+ Form_pg_publication_rel prform;
tup = SearchSysCache1(PUBLICATIONREL,
ObjectIdGetDatum(object->objectId));
if (object)
*object = list_make3(pstrdup(NameStr(amForm->amname)),
- pstrdup(schema),
- pstrdup(NameStr(opfForm->opfname)));
+ pstrdup(schema),
+ pstrdup(NameStr(opfForm->opfname)));
ReleaseSysCache(amTup);
ReleaseSysCache(opfTup);
if (if_not_exists)
{
ereport(NOTICE,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- collencoding == -1
- ? errmsg("collation \"%s\" already exists, skipping",
- collname)
- : errmsg("collation \"%s\" for encoding \"%s\" already exists, skipping",
- collname, pg_encoding_to_char(collencoding))));
+ (errcode(ERRCODE_DUPLICATE_OBJECT),
+ collencoding == -1
+ ? errmsg("collation \"%s\" already exists, skipping",
+ collname)
+ : errmsg("collation \"%s\" for encoding \"%s\" already exists, skipping",
+ collname, pg_encoding_to_char(collencoding))));
return InvalidOid;
}
else
collencoding == -1
? errmsg("collation \"%s\" already exists",
collname)
- : errmsg("collation \"%s\" for encoding \"%s\" already exists",
- collname, pg_encoding_to_char(collencoding))));
+ : errmsg("collation \"%s\" for encoding \"%s\" already exists",
+ collname, pg_encoding_to_char(collencoding))));
}
/* open pg_collation; see below about the lock level */
{
heap_close(rel, NoLock);
ereport(NOTICE,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("collation \"%s\" already exists, skipping",
- collname)));
+ (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("collation \"%s\" already exists, skipping",
+ collname)));
return InvalidOid;
}
else
ereport(ERROR,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("collation \"%s\" already exists",
- collname)));
+ (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("collation \"%s\" already exists",
+ collname)));
}
tupDesc = RelationGetDescr(rel);
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
/*
- * We assume any auto or internal dependency of a sequence on a column must be
- * what we are looking for. (We need the relkind test because indexes
- * can also have auto dependencies on columns.)
+ * We assume any auto or internal dependency of a sequence on a column
+ * must be what we are looking for. (We need the relkind test because
+ * indexes can also have auto dependencies on columns.)
*/
if (deprec->classid == RelationRelationId &&
deprec->objsubid == 0 &&
*/
typedef struct SeenRelsEntry
{
- Oid rel_id; /* relation oid */
- ListCell *numparents_cell; /* corresponding list cell */
+ Oid rel_id; /* relation oid */
+ ListCell *numparents_cell; /* corresponding list cell */
} SeenRelsEntry;
/*
find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
{
/* hash table for O(1) rel_oid -> rel_numparents cell lookup */
- HTAB *seen_rels;
- HASHCTL ctl;
+ HTAB *seen_rels;
+ HASHCTL ctl;
List *rels_list,
*rel_numparents;
ListCell *l;
foreach(lc, currentchildren)
{
Oid child_oid = lfirst_oid(lc);
- bool found;
- SeenRelsEntry *hash_entry;
+ bool found;
+ SeenRelsEntry *hash_entry;
hash_entry = hash_search(seen_rels, &child_oid, HASH_ENTER, &found);
if (found)
TupleDesc tupDesc;
ObjectAddress myself;
int i;
- Acl *nspacl;
+ Acl *nspacl;
/* sanity checks */
if (!nspName)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"%s\" is a system table",
RelationGetRelationName(targetrel)),
- errdetail("System tables cannot be added to publications.")));
+ errdetail("System tables cannot be added to publications.")));
/* UNLOGGED and TEMP relations cannot be part of publication. */
if (!RelationNeedsWAL(targetrel))
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("table \"%s\" cannot be replicated",
RelationGetRelationName(targetrel)),
- errdetail("Temporary and unlogged relations cannot be replicated.")));
+ errdetail("Temporary and unlogged relations cannot be replicated.")));
}
/*
Oid relid = RelationGetRelid(targetrel);
Oid prrelid;
Publication *pub = GetPublication(pubid);
- ObjectAddress myself,
- referenced;
+ ObjectAddress myself,
+ referenced;
rel = heap_open(PublicationRelRelationId, RowExclusiveLock);
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("relation \"%s\" is already member of publication \"%s\"",
- RelationGetRelationName(targetrel), pub->name)));
+ errmsg("relation \"%s\" is already member of publication \"%s\"",
+ RelationGetRelationName(targetrel), pub->name)));
}
check_publication_add_relation(targetrel);
List *
GetRelationPublications(Oid relid)
{
- List *result = NIL;
- CatCList *pubrellist;
- int i;
+ List *result = NIL;
+ CatCList *pubrellist;
+ int i;
/* Find all publications associated with the relation. */
pubrellist = SearchSysCacheList1(PUBLICATIONRELMAP,
List *
GetPublicationRelations(Oid pubid)
{
- List *result;
- Relation pubrelsrel;
- ScanKeyData scankey;
- SysScanDesc scan;
- HeapTuple tup;
+ List *result;
+ Relation pubrelsrel;
+ ScanKeyData scankey;
+ SysScanDesc scan;
+ HeapTuple tup;
/* Find all publications associated with the relation. */
pubrelsrel = heap_open(PublicationRelRelationId, AccessShareLock);
result = NIL;
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- Form_pg_publication_rel pubrel;
+ Form_pg_publication_rel pubrel;
pubrel = (Form_pg_publication_rel) GETSTRUCT(tup);
List *
GetAllTablesPublications(void)
{
- List *result;
- Relation rel;
- ScanKeyData scankey;
- SysScanDesc scan;
- HeapTuple tup;
+ List *result;
+ Relation rel;
+ ScanKeyData scankey;
+ SysScanDesc scan;
+ HeapTuple tup;
/* Find all publications that are marked as for all tables. */
rel = heap_open(PublicationRelationId, AccessShareLock);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid relid = HeapTupleGetOid(tuple);
- Form_pg_class relForm = (Form_pg_class) GETSTRUCT(tuple);
+ Oid relid = HeapTupleGetOid(tuple);
+ Form_pg_class relForm = (Form_pg_class) GETSTRUCT(tuple);
if (is_publishable_class(relid, relForm))
result = lappend_oid(result, relid);
Publication *
GetPublication(Oid pubid)
{
- HeapTuple tup;
- Publication *pub;
- Form_pg_publication pubform;
+ HeapTuple tup;
+ Publication *pub;
+ Form_pg_publication pubform;
tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
char *
get_publication_name(Oid pubid)
{
- HeapTuple tup;
- char *pubname;
- Form_pg_publication pubform;
+ HeapTuple tup;
+ char *pubname;
+ Form_pg_publication pubform;
tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
pg_get_publication_tables(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
- char *pubname = text_to_cstring(PG_GETARG_TEXT_PP(0));
- Publication *publication;
- List *tables;
- ListCell **lcp;
+ char *pubname = text_to_cstring(PG_GETARG_TEXT_PP(0));
+ Publication *publication;
+ List *tables;
+ ListCell **lcp;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
while (*lcp != NULL)
{
- Oid relid = lfirst_oid(*lcp);
+ Oid relid = lfirst_oid(*lcp);
*lcp = lnext(*lcp);
SRF_RETURN_NEXT(funcctx, ObjectIdGetDatum(relid));
Subscription *
GetSubscription(Oid subid, bool missing_ok)
{
- HeapTuple tup;
- Subscription *sub;
- Form_pg_subscription subform;
- Datum datum;
- bool isnull;
+ HeapTuple tup;
+ Subscription *sub;
+ Form_pg_subscription subform;
+ Datum datum;
+ bool isnull;
tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
int
CountDBSubscriptions(Oid dbid)
{
- int nsubs = 0;
- Relation rel;
- ScanKeyData scankey;
- SysScanDesc scan;
- HeapTuple tup;
+ int nsubs = 0;
+ Relation rel;
+ ScanKeyData scankey;
+ SysScanDesc scan;
+ HeapTuple tup;
rel = heap_open(SubscriptionRelationId, RowExclusiveLock);
char *
get_subscription_name(Oid subid)
{
- HeapTuple tup;
- char *subname;
+ HeapTuple tup;
+ char *subname;
Form_pg_subscription subform;
tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
static List *
textarray_to_stringlist(ArrayType *textarray)
{
- Datum *elems;
- int nelems, i;
- List *res = NIL;
+ Datum *elems;
+ int nelems,
+ i;
+ List *res = NIL;
deconstruct_array(textarray,
TEXTOID, -1, false, 'i',
*/
Oid
SetSubscriptionRelState(Oid subid, Oid relid, char state,
- XLogRecPtr sublsn)
+ XLogRecPtr sublsn)
{
Relation rel;
HeapTuple tup;
ObjectIdGetDatum(subid));
/*
- * If the record for given table does not exist yet create new
- * record, otherwise update the existing one.
+ * If the record for given table does not exist yet create new record,
+ * otherwise update the existing one.
*/
if (!HeapTupleIsValid(tup))
{
Relation rel;
HeapTuple tup;
int nkeys = 0;
- ScanKeyData skey[2];
- SysScanDesc scan;
+ ScanKeyData skey[2];
+ SysScanDesc scan;
rel = heap_open(SubscriptionRelRelationId, AccessShareLock);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- Form_pg_subscription_rel subrel;
- SubscriptionRelState *relstate;
+ Form_pg_subscription_rel subrel;
+ SubscriptionRelState *relstate;
subrel = (Form_pg_subscription_rel) GETSTRUCT(tup);
- relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState));
+ relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState));
relstate->relid = subrel->srrelid;
relstate->state = subrel->srsubstate;
relstate->lsn = subrel->srsublsn;
Relation rel;
HeapTuple tup;
int nkeys = 0;
- ScanKeyData skey[2];
- SysScanDesc scan;
+ ScanKeyData skey[2];
+ SysScanDesc scan;
rel = heap_open(SubscriptionRelRelationId, AccessShareLock);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- Form_pg_subscription_rel subrel;
- SubscriptionRelState *relstate;
+ Form_pg_subscription_rel subrel;
+ SubscriptionRelState *relstate;
subrel = (Form_pg_subscription_rel) GETSTRUCT(tup);
- relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState));
+ relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState));
relstate->relid = subrel->srrelid;
relstate->state = subrel->srsubstate;
relstate->lsn = subrel->srsublsn;
address =
get_object_address_rv(stmt->objectType, stmt->relation, (List *) stmt->object,
- &rel, AccessExclusiveLock, false);
+ &rel, AccessExclusiveLock, false);
/*
* If a relation was involved, it would have been opened and locked. We
nrels,
i;
ListCell *lc;
- bool has_child;
+ bool has_child;
/*
* Find all members of inheritance set. We only need AccessShareLock on
elog(ERROR, "invalid collation version change");
else if (oldversion && newversion && strcmp(newversion, oldversion) != 0)
{
- bool nulls[Natts_pg_collation];
- bool replaces[Natts_pg_collation];
- Datum values[Natts_pg_collation];
+ bool nulls[Natts_pg_collation];
+ bool replaces[Natts_pg_collation];
+ Datum values[Natts_pg_collation];
ereport(NOTICE,
(errmsg("changing version from %s to %s",
uloc_toLanguageTag(localename, buf, sizeof(buf), TRUE, &status);
if (U_FAILURE(status))
ereport(ERROR,
- (errmsg("could not convert locale name \"%s\" to language tag: %s",
- localename, u_errorName(status))));
+ (errmsg("could not convert locale name \"%s\" to language tag: %s",
+ localename, u_errorName(status))));
return pstrdup(buf);
}
return result;
}
-#endif /* USE_ICU */
+#endif /* USE_ICU */
Datum
CollationCreate(localebuf, nspid, GetUserId(), COLLPROVIDER_LIBC, enc,
localebuf, localebuf,
- get_collation_actual_version(COLLPROVIDER_LIBC, localebuf),
+ get_collation_actual_version(COLLPROVIDER_LIBC, localebuf),
if_not_exists);
CommandCounterIncrement();
CollationCreate(alias, nspid, GetUserId(), COLLPROVIDER_LIBC, enc,
locale, locale,
- get_collation_actual_version(COLLPROVIDER_LIBC, locale),
+ get_collation_actual_version(COLLPROVIDER_LIBC, locale),
true);
CommandCounterIncrement();
}
}
else
{
- int i;
+ int i;
/*
* Start the loop at -1 to sneak in the root locale without too much
Oid collid;
if (i == -1)
- name = ""; /* ICU root locale */
+ name = ""; /* ICU root locale */
else
name = ucol_getAvailable(i);
collid = CollationCreate(psprintf("%s-x-icu", langtag),
nspid, GetUserId(), COLLPROVIDER_ICU, -1,
collcollate, collcollate,
- get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
+ get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
if_not_exists);
CreateComments(collid, CollationRelationId, 0,
en = ucol_getKeywordValuesForLocale("collation", name, TRUE, &status);
if (U_FAILURE(status))
ereport(ERROR,
- (errmsg("could not get keyword values for locale \"%s\": %s",
- name, u_errorName(status))));
+ (errmsg("could not get keyword values for locale \"%s\": %s",
+ name, u_errorName(status))));
status = U_ZERO_ERROR;
uenum_reset(en, &status);
while ((val = uenum_next(en, NULL, &status)))
{
- char *localeid = psprintf("%s@collation=%s", name, val);
+ char *localeid = psprintf("%s@collation=%s", name, val);
- langtag = get_icu_language_tag(localeid);
+ langtag = get_icu_language_tag(localeid);
collcollate = U_ICU_VERSION_MAJOR_NUM >= 54 ? langtag : localeid;
collid = CollationCreate(psprintf("%s-x-icu", langtag),
- nspid, GetUserId(), COLLPROVIDER_ICU, -1,
+ nspid, GetUserId(), COLLPROVIDER_ICU, -1,
collcollate, collcollate,
- get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
+ get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
if_not_exists);
CreateComments(collid, CollationRelationId, 0,
get_icu_locale_comment(localeid));
}
if (U_FAILURE(status))
ereport(ERROR,
- (errmsg("could not get keyword values for locale \"%s\": %s",
- name, u_errorName(status))));
+ (errmsg("could not get keyword values for locale \"%s\": %s",
+ name, u_errorName(status))));
uenum_close(en);
}
}
List *attnumlist; /* integer list of attnums to copy */
char *filename; /* filename, or NULL for STDIN/STDOUT */
bool is_program; /* is 'filename' a program to popen? */
- copy_data_source_cb data_source_cb; /* function for reading data*/
+ copy_data_source_cb data_source_cb; /* function for reading data */
bool binary; /* binary format? */
bool oids; /* include OIDs? */
bool freeze; /* freeze rows on loading? */
(void) pq_putmessage('d', fe_msgbuf->data, fe_msgbuf->len);
break;
case COPY_CALLBACK:
- Assert(false); /* Not yet supported. */
+ Assert(false); /* Not yet supported. */
break;
}
{
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is used by an active logical replication slot",
- dbname),
+ errmsg("database \"%s\" is used by an active logical replication slot",
+ dbname),
errdetail_plural("There is %d active slot",
"There are %d active slots",
nslots_active, nslots_active)));
* which can happen in some cases.
*
* This will lock out walsenders trying to connect to db-specific
- * slots for logical decoding too, so it's safe for us to drop slots.
+ * slots for logical decoding too, so it's safe for us to drop
+ * slots.
*/
LockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock);
ResolveRecoveryConflictWithDatabase(xlrec->db_id);
if (nodeTag(def->arg) != T_List)
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(def->arg));
- foreach(cell, (List *)def->arg)
+ foreach(cell, (List *) def->arg)
{
Node *str = (Node *) lfirst(cell);
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
- NameListToString(castNode(ObjectWithArgs, object)->objname)),
+ NameListToString(castNode(ObjectWithArgs, object)->objname)),
errhint("Use DROP AGGREGATE to drop aggregate functions.")));
ReleaseSysCache(tup);
RangeVar *parent_rel;
parent_object = list_truncate(list_copy(object),
- list_length(object) - 1);
+ list_length(object) - 1);
if (schema_does_not_exist_skipping(parent_object, msg, name))
return true;
case OBJECT_FUNCTION:
{
ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
!type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
{
case OBJECT_AGGREGATE:
{
ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
!type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
{
case OBJECT_OPERATOR:
{
ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
!type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
{
msg = gettext_noop("trigger \"%s\" for relation \"%s\" does not exist, skipping");
name = strVal(llast(castNode(List, object)));
args = NameListToString(list_truncate(list_copy(castNode(List, object)),
- list_length(castNode(List, object)) - 1));
+ list_length(castNode(List, object)) - 1));
}
break;
case OBJECT_POLICY:
msg = gettext_noop("policy \"%s\" for relation \"%s\" does not exist, skipping");
name = strVal(llast(castNode(List, object)));
args = NameListToString(list_truncate(list_copy(castNode(List, object)),
- list_length(castNode(List, object)) - 1));
+ list_length(castNode(List, object)) - 1));
}
break;
case OBJECT_EVENT_TRIGGER:
msg = gettext_noop("rule \"%s\" for relation \"%s\" does not exist, skipping");
name = strVal(llast(castNode(List, object)));
args = NameListToString(list_truncate(list_copy(castNode(List, object)),
- list_length(castNode(List, object)) - 1));
+ list_length(castNode(List, object)) - 1));
}
break;
case OBJECT_FDW:
}
elog(ERROR, "unrecognized grant object type: %d", (int) objtype);
- return "???"; /* keep compiler quiet */
+ return "???"; /* keep compiler quiet */
}
/*
}
elog(ERROR, "unrecognized grant object type: %d", (int) objtype);
- return "???"; /* keep compiler quiet */
+ return "???"; /* keep compiler quiet */
}
ownerId = GetUserId();
/*
- * Check that there is no other foreign server by this name.
- * Do nothing if IF NOT EXISTS was enforced.
+ * Check that there is no other foreign server by this name. Do nothing if
+ * IF NOT EXISTS was enforced.
*/
if (GetForeignServerByName(stmt->servername, true) != NULL)
{
if (stmt->if_not_exists)
{
ereport(NOTICE,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("user mapping for \"%s\" already exists for server %s, skipping",
- MappingUserName(useId),
- stmt->servername)));
+ (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("user mapping for \"%s\" already exists for server %s, skipping",
+ MappingUserName(useId),
+ stmt->servername)));
heap_close(rel, RowExclusiveLock);
return InvalidObjectAddress;
}
else
ereport(ERROR,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("user mapping for \"%s\" already exists for server %s",
- MappingUserName(useId),
- stmt->servername)));
+ (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("user mapping for \"%s\" already exists for server %s",
+ MappingUserName(useId),
+ stmt->servername)));
}
fdw = GetForeignDataWrapper(srv->fdwid);
if (!OidIsValid(umId))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("user mapping for \"%s\" does not exist for the server",
- MappingUserName(useId))));
+ errmsg("user mapping for \"%s\" does not exist for the server",
+ MappingUserName(useId))));
user_mapping_ddl_aclcheck(useId, srv->serverid, stmt->servername);
if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("user mapping for \"%s\" does not exist for the server",
- MappingUserName(useId))));
+ errmsg("user mapping for \"%s\" does not exist for the server",
+ MappingUserName(useId))));
/* IF EXISTS specified, just note it */
ereport(NOTICE,
- (errmsg("user mapping for \"%s\" does not exist for the server, skipping",
- MappingUserName(useId))));
+ (errmsg("user mapping for \"%s\" does not exist for the server, skipping",
+ MappingUserName(useId))));
return InvalidOid;
}
*publish_delete = true;
/* Parse options */
- foreach (lc, options)
+ foreach(lc, options)
{
DefElem *defel = (DefElem *) lfirst(lc);
errmsg("invalid publish list")));
/* Process the option list. */
- foreach (lc, publish_list)
+ foreach(lc, publish_list)
{
- char *publish_opt = (char *)lfirst(lc);
+ char *publish_opt = (char *) lfirst(lc);
if (strcmp(publish_opt, "insert") == 0)
*publish_insert = true;
if (stmt->for_all_tables && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to create FOR ALL TABLES publication"))));
+ (errmsg("must be superuser to create FOR ALL TABLES publication"))));
rel = heap_open(PublicationRelationId, RowExclusiveLock);
*/
static void
AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
- HeapTuple tup)
+ HeapTuple tup)
{
bool nulls[Natts_pg_publication];
bool replaces[Natts_pg_publication];
bool publish_insert;
bool publish_update;
bool publish_delete;
- ObjectAddress obj;
+ ObjectAddress obj;
parse_publication_options(stmt->options,
&publish_given, &publish_insert,
}
else
{
- List *relids = GetPublicationRelations(HeapTupleGetOid(tup));
+ List *relids = GetPublicationRelations(HeapTupleGetOid(tup));
/*
* We don't want to send too many individual messages, at some point
*/
if (list_length(relids) < MAX_RELCACHE_INVAL_MSGS)
{
- ListCell *lc;
+ ListCell *lc;
- foreach (lc, relids)
+ foreach(lc, relids)
{
- Oid relid = lfirst_oid(lc);
+ Oid relid = lfirst_oid(lc);
CacheInvalidateRelcacheByRelid(relid);
}
PublicationAddTables(pubid, rels, false, stmt);
else if (stmt->tableAction == DEFELEM_DROP)
PublicationDropTables(pubid, rels, false);
- else /* DEFELEM_SET */
+ else /* DEFELEM_SET */
{
List *oldrelids = GetPublicationRelations(pubid);
List *delrels = NIL;
{
Relation oldrel = heap_open(oldrelid,
ShareUpdateExclusiveLock);
+
delrels = lappend(delrels, oldrel);
}
}
PublicationDropTables(pubid, delrels, true);
/*
- * Don't bother calculating the difference for adding, we'll catch
- * and skip existing ones when doing catalog update.
+ * Don't bother calculating the difference for adding, we'll catch and
+ * skip existing ones when doing catalog update.
*/
PublicationAddTables(pubid, rels, true, stmt);
void
AlterPublication(AlterPublicationStmt *stmt)
{
- Relation rel;
- HeapTuple tup;
+ Relation rel;
+ HeapTuple tup;
rel = heap_open(PublicationRelationId, RowExclusiveLock);
void
RemovePublicationRelById(Oid proid)
{
- Relation rel;
- HeapTuple tup;
- Form_pg_publication_rel pubrel;
+ Relation rel;
+ HeapTuple tup;
+ Form_pg_publication_rel pubrel;
rel = heap_open(PublicationRelRelationId, RowExclusiveLock);
PublicationAddTables(Oid pubid, List *rels, bool if_not_exists,
AlterPublicationStmt *stmt)
{
- ListCell *lc;
+ ListCell *lc;
Assert(!stmt || !stmt->for_all_tables);
foreach(lc, rels)
{
Relation rel = (Relation) lfirst(lc);
- ObjectAddress obj;
+ ObjectAddress obj;
/* Must be owner of the table or superuser. */
if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId()))
static void
PublicationDropTables(Oid pubid, List *rels, bool missing_ok)
{
- ObjectAddress obj;
- ListCell *lc;
- Oid prid;
+ ObjectAddress obj;
+ ListCell *lc;
+ Oid prid;
foreach(lc, rels)
{
/*
* Internal workhorse for changing a publication owner
*/
- static void
+static void
AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
{
Form_pg_publication form;
if (form->puballtables && !superuser_arg(newOwnerId))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to change owner of publication \"%s\"",
- NameStr(form->pubname)),
+ errmsg("permission denied to change owner of publication \"%s\"",
+ NameStr(form->pubname)),
errhint("The owner of a FOR ALL TABLES publication must be a superuser.")));
}
ObjectAddress
AlterPublicationOwner(const char *name, Oid newOwnerId)
{
- Oid subid;
- HeapTuple tup;
- Relation rel;
+ Oid subid;
+ HeapTuple tup;
+ Relation rel;
ObjectAddress address;
rel = heap_open(PublicationRelationId, RowExclusiveLock);
void
AlterPublicationOwner_oid(Oid subid, Oid newOwnerId)
{
- HeapTuple tup;
- Relation rel;
+ HeapTuple tup;
+ Relation rel;
rel = heap_open(PublicationRelationId, RowExclusiveLock);
Buffer *buf, HeapTuple seqdatatuple);
static LOCKMODE alter_sequence_get_lock_level(List *options);
static void init_params(ParseState *pstate, List *options, bool for_identity,
- bool isInit,
- Form_pg_sequence seqform,
- bool *changed_seqform,
- Form_pg_sequence_data seqdataform, List **owned_by);
+ bool isInit,
+ Form_pg_sequence seqform,
+ bool *changed_seqform,
+ Form_pg_sequence_data seqdataform, List **owned_by);
static void do_setval(Oid relid, int64 next, bool iscalled);
static void process_owned_by(Relation seqrel, List *owned_by, bool for_identity);
{
FormData_pg_sequence seqform;
FormData_pg_sequence_data seqdataform;
- bool changed_seqform = false; /* not used here */
+ bool changed_seqform = false; /* not used here */
List *owned_by;
CreateStmt *stmt = makeNode(CreateStmt);
Oid seqoid;
snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
ereport(ERROR,
- (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
- errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
- RelationGetRelationName(seqrel), buf)));
+ (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
+ errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
+ RelationGetRelationName(seqrel), buf)));
}
next = minv;
}
snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
ereport(ERROR,
- (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
- errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
- RelationGetRelationName(seqrel), buf)));
+ (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
+ errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
+ RelationGetRelationName(seqrel), buf)));
}
next = maxv;
}
/* AS type */
if (as_type != NULL)
{
- Oid newtypid = typenameTypeId(pstate, defGetTypeName(as_type));
+ Oid newtypid = typenameTypeId(pstate, defGetTypeName(as_type));
if (newtypid != INT2OID &&
newtypid != INT4OID &&
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
for_identity
? errmsg("identity column type must be smallint, integer, or bigint")
- : errmsg("sequence type must be smallint, integer, or bigint")));
+ : errmsg("sequence type must be smallint, integer, or bigint")));
if (!isInit)
{
*/
if ((seqform->seqtypid == INT2OID && seqform->seqmax == PG_INT16_MAX) ||
(seqform->seqtypid == INT4OID && seqform->seqmax == PG_INT32_MAX) ||
- (seqform->seqtypid == INT8OID && seqform->seqmax == PG_INT64_MAX))
+ (seqform->seqtypid == INT8OID && seqform->seqmax == PG_INT64_MAX))
reset_max_value = true;
if ((seqform->seqtypid == INT2OID && seqform->seqmin == PG_INT16_MIN) ||
(seqform->seqtypid == INT4OID && seqform->seqmin == PG_INT32_MIN) ||
- (seqform->seqtypid == INT8OID && seqform->seqmin == PG_INT64_MIN))
+ (seqform->seqtypid == INT8OID && seqform->seqmin == PG_INT64_MIN))
reset_min_value = true;
}
seqform->seqmax = PG_INT64_MAX;
}
else
- seqform->seqmax = -1; /* descending seq */
+ seqform->seqmax = -1; /* descending seq */
*changed_seqform = true;
seqdataform->log_cnt = 0;
}
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("MAXVALUE (%s) is out of range for sequence data type %s",
- bufx, format_type_be(seqform->seqtypid))));
+ errmsg("MAXVALUE (%s) is out of range for sequence data type %s",
+ bufx, format_type_be(seqform->seqtypid))));
}
/* MINVALUE (null arg means NO MINVALUE) */
seqform->seqmin = PG_INT64_MIN;
}
else
- seqform->seqmin = 1; /* ascending seq */
+ seqform->seqmin = 1; /* ascending seq */
*changed_seqform = true;
seqdataform->log_cnt = 0;
}
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("MINVALUE (%s) is out of range for sequence data type %s",
- bufm, format_type_be(seqform->seqtypid))));
+ errmsg("MINVALUE (%s) is out of range for sequence data type %s",
+ bufm, format_type_be(seqform->seqtypid))));
}
/* crosscheck min/max */
else if (isInit)
{
if (seqform->seqincrement > 0)
- seqform->seqstart = seqform->seqmin; /* ascending seq */
+ seqform->seqstart = seqform->seqmin; /* ascending seq */
else
- seqform->seqstart = seqform->seqmax; /* descending seq */
+ seqform->seqstart = seqform->seqmax; /* descending seq */
*changed_seqform = true;
}
{
ereport(NOTICE,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("statistics object \"%s\" already exists, skipping",
- namestr)));
+ errmsg("statistics object \"%s\" already exists, skipping",
+ namestr)));
return InvalidObjectAddress;
}
*synchronous_commit = NULL;
/* Parse options */
- foreach (lc, options)
+ foreach(lc, options)
{
DefElem *defel = (DefElem *) lfirst(lc);
}
/*
- * Do additional checking for disallowed combination when
- * slot_name = NONE was used.
+ * Do additional checking for disallowed combination when slot_name = NONE
+ * was used.
*/
if (slot_name && *slot_name_given && !*slot_name)
{
values[Anum_pg_subscription_subsynccommit - 1] =
CStringGetTextDatum(synchronous_commit);
values[Anum_pg_subscription_subpublications - 1] =
- publicationListToArray(publications);
+ publicationListToArray(publications);
tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
*/
if (connect)
{
- XLogRecPtr lsn;
- char *err;
- WalReceiverConn *wrconn;
- List *tables;
- ListCell *lc;
- char table_state;
+ XLogRecPtr lsn;
+ char *err;
+ WalReceiverConn *wrconn;
+ List *tables;
+ ListCell *lc;
+ char table_state;
/* Try to connect to the publisher. */
wrconn = walrcv_connect(conninfo, true, stmt->subname, &err);
* info.
*/
tables = fetch_table_list(wrconn, publications);
- foreach (lc, tables)
+ foreach(lc, tables)
{
RangeVar *rv = (RangeVar *) lfirst(lc);
Oid relid;
(errmsg("synchronized table states")));
/*
- * If requested, create permanent slot for the subscription.
- * We won't use the initial snapshot for anything, so no need
- * to export it.
+ * If requested, create permanent slot for the subscription. We
+ * won't use the initial snapshot for anything, so no need to
+ * export it.
*/
if (create_slot)
{
walrcv_create_slot(wrconn, slotname, false,
CRS_NOEXPORT_SNAPSHOT, &lsn);
ereport(NOTICE,
- (errmsg("created replication slot \"%s\" on publisher",
- slotname)));
+ (errmsg("created replication slot \"%s\" on publisher",
+ slotname)));
}
}
PG_CATCH();
static void
AlterSubscription_refresh(Subscription *sub, bool copy_data)
{
- char *err;
+ char *err;
List *pubrel_names;
List *subrel_states;
Oid *subrel_local_oids;
subrel_states = GetSubscriptionRelations(sub->oid);
/*
- * Build qsorted array of local table oids for faster lookup.
- * This can potentially contain all tables in the database so
- * speed of lookup is important.
+ * Build qsorted array of local table oids for faster lookup. This can
+ * potentially contain all tables in the database so speed of lookup is
+ * important.
*/
subrel_local_oids = palloc(list_length(subrel_states) * sizeof(Oid));
off = 0;
foreach(lc, subrel_states)
{
SubscriptionRelState *relstate = (SubscriptionRelState *) lfirst(lc);
+
subrel_local_oids[off++] = relstate->relid;
}
qsort(subrel_local_oids, list_length(subrel_states),
sizeof(Oid), oid_cmp);
/*
- * Walk over the remote tables and try to match them to locally
- * known tables. If the table is not known locally create a new state
- * for it.
+ * Walk over the remote tables and try to match them to locally known
+ * tables. If the table is not known locally create a new state for it.
*
* Also builds array of local oids of remote tables for the next step.
*/
off = 0;
pubrel_local_oids = palloc(list_length(pubrel_names) * sizeof(Oid));
- foreach (lc, pubrel_names)
+ foreach(lc, pubrel_names)
{
RangeVar *rv = (RangeVar *) lfirst(lc);
Oid relid;
list_length(subrel_states), sizeof(Oid), oid_cmp))
{
SetSubscriptionRelState(sub->oid, relid,
- copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY,
+ copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY,
InvalidXLogRecPtr);
ereport(NOTICE,
(errmsg("added subscription for table %s.%s",
}
/*
- * Next remove state for tables we should not care about anymore using
- * the data we collected above
+ * Next remove state for tables we should not care about anymore using the
+ * data we collected above
*/
qsort(pubrel_local_oids, list_length(pubrel_names),
sizeof(Oid), oid_cmp);
for (off = 0; off < list_length(subrel_states); off++)
{
- Oid relid = subrel_local_oids[off];
+ Oid relid = subrel_local_oids[off];
if (!bsearch(&relid, pubrel_local_oids,
list_length(pubrel_names), sizeof(Oid), oid_cmp))
{
- char *namespace;
+ char *namespace;
RemoveSubscriptionRel(sub->oid, relid);
HeapTuple tup;
Oid subid;
bool update_tuple = false;
- Subscription *sub;
+ Subscription *sub;
rel = heap_open(SubscriptionRelationId, RowExclusiveLock);
if (slotname)
values[Anum_pg_subscription_subslotname - 1] =
- DirectFunctionCall1(namein, CStringGetDatum(slotname));
+ DirectFunctionCall1(namein, CStringGetDatum(slotname));
else
nulls[Anum_pg_subscription_subslotname - 1] = true;
replaces[Anum_pg_subscription_subslotname - 1] = true;
case ALTER_SUBSCRIPTION_ENABLED:
{
- bool enabled,
- enabled_given;
+ bool enabled,
+ enabled_given;
parse_subscription_options(stmt->options, NULL,
&enabled_given, &enabled, NULL,
case ALTER_SUBSCRIPTION_PUBLICATION:
case ALTER_SUBSCRIPTION_PUBLICATION_REFRESH:
{
- bool copy_data;
+ bool copy_data;
parse_subscription_options(stmt->options, NULL, NULL, NULL,
NULL, NULL, NULL, ©_data,
NULL);
values[Anum_pg_subscription_subpublications - 1] =
- publicationListToArray(stmt->publication);
+ publicationListToArray(stmt->publication);
replaces[Anum_pg_subscription_subpublications - 1] = true;
update_tuple = true;
case ALTER_SUBSCRIPTION_REFRESH:
{
- bool copy_data;
+ bool copy_data;
if (!sub->enabled)
ereport(ERROR,
char *slotname;
char originname[NAMEDATALEN];
char *err = NULL;
- RepOriginId originid;
- WalReceiverConn *wrconn = NULL;
- StringInfoData cmd;
+ RepOriginId originid;
+ WalReceiverConn *wrconn = NULL;
+ StringInfoData cmd;
/*
- * Lock pg_subscription with AccessExclusiveLock to ensure
- * that the launcher doesn't restart new worker during dropping
- * the subscription
+ * Lock pg_subscription with AccessExclusiveLock to ensure that the
+ * launcher doesn't restart new worker during dropping the subscription
*/
rel = heap_open(SubscriptionRelationId, AccessExclusiveLock);
InvokeObjectDropHook(SubscriptionRelationId, subid, 0);
/*
- * Lock the subscription so nobody else can do anything with it
- * (including the replication workers).
+ * Lock the subscription so nobody else can do anything with it (including
+ * the replication workers).
*/
LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
if (originid != InvalidRepOriginId)
replorigin_drop(originid);
- /* If there is no slot associated with the subscription, we can finish here. */
+ /*
+ * If there is no slot associated with the subscription, we can finish
+ * here.
+ */
if (!slotname)
{
heap_close(rel, NoLock);
}
/*
- * Otherwise drop the replication slot at the publisher node using
- * the replication connection.
+ * Otherwise drop the replication slot at the publisher node using the
+ * replication connection.
*/
load_file("libpqwalreceiver", false);
PG_TRY();
{
- WalRcvExecResult *res;
+ WalRcvExecResult *res;
+
res = walrcv_exec(wrconn, cmd.data, 0, NULL);
if (res->status != WALRCV_OK_COMMAND)
ereport(ERROR,
- (errmsg("could not drop the replication slot \"%s\" on publisher",
- slotname),
- errdetail("The error was: %s", res->err)));
+ (errmsg("could not drop the replication slot \"%s\" on publisher",
+ slotname),
+ errdetail("The error was: %s", res->err)));
else
ereport(NOTICE,
(errmsg("dropped replication slot \"%s\" on publisher",
if (!superuser_arg(newOwnerId))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to change owner of subscription \"%s\"",
- NameStr(form->subname)),
- errhint("The owner of a subscription must be a superuser.")));
+ errmsg("permission denied to change owner of subscription \"%s\"",
+ NameStr(form->subname)),
+ errhint("The owner of a subscription must be a superuser.")));
form->subowner = newOwnerId;
CatalogTupleUpdate(rel, &tup->t_self, tup);
static List *
fetch_table_list(WalReceiverConn *wrconn, List *publications)
{
- WalRcvExecResult *res;
- StringInfoData cmd;
- TupleTableSlot *slot;
- Oid tableRow[2] = {TEXTOID, TEXTOID};
- ListCell *lc;
- bool first;
- List *tablelist = NIL;
+ WalRcvExecResult *res;
+ StringInfoData cmd;
+ TupleTableSlot *slot;
+ Oid tableRow[2] = {TEXTOID, TEXTOID};
+ ListCell *lc;
+ bool first;
+ List *tablelist = NIL;
Assert(list_length(publications) > 0);
initStringInfo(&cmd);
appendStringInfo(&cmd, "SELECT DISTINCT t.schemaname, t.tablename\n"
- " FROM pg_catalog.pg_publication_tables t\n"
- " WHERE t.pubname IN (");
+ " FROM pg_catalog.pg_publication_tables t\n"
+ " WHERE t.pubname IN (");
first = true;
- foreach (lc, publications)
+ foreach(lc, publications)
{
- char *pubname = strVal(lfirst(lc));
+ char *pubname = strVal(lfirst(lc));
if (first)
first = false;
static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
Node *newDefault, LOCKMODE lockmode);
static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
- Node *def, LOCKMODE lockmode);
+ Node *def, LOCKMODE lockmode);
static ObjectAddress ATExecSetIdentity(Relation rel, const char *colName,
- Node *def, LOCKMODE lockmode);
+ Node *def, LOCKMODE lockmode);
static ObjectAddress ATExecDropIdentity(Relation rel, const char *colName, bool missing_ok, LOCKMODE lockmode);
static void ATPrepSetStatistics(Relation rel, const char *colName,
Node *newValue, LOCKMODE lockmode);
descriptor->tdhasoid = (localHasOids || parentOidCount > 0);
/*
- * If a partitioned table doesn't have the system OID column, then none
- * of its partitions should have it.
+ * If a partitioned table doesn't have the system OID column, then none of
+ * its partitions should have it.
*/
if (stmt->partbound && parentOidCount == 0 && localHasOids)
ereport(ERROR,
}
/*
- * Similarly, if we previously locked some other partition's heap, and
- * the name we're looking up no longer refers to that relation, release
- * the now-useless lock.
+ * Similarly, if we previously locked some other partition's heap, and the
+ * name we're looking up no longer refers to that relation, release the
+ * now-useless lock.
*/
if (relOid != oldRelOid && OidIsValid(state->partParentOid))
{
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" specified more than once",
- coldef->colname)));
+ errmsg("column \"%s\" specified more than once",
+ coldef->colname)));
}
prev = rest;
rest = next;
values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate,
econtext,
- &isnull[ex->attnum - 1]);
+ &isnull[ex->attnum - 1]);
}
/*
ATPrepDropNotNull(Relation rel, bool recurse, bool recursing)
{
/*
- * If the parent is a partitioned table, like check constraints, we do
- * not support removing the NOT NULL while partitions exist.
+ * If the parent is a partitioned table, like check constraints, we do not
+ * support removing the NOT NULL while partitions exist.
*/
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- PartitionDesc partdesc = RelationGetPartitionDesc(rel);
+ PartitionDesc partdesc = RelationGetPartitionDesc(rel);
Assert(partdesc != NULL);
if (partdesc->nparts > 0 && !recurse && !recursing)
if (get_attidentity(RelationGetRelid(rel), attnum))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("column \"%s\" of relation \"%s\" is an identity column",
- colName, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" is an identity column",
+ colName, RelationGetRelationName(rel))));
/*
* Check that the attribute is not in a primary key
*/
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- PartitionDesc partdesc = RelationGetPartitionDesc(rel);
+ PartitionDesc partdesc = RelationGetPartitionDesc(rel);
if (partdesc && partdesc->nparts > 0 && !recurse && !recursing)
ereport(ERROR,
if (get_attidentity(RelationGetRelid(rel), attnum))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("column \"%s\" of relation \"%s\" is an identity column",
- colName, RelationGetRelationName(rel)),
+ errmsg("column \"%s\" of relation \"%s\" is an identity column",
+ colName, RelationGetRelationName(rel)),
newDefault ? 0 : errhint("Use ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY instead.")));
/*
if (attTup->atthasdef)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("column \"%s\" of relation \"%s\" already has a default value",
- colName, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" already has a default value",
+ colName, RelationGetRelationName(rel))));
attTup->attidentity = cdef->identity;
CatalogTupleUpdate(attrelation, &tuple->t_self, tuple);
ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmode)
{
ListCell *option;
- DefElem *generatedEl = NULL;
+ DefElem *generatedEl = NULL;
HeapTuple tuple;
Form_pg_attribute attTup;
AttrNumber attnum;
foreach(option, castNode(List, def))
{
- DefElem *defel = lfirst_node(DefElem, option);
+ DefElem *defel = lfirst_node(DefElem, option);
if (strcmp(defel->defname, "generated") == 0)
{
if (!attTup->attidentity)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("column \"%s\" of relation \"%s\" is not an identity column",
- colName, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" is not an identity column",
+ colName, RelationGetRelationName(rel))));
if (generatedEl)
{
inhseqno + 1,
catalogRelation,
parent_rel->rd_rel->relkind ==
- RELKIND_PARTITIONED_TABLE);
+ RELKIND_PARTITIONED_TABLE);
/* Now we're done with pg_inherits */
heap_close(catalogRelation, RowExclusiveLock);
foreach(lc, varList)
{
- TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
+ TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
if (!(tt->isTable))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is a partitioned table",
RelationGetRelationName(rel)),
- errdetail("Triggers on partitioned tables cannot have transition tables.")));
+ errdetail("Triggers on partitioned tables cannot have transition tables.")));
if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is a foreign table",
RelationGetRelationName(rel)),
- errdetail("Triggers on foreign tables cannot have transition tables.")));
+ errdetail("Triggers on foreign tables cannot have transition tables.")));
if (rel->rd_rel->relkind == RELKIND_VIEW)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is a view",
RelationGetRelationName(rel)),
- errdetail("Triggers on views cannot have transition tables.")));
+ errdetail("Triggers on views cannot have transition tables.")));
if (stmt->timing != TRIGGER_TYPE_AFTER)
ereport(ERROR,
if (newtablename != NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("NEW TABLE cannot be specified multiple times")));
+ errmsg("NEW TABLE cannot be specified multiple times")));
newtablename = tt->name;
}
if (oldtablename != NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("OLD TABLE cannot be specified multiple times")));
+ errmsg("OLD TABLE cannot be specified multiple times")));
oldtablename = tt->name;
}
strcmp(newtablename, oldtablename) == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
+ errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
}
/*
if (oldtablename)
values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(oldtablename));
+ CStringGetDatum(oldtablename));
else
nulls[Anum_pg_trigger_tgoldtable - 1] = true;
if (newtablename)
values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(newtablename));
+ CStringGetDatum(newtablename));
else
nulls[Anum_pg_trigger_tgnewtable - 1] = true;
AfterTriggerEventList events; /* deferred-event list */
int query_depth; /* current query list index */
AfterTriggerEventList *query_stack; /* events pending from each query */
- Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from each query */
+ Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from
+ * each query */
Tuplestorestate **old_tuplestores; /* all old tuples from each query */
Tuplestorestate **new_tuplestores; /* all new tuples from each query */
int maxquerydepth; /* allocated len of above array */
case AFTER_TRIGGER_FDW_FETCH:
{
Tuplestorestate *fdw_tuplestore =
- GetTriggerTransitionTuplestore
- (afterTriggers.fdw_tuplestores);
+ GetTriggerTransitionTuplestore
+ (afterTriggers.fdw_tuplestores);
if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
trig_tuple_slot1))
Assert(oldtup != NULL);
old_tuplestore =
GetTriggerTransitionTuplestore
- (afterTriggers.old_tuplestores);
+ (afterTriggers.old_tuplestores);
tuplestore_puttuple(old_tuplestore, oldtup);
}
if ((event == TRIGGER_EVENT_INSERT &&
Assert(newtup != NULL);
new_tuplestore =
GetTriggerTransitionTuplestore
- (afterTriggers.new_tuplestores);
+ (afterTriggers.new_tuplestores);
tuplestore_puttuple(new_tuplestore, newtup);
}
/* If transition tables are the only reason we're here, return. */
if ((event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
- (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
- (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
+ (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
+ (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
return;
}
{
fdw_tuplestore =
GetTriggerTransitionTuplestore
- (afterTriggers.fdw_tuplestores);
+ (afterTriggers.fdw_tuplestores);
new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
}
else
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
- (Node *) makeString(pstrdup(startvalue)), -1));
+ (Node *) makeString(pstrdup(startvalue)), -1));
state = CS_WAITKEY;
}
}
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
- (Node *) makeString(pstrdup(startvalue)), -1));
+ (Node *) makeString(pstrdup(startvalue)), -1));
state = CS_WAITKEY;
}
}
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
- (Node *) makeString(pstrdup(startvalue)), -1));
+ (Node *) makeString(pstrdup(startvalue)), -1));
state = CS_WAITKEY;
}
else
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
- (Node *) makeString(pstrdup(startvalue)), -1));
+ (Node *) makeString(pstrdup(startvalue)), -1));
}
else if (state != CS_WAITKEY)
ereport(ERROR,
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
(errmsg("role \"%s\" is a member of role \"%s\"",
- rolename, get_rolespec_name(memberRole)))));
+ rolename, get_rolespec_name(memberRole)))));
/*
* Check if entry for this role/member already exists; if so, give
{
ereport(NOTICE,
(errmsg("role \"%s\" is already a member of role \"%s\"",
- get_rolespec_name(memberRole), rolename)));
+ get_rolespec_name(memberRole), rolename)));
ReleaseSysCache(authmem_tuple);
continue;
}
{
ereport(WARNING,
(errmsg("role \"%s\" is not a member of role \"%s\"",
- get_rolespec_name(memberRole), rolename)));
+ get_rolespec_name(memberRole), rolename)));
continue;
}
*/
initStringInfo(&buf);
appendStringInfo(&buf,
- _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
+ _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
nkeep, OldestXmin);
appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
nunused);
/* If we haven't prefetched this lot yet, do so now. */
if (prefetchedUntil > blkno)
{
- BlockNumber prefetchStart;
- BlockNumber pblkno;
+ BlockNumber prefetchStart;
+ BlockNumber pblkno;
prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
if (stmt->withCheckOption == LOCAL_CHECK_OPTION)
stmt->options = lappend(stmt->options,
makeDefElem("check_option",
- (Node *) makeString("local"), -1));
+ (Node *) makeString("local"), -1));
else if (stmt->withCheckOption == CASCADED_CHECK_OPTION)
stmt->options = lappend(stmt->options,
makeDefElem("check_option",
- (Node *) makeString("cascaded"), -1));
+ (Node *) makeString("cascaded"), -1));
/*
* Check that the view is auto-updatable if WITH CHECK OPTION was
return true;
case T_CustomScan:
- {
- CustomPath *customPath = castNode(CustomPath, pathnode);
- if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
- return true;
- return false;
- }
+ {
+ CustomPath *customPath = castNode(CustomPath, pathnode);
+
+ if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
+ return true;
+ return false;
+ }
case T_Result:
/*
hashtable->in_hash_funcs = hashtable->tab_hash_funcs;
hashtable->cur_eq_funcs = hashtable->tab_eq_funcs;
- key = NULL; /* flag to reference inputslot */
+ key = NULL; /* flag to reference inputslot */
if (isnew)
{
estate->es_num_root_result_relations = 0;
if (plannedstmt->nonleafResultRelations)
{
- int num_roots = list_length(plannedstmt->rootResultRelations);
+ int num_roots = list_length(plannedstmt->rootResultRelations);
/*
* Firstly, build ResultRelInfos for all the partitioned table
* triggers, if any.
*/
resultRelInfos = (ResultRelInfo *)
- palloc(num_roots * sizeof(ResultRelInfo));
+ palloc(num_roots * sizeof(ResultRelInfo));
resultRelInfo = resultRelInfos;
foreach(l, plannedstmt->rootResultRelations)
{
/* Simply lock the rest of them. */
foreach(l, plannedstmt->nonleafResultRelations)
{
- Index resultRelIndex = lfirst_int(l);
+ Index resultRelIndex = lfirst_int(l);
/* We locked the roots above. */
if (!list_member_int(plannedstmt->rootResultRelations,
if (resultRelInfo->ri_PartitionRoot)
{
HeapTuple tuple = ExecFetchSlotTuple(slot);
- TupleConversionMap *map;
+ TupleConversionMap *map;
rel = resultRelInfo->ri_PartitionRoot;
tupdesc = RelationGetDescr(rel);
/* a reverse map */
map = convert_tuples_by_name(orig_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
if (map != NULL)
{
tuple = do_convert_tuple(tuple, map);
{
HeapTuple tuple = ExecFetchSlotTuple(slot);
TupleDesc old_tupdesc = RelationGetDescr(rel);
- TupleConversionMap *map;
+ TupleConversionMap *map;
rel = resultRelInfo->ri_PartitionRoot;
tupdesc = RelationGetDescr(rel);
/* a reverse map */
map = convert_tuples_by_name(old_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
if (map != NULL)
{
tuple = do_convert_tuple(tuple, map);
{
HeapTuple tuple = ExecFetchSlotTuple(slot);
TupleDesc old_tupdesc = RelationGetDescr(rel);
- TupleConversionMap *map;
+ TupleConversionMap *map;
rel = resultRelInfo->ri_PartitionRoot;
tupdesc = RelationGetDescr(rel);
/* a reverse map */
map = convert_tuples_by_name(old_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
if (map != NULL)
{
tuple = do_convert_tuple(tuple, map);
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("no partition of relation \"%s\" found for row",
RelationGetRelationName(failed_rel)),
- val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
+ val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
}
return result;
bool *isnull,
int maxfieldlen)
{
- StringInfoData buf;
- PartitionKey key = RelationGetPartitionKey(rel);
+ StringInfoData buf;
+ PartitionKey key = RelationGetPartitionKey(rel);
int partnatts = get_partition_natts(key);
int i;
Oid relid = RelationGetRelid(rel);
/*
* Also store the per-worker detail.
*
- * Worker instrumentation should be allocated in the same context as
- * the regular instrumentation information, which is the per-query
- * context. Switch into per-query memory context.
+ * Worker instrumentation should be allocated in the same context as the
+ * regular instrumentation information, which is the per-query context.
+ * Switch into per-query memory context.
*/
oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
case T_NamedTuplestoreScan:
result = (PlanState *) ExecInitNamedTuplestoreScan((NamedTuplestoreScan *) node,
- estate, eflags);
+ estate, eflags);
break;
case T_WorkTableScan:
TupleTableSlot *searchslot,
TupleTableSlot *outslot)
{
- HeapTuple scantuple;
- ScanKeyData skey[INDEX_MAX_KEYS];
- IndexScanDesc scan;
- SnapshotData snap;
- TransactionId xwait;
- Relation idxrel;
- bool found;
-
- /* Open the index.*/
+ HeapTuple scantuple;
+ ScanKeyData skey[INDEX_MAX_KEYS];
+ IndexScanDesc scan;
+ SnapshotData snap;
+ TransactionId xwait;
+ Relation idxrel;
+ bool found;
+
+ /* Open the index. */
idxrel = index_open(idxoid, RowExclusiveLock);
/* Start an index scan. */
snap.xmin : snap.xmax;
/*
- * If the tuple is locked, wait for locking transaction to finish
- * and retry.
+ * If the tuple is locked, wait for locking transaction to finish and
+ * retry.
*/
if (TransactionIdIsValid(xwait))
{
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
- Buffer buf;
+ Buffer buf;
HeapUpdateFailureData hufd;
HTSU_Result res;
HeapTupleData locktup;
res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
lockmode,
LockWaitBlock,
- false /* don't follow updates */,
+ false /* don't follow updates */ ,
&buf, &hufd);
/* the tuple slot already has the buffer pinned */
ReleaseBuffer(buf);
* to use.
*/
static bool
-tuple_equals_slot(TupleDesc desc, HeapTuple tup, TupleTableSlot *slot)
+tuple_equals_slot(TupleDesc desc, HeapTuple tup, TupleTableSlot *slot)
{
Datum values[MaxTupleAttributeNumber];
bool isnull[MaxTupleAttributeNumber];
RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode,
TupleTableSlot *searchslot, TupleTableSlot *outslot)
{
- HeapTuple scantuple;
- HeapScanDesc scan;
- SnapshotData snap;
- TransactionId xwait;
- bool found;
- TupleDesc desc = RelationGetDescr(rel);
+ HeapTuple scantuple;
+ HeapScanDesc scan;
+ SnapshotData snap;
+ TransactionId xwait;
+ bool found;
+ TupleDesc desc = RelationGetDescr(rel);
Assert(equalTupleDescs(desc, outslot->tts_tupleDescriptor));
snap.xmin : snap.xmax;
/*
- * If the tuple is locked, wait for locking transaction to finish
- * and retry.
+ * If the tuple is locked, wait for locking transaction to finish and
+ * retry.
*/
if (TransactionIdIsValid(xwait))
{
/* Found tuple, try to lock it in the lockmode. */
if (found)
{
- Buffer buf;
+ Buffer buf;
HeapUpdateFailureData hufd;
HTSU_Result res;
HeapTupleData locktup;
res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
lockmode,
LockWaitBlock,
- false /* don't follow updates */,
+ false /* don't follow updates */ ,
&buf, &hufd);
/* the tuple slot already has the buffer pinned */
ReleaseBuffer(buf);
void
ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
{
- bool skip_tuple = false;
- HeapTuple tuple;
- ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
- Relation rel = resultRelInfo->ri_RelationDesc;
+ bool skip_tuple = false;
+ HeapTuple tuple;
+ ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+ Relation rel = resultRelInfo->ri_RelationDesc;
/* For now we support only tables. */
Assert(rel->rd_rel->relkind == RELKIND_RELATION);
{
slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
- if (slot == NULL) /* "do nothing" */
+ if (slot == NULL) /* "do nothing" */
skip_tuple = true;
}
ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
TupleTableSlot *searchslot, TupleTableSlot *slot)
{
- bool skip_tuple = false;
- HeapTuple tuple;
- ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
- Relation rel = resultRelInfo->ri_RelationDesc;
+ bool skip_tuple = false;
+ HeapTuple tuple;
+ ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+ Relation rel = resultRelInfo->ri_RelationDesc;
/* For now we support only tables. */
Assert(rel->rd_rel->relkind == RELKIND_RELATION);
&searchslot->tts_tuple->t_self,
NULL, slot);
- if (slot == NULL) /* "do nothing" */
+ if (slot == NULL) /* "do nothing" */
skip_tuple = true;
}
ExecSimpleRelationDelete(EState *estate, EPQState *epqstate,
TupleTableSlot *searchslot)
{
- bool skip_tuple = false;
- ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
- Relation rel = resultRelInfo->ri_RelationDesc;
+ bool skip_tuple = false;
+ ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+ Relation rel = resultRelInfo->ri_RelationDesc;
/* For now we support only tables. */
Assert(rel->rd_rel->relkind == RELKIND_RELATION);
if (relkind != RELKIND_RELATION)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("logical replication target relation \"%s.%s\" is not a table",
- nspname, relname)));
+ errmsg("logical replication target relation \"%s.%s\" is not a table",
+ nspname, relname)));
}
ExecLockNonLeafAppendTables(List *partitioned_rels, EState *estate)
{
PlannedStmt *stmt = estate->es_plannedstmt;
- ListCell *lc;
+ ListCell *lc;
foreach(lc, partitioned_rels)
{
ListCell *l;
- Index rti = lfirst_int(lc);
- bool is_result_rel = false;
- Oid relid = getrelid(rti, estate->es_range_table);
+ Index rti = lfirst_int(lc);
+ bool is_result_rel = false;
+ Oid relid = getrelid(rti, estate->es_range_table);
/* If this is a result relation, already locked in InitPlan */
foreach(l, stmt->nonleafResultRelations)
* NULL and not auto-replace it with a later input value. Only the first
* non-NULL input will be auto-substituted.
*/
-} AggStatePerGroupData;
+} AggStatePerGroupData;
/*
* AggStatePerPhaseData - per-grouping-set-phase state
AttrNumber *hashGrpColIdxInput; /* hash col indices in input slot */
AttrNumber *hashGrpColIdxHash; /* indices in hashtbl tuples */
Agg *aggnode; /* original Agg node, for numGroups etc. */
-} AggStatePerHashData;
+} AggStatePerHashData;
static void select_current_set(AggState *aggstate, int setno, bool is_hash);
Assert(!(eflags & EXEC_FLAG_MARK));
/*
- * Lock the non-leaf tables in the partition tree controlled by this
- * node. It's a no-op for non-partitioned parent tables.
+ * Lock the non-leaf tables in the partition tree controlled by this node.
+ * It's a no-op for non-partitioned parent tables.
*/
ExecLockNonLeafAppendTables(node->partitioned_rels, estate);
* In case of shared mode, we can not ensure that the current
* blockno of the main iterator and that of the prefetch iterator
* are same. It's possible that whatever blockno we are
- * prefetching will be processed by another process. Therefore, we
- * don't validate the blockno here as we do in non-parallel case.
+ * prefetching will be processed by another process. Therefore,
+ * we don't validate the blockno here as we do in non-parallel
+ * case.
*/
if (prefetch_iterator)
tbm_shared_iterate(prefetch_iterator);
void
ExecEndGather(GatherState *node)
{
- ExecEndNode(outerPlanState(node)); /* let children clean up first */
+ ExecEndNode(outerPlanState(node)); /* let children clean up first */
ExecShutdownGather(node);
ExecFreeExprContext(&node->ps);
ExecClearTuple(node->ps.ps_ResultTupleSlot);
int readCounter;
int nTuples;
bool done;
-} GMReaderTupleBuffer;
+} GMReaderTupleBuffer;
/*
* When we read tuples from workers, it's a good idea to read several at once
ResetExprContext(econtext);
/*
- * Get next tuple, either from one of our workers, or by running the
- * plan ourselves.
+ * Get next tuple, either from one of our workers, or by running the plan
+ * ourselves.
*/
slot = gather_merge_getnext(node);
if (TupIsNull(slot))
return NULL;
/*
- * form the result tuple using ExecProject(), and return it --- unless
- * the projection produces an empty set, in which case we must loop
- * back around for another tuple
+ * form the result tuple using ExecProject(), and return it --- unless the
+ * projection produces an empty set, in which case we must loop back
+ * around for another tuple
*/
econtext->ecxt_outertuple = slot;
return ExecProject(node->ps.ps_ProjInfo);
void
ExecEndGatherMerge(GatherMergeState *node)
{
- ExecEndNode(outerPlanState(node)); /* let children clean up first */
+ ExecEndNode(outerPlanState(node)); /* let children clean up first */
ExecShutdownGatherMerge(node);
ExecFreeExprContext(&node->ps);
ExecClearTuple(node->ps.ps_ResultTupleSlot);
HeapTuple tup = NULL;
/*
- * If we're being asked to generate a tuple from the leader, then we
- * just call ExecProcNode as normal to produce one.
+ * If we're being asked to generate a tuple from the leader, then we just
+ * call ExecProcNode as normal to produce one.
*/
if (gm_state->nreaders == reader)
{
&tuple_buffer->done));
/*
- * Attempt to read more tuples in nowait mode and store them in
- * the tuple array.
+ * Attempt to read more tuples in nowait mode and store them in the
+ * tuple array.
*/
if (HeapTupleIsValid(tup))
form_tuple_array(gm_state, reader);
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
/*
- * Lock the non-leaf tables in the partition tree controlled by this
- * node. It's a no-op for non-partitioned parent tables.
+ * Lock the non-leaf tables in the partition tree controlled by this node.
+ * It's a no-op for non-partitioned parent tables.
*/
ExecLockNonLeafAppendTables(node->partitioned_rels, estate);
static void
fireBSTriggers(ModifyTableState *node)
{
- ResultRelInfo *resultRelInfo = node->resultRelInfo;
+ ResultRelInfo *resultRelInfo = node->resultRelInfo;
/*
* If the node modifies a partitioned table, we must fire its triggers.
static void
fireASTriggers(ModifyTableState *node)
{
- ResultRelInfo *resultRelInfo = node->resultRelInfo;
+ ResultRelInfo *resultRelInfo = node->resultRelInfo;
/*
* If the node modifies a partitioned table, we must fire its triggers.
/* If modifying a partitioned table, initialize the root table info */
if (node->rootResultRelIndex >= 0)
mtstate->rootResultRelInfo = estate->es_root_result_relations +
- node->rootResultRelIndex;
+ node->rootResultRelIndex;
mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
mtstate->mt_nplans = nplans;
/* The root table RT index is at the head of the partitioned_rels list */
if (node->partitioned_rels)
{
- Index root_rti;
- Oid root_oid;
+ Index root_rti;
+ Oid root_oid;
root_rti = linitial_int(node->partitioned_rels);
root_oid = getrelid(root_rti, estate->es_range_table);
- rel = heap_open(root_oid, NoLock); /* locked by InitPlan */
+ rel = heap_open(root_oid, NoLock); /* locked by InitPlan */
}
else
rel = mtstate->resultRelInfo->ri_RelationDesc;
}
/*
- * Build WITH CHECK OPTION constraints for each leaf partition rel.
- * Note that we didn't build the withCheckOptionList for each partition
- * within the planner, but simple translation of the varattnos for each
- * partition will suffice. This only occurs for the INSERT case;
- * UPDATE/DELETE cases are handled above.
+ * Build WITH CHECK OPTION constraints for each leaf partition rel. Note
+ * that we didn't build the withCheckOptionList for each partition within
+ * the planner, but simple translation of the varattnos for each partition
+ * will suffice. This only occurs for the INSERT case; UPDATE/DELETE
+ * cases are handled above.
*/
if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
{
- List *wcoList;
+ List *wcoList;
Assert(operation == CMD_INSERT);
resultRelInfo = mtstate->mt_partitions;
{
TupleTableSlot *resultSlot = node->ps.ps_ResultTupleSlot;
ExprContext *econtext = node->ps.ps_ExprContext;
- bool hassrf PG_USED_FOR_ASSERTS_ONLY;
+ bool hassrf PG_USED_FOR_ASSERTS_ONLY;
bool hasresult;
int argno;
{
long numLeft; /* number of left-input dups in group */
long numRight; /* number of right-input dups in group */
-} SetOpStatePerGroupData;
+} SetOpStatePerGroupData;
static TupleTableSlot *setop_retrieve_direct(SetOpState *setopstate);
PG_TRY();
{
routine->InitOpaque(tstate,
- tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
+ tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
/*
* If evaluating the document expression returns NULL, the table
int colno;
Datum value;
int ordinalitycol =
- ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
+ ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
/*
* Install the document as a possibly-toasted Datum into the tablefunc
ExecClearTuple(tstate->ss.ss_ScanTupleSlot);
/*
- * Obtain the value of each column for this row, installing them into the
- * slot; then add the tuple to the tuplestore.
+ * Obtain the value of each column for this row, installing them into
+ * the slot; then add the tuple to the tuplestore.
*/
for (colno = 0; colno < natts; colno++)
{
}
else
{
- bool isnull;
+ bool isnull;
values[colno] = routine->GetValue(tstate,
colno,
- tupdesc->attrs[colno]->atttypid,
- tupdesc->attrs[colno]->atttypmod,
+ tupdesc->attrs[colno]->atttypid,
+ tupdesc->attrs[colno]->atttypmod,
&isnull);
/* No value? Evaluate and apply the default, if any */
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("null is not allowed in column \"%s\"",
- NameStr(tupdesc->attrs[colno]->attname))));
+ NameStr(tupdesc->attrs[colno]->attname))));
nulls[colno] = isnull;
}
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
{
if (list_length(stmt_list) == 1 &&
- linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+ linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL &&
ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree))
portal->cursorOptions |= CURSOR_OPT_SCROLL;
if (portal->cursorOptions & CURSOR_OPT_SCROLL)
{
if (list_length(stmt_list) == 1 &&
- linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+ linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
stmt_list = pg_analyze_and_rewrite_params(parsetree,
src,
plan->parserSetup,
- plan->parserSetupArg,
- _SPI_current->queryEnv);
+ plan->parserSetupArg,
+ _SPI_current->queryEnv);
}
else
{
if (enr == NULL || enr->md.name == NULL)
return SPI_ERROR_ARGUMENT;
- res = _SPI_begin_call(false); /* keep current memory context */
+ res = _SPI_begin_call(false); /* keep current memory context */
if (res < 0)
return res;
if (name == NULL)
return SPI_ERROR_ARGUMENT;
- res = _SPI_begin_call(false); /* keep current memory context */
+ res = _SPI_begin_call(false); /* keep current memory context */
if (res < 0)
return res;
if (tdata->tg_newtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
- int rc;
+ palloc(sizeof(EphemeralNamedRelationData));
+ int rc;
enr->md.name = tdata->tg_trigger->tgnewtable;
enr->md.reliddesc = tdata->tg_relation->rd_id;
if (tdata->tg_oldtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
- int rc;
+ palloc(sizeof(EphemeralNamedRelationData));
+ int rc;
enr->md.name = tdata->tg_trigger->tgoldtable;
enr->md.reliddesc = tdata->tg_relation->rd_id;
if (current == NULL)
{
iter->is_over = true;
- break; /* end of iteration */
+ break; /* end of iteration */
}
else if (came_from == current->right)
{
* If the user does not exist, or has no password or it's expired, we
* still go through the motions of authentication, to avoid revealing to
* the client that the user didn't exist. If 'md5' is allowed, we choose
- * whether to use 'md5' or 'scram-sha-256' authentication based on
- * current password_encryption setting. The idea is that most genuine
- * users probably have a password of that type, and if we pretend that
- * this user had a password of that type, too, it "blends in" best.
+ * whether to use 'md5' or 'scram-sha-256' authentication based on current
+ * password_encryption setting. The idea is that most genuine users
+ * probably have a password of that type, and if we pretend that this user
+ * had a password of that type, too, it "blends in" best.
*/
if (!shadow_pass)
pwtype = Password_encryption;
/*
* If 'md5' authentication is allowed, decide whether to perform 'md5' or
* 'scram-sha-256' authentication based on the type of password the user
- * has. If it's an MD5 hash, we must do MD5 authentication, and if it's
- * a SCRAM verifier, we must do SCRAM authentication.
+ * has. If it's an MD5 hash, we must do MD5 authentication, and if it's a
+ * SCRAM verifier, we must do SCRAM authentication.
*
* If MD5 authentication is not allowed, always use SCRAM. If the user
* had an MD5 password, CheckSCRAMAuth() will fail.
{
*logdetail = psprintf(_("Role \"%s\" does not exist."),
role);
- return NULL; /* no such user */
+ return NULL; /* no such user */
}
datum = SysCacheGetAttr(AUTHNAME, roleTup,
ReleaseSysCache(roleTup);
*logdetail = psprintf(_("User \"%s\" has no password assigned."),
role);
- return NULL; /* user has no password */
+ return NULL; /* user has no password */
}
shadow_pass = TextDatumGetCString(datum);
*logdetail = psprintf(_("User \"%s\" has an empty password."),
role);
pfree(shadow_pass);
- return NULL; /* empty password */
+ return NULL; /* empty password */
}
/*
if (guessed_type != PASSWORD_TYPE_PLAINTEXT)
{
/*
- * Cannot convert an already-encrypted password from one
- * format to another, so return it as it is.
+ * Cannot convert an already-encrypted password from one format to
+ * another, so return it as it is.
*/
return pstrdup(password);
}
break;
case PASSWORD_TYPE_PLAINTEXT:
+
/*
* We never store passwords in plaintext, so this shouldn't
* happen.
tok = lfirst(cell);
if (am_walsender && !am_db_walsender)
{
- /* physical replication walsender connections can only match replication keyword */
+ /*
+ * physical replication walsender connections can only match
+ * replication keyword
+ */
if (token_is_keyword(tok, "replication"))
return true;
}
int ret;
List *parsed_servers;
ListCell *l;
- char *dupval = pstrdup(val);
+ char *dupval = pstrdup(val);
REQUIRE_AUTH_OPTION(uaRADIUS, "radiusservers", "radius");
{
List *parsed_ports;
ListCell *l;
- char *dupval = pstrdup(val);
+ char *dupval = pstrdup(val);
REQUIRE_AUTH_OPTION(uaRADIUS, "radiusports", "radius");
else if (strcmp(name, "radiussecrets") == 0)
{
List *parsed_secrets;
- char *dupval = pstrdup(val);
+ char *dupval = pstrdup(val);
REQUIRE_AUTH_OPTION(uaRADIUS, "radiussecrets", "radius");
else if (strcmp(name, "radiusidentifiers") == 0)
{
List *parsed_identifiers;
- char *dupval = pstrdup(val);
+ char *dupval = pstrdup(val);
REQUIRE_AUTH_OPTION(uaRADIUS, "radiusidentifiers", "radius");
#ifdef HAVE_UTIME_H
#include
#endif
-#ifdef _MSC_VER /* mstcpip.h is missing on mingw */
+#ifdef _MSC_VER /* mstcpip.h is missing on mingw */
#include
#endif
static GatherMerge *
_copyGatherMerge(const GatherMerge *from)
{
- GatherMerge *newnode = makeNode(GatherMerge);
+ GatherMerge *newnode = makeNode(GatherMerge);
/*
* copy node superclass fields
static NamedTuplestoreScan *
_copyNamedTuplestoreScan(const NamedTuplestoreScan *from)
{
- NamedTuplestoreScan *newnode = makeNode(NamedTuplestoreScan);
+ NamedTuplestoreScan *newnode = makeNode(NamedTuplestoreScan);
/*
* copy node superclass fields
Assert(!OidIsValid(collation)); /* result is always boolean */
break;
case T_NextValueExpr:
- Assert(!OidIsValid(collation)); /* result is always an integer type */
+ Assert(!OidIsValid(collation)); /* result is always an integer
+ * type */
break;
default:
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr));
static void
_outGatherMerge(StringInfo str, const GatherMerge *node)
{
- int i;
+ int i;
WRITE_NODE_TYPE("GATHERMERGE");
*/
typedef struct PTEntryArray
{
- pg_atomic_uint32 refcount; /* no. of iterator attached */
+ pg_atomic_uint32 refcount; /* no. of iterator attached */
PagetableEntry ptentry[FLEXIBLE_ARRAY_MEMBER];
} PTEntryArray;
*/
typedef struct PTIterationArray
{
- pg_atomic_uint32 refcount; /* no. of iterator attached */
+ pg_atomic_uint32 refcount; /* no. of iterator attached */
int index[FLEXIBLE_ARRAY_MEMBER]; /* index array */
} PTIterationArray;
/*
* For every shared iterator, referring to pagetable and iterator array,
- * increase the refcount by 1 so that while freeing the shared iterator
- * we don't free pagetable and iterator array until its refcount becomes 0.
+ * increase the refcount by 1 so that while freeing the shared iterator we
+ * don't free pagetable and iterator array until its refcount becomes 0.
*/
if (ptbase != NULL)
pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
return;
case RTE_NAMEDTUPLESTORE:
+
/*
* tuplestore cannot be shared, at least without more
* infrastructure to support that.
total_subpaths,
pathkeys,
NULL,
- partitioned_rels));
+ partitioned_rels));
}
}
* For each useful ordering, we can consider an order-preserving Gather
* Merge.
*/
- foreach (lc, rel->partial_pathlist)
+ foreach(lc, rel->partial_pathlist)
{
- Path *subpath = (Path *) lfirst(lc);
- GatherMergePath *path;
+ Path *subpath = (Path *) lfirst(lc);
+ GatherMergePath *path;
if (subpath->pathkeys == NIL)
continue;
{
/*
* For index only scans compute workers based on number of index pages
- * fetched; the number of heap pages we fetch might be so small as
- * to effectively rule out parallelism, which we don't want to do.
+ * fetched; the number of heap pages we fetch might be so small as to
+ * effectively rule out parallelism, which we don't want to do.
*/
if (indexonly)
rand_heap_pages = -1;
/* For partial paths, scale row estimate. */
if (path->path.parallel_workers > 0)
{
- double parallel_divisor = get_parallel_divisor(&path->path);
+ double parallel_divisor = get_parallel_divisor(&path->path);
path->path.rows =
clamp_row_est(path->path.rows / parallel_divisor);
/* For partial paths, scale row estimate. */
if (path->jpath.path.parallel_workers > 0)
{
- double parallel_divisor = get_parallel_divisor(&path->jpath.path);
+ double parallel_divisor = get_parallel_divisor(&path->jpath.path);
path->jpath.path.rows =
clamp_row_est(path->jpath.path.rows / parallel_divisor);
/* For partial paths, scale row estimate. */
if (path->jpath.path.parallel_workers > 0)
{
- double parallel_divisor = get_parallel_divisor(&path->jpath.path);
+ double parallel_divisor = get_parallel_divisor(&path->jpath.path);
path->jpath.path.rows =
clamp_row_est(path->jpath.path.rows / parallel_divisor);
true);
/*
- * if, after costing the path, we find that it's not worth
- * using parallel workers, just free it.
+ * if, after costing the path, we find that it's not worth using
+ * parallel workers, just free it.
*/
if (ipath->path.parallel_workers > 0)
add_partial_path(rel, (Path *) ipath);
static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root,
- Path *best_path, List *tlist, List *scan_clauses);
+ Path *best_path, List *tlist, List *scan_clauses);
static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
static CteScan *make_ctescan(List *qptlist, List *qpqual,
Index scanrelid, int ctePlanId, int cteParam);
static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
- Index scanrelid, char *enrname);
+ Index scanrelid, char *enrname);
static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
Index scanrelid, int wtParam);
static Append *make_append(List *appendplans, List *tlist, List *partitioned_rels);
/*
* bitmap_subplan_mark_shared
- * Set isshared flag in bitmap subplan so that it will be created in
+ * Set isshared flag in bitmap subplan so that it will be created in
* shared memory.
*/
static void
node->partitioned_rels = partitioned_rels;
node->resultRelations = resultRelations;
node->resultRelIndex = -1; /* will be set correctly in setrefs.c */
- node->rootResultRelIndex = -1; /* will be set correctly in setrefs.c */
+ node->rootResultRelIndex = -1; /* will be set correctly in setrefs.c */
node->plans = subplans;
if (!onconflict)
{
#define EXPRKIND_QUAL 0
#define EXPRKIND_TARGET 1
#define EXPRKIND_RTFUNC 2
-#define EXPRKIND_RTFUNC_LATERAL 3
+#define EXPRKIND_RTFUNC_LATERAL 3
#define EXPRKIND_VALUES 4
-#define EXPRKIND_VALUES_LATERAL 5
+#define EXPRKIND_VALUES_LATERAL 5
#define EXPRKIND_LIMIT 6
#define EXPRKIND_APPINFO 7
#define EXPRKIND_PHV 8
ListCell *lc;
Index rti;
RangeTblEntry *parent_rte;
- List *partitioned_rels = NIL;
+ List *partitioned_rels = NIL;
Assert(parse->commandType != CMD_INSERT);
/*
* If the parent RTE is a partitioned table, we should use that as the
* nominal relation, because the RTEs added for partitioned tables
- * (including the root parent) as child members of the inheritance set
- * do not appear anywhere else in the plan. The situation is exactly
- * the opposite in the case of non-partitioned inheritance parent as
- * described below.
+ * (including the root parent) as child members of the inheritance set do
+ * not appear anywhere else in the plan. The situation is exactly the
+ * opposite in the case of non-partitioned inheritance parent as described
+ * below.
*/
parent_rte = rt_fetch(parentRTindex, root->parse->rtable);
if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE)
* is used elsewhere in the plan, so using the original parent RTE
* would give rise to confusing use of multiple aliases in EXPLAIN
* output for what the user will think is the "same" table. OTOH,
- * it's not a problem in the partitioned inheritance case, because
- * the duplicate child RTE added for the parent does not appear
- * anywhere else in the plan tree.
+ * it's not a problem in the partitioned inheritance case, because the
+ * duplicate child RTE added for the parent does not appear anywhere
+ * else in the plan tree.
*/
if (nominalRelation < 0)
nominalRelation = appinfo->child_relid;
ListCell *lc;
ListCell *lc2;
- Assert(gd); /* keep Coverity happy */
+ Assert(gd); /* keep Coverity happy */
dNumGroups = 0;
/*
* We treat this as a knapsack problem: the knapsack capacity
* represents work_mem, the item weights are the estimated memory
- * usage of the hashtables needed to implement a single rollup, and
- * we really ought to use the cost saving as the item value;
+ * usage of the hashtables needed to implement a single rollup,
+ * and we really ought to use the cost saving as the item value;
* however, currently the costs assigned to sort nodes don't
* reflect the comparison costs well, and so we treat all items as
* of equal value (each rollup we hash instead saves us one sort).
foreach(l, root->pcinfo_list)
{
- PartitionedChildRelInfo *pc = lfirst(l);
+ PartitionedChildRelInfo *pc = lfirst(l);
if (pc->parent_relid == rti)
{
* If the main target relation is a partitioned table, the
* following list contains the RT indexes of partitioned child
* relations including the root, which are not included in the
- * above list. We also keep RT indexes of the roots separately
- * to be identitied as such during the executor initialization.
+ * above list. We also keep RT indexes of the roots
+ * separately to be identified as such during the executor
+ * initialization.
*/
if (splan->partitioned_rels != NIL)
{
list_copy(splan->partitioned_rels));
/* Remember where this root will be in the global list. */
splan->rootResultRelIndex =
- list_length(root->glob->rootResultRelations);
+ list_length(root->glob->rootResultRelations);
root->glob->rootResultRelations =
- lappend_int(root->glob->rootResultRelations,
+ lappend_int(root->glob->rootResultRelations,
linitial_int(splan->partitioned_rels));
}
}
newrc->waitPolicy = oldrc->waitPolicy;
/*
- * We mark RowMarks for partitioned child tables as parent RowMarks
- * so that the executor ignores them (except their existence means
- * that the child tables be locked using appropriate mode).
+ * We mark RowMarks for partitioned child tables as parent
+ * RowMarks so that the executor ignores them (except their
+ * existence means that the child tables be locked using
+ * appropriate mode).
*/
newrc->isParent = (childrte->relkind == RELKIND_PARTITIONED_TABLE);
* parent RT index to the list of RT indexes of its partitioned child
* tables. When creating an Append or a ModifyTable path for the parent,
* we copy the child RT index list verbatim to the path so that it could
- * be carried over to the executor so that the latter could identify
- * the partitioned child tables.
+ * be carried over to the executor so that the latter could identify the
+ * partitioned child tables.
*/
if (partitioned_child_rels != NIL)
{
Relids required_outer, double *rows)
{
GatherMergePath *pathnode = makeNode(GatherMergePath);
- Cost input_startup_cost = 0;
- Cost input_total_cost = 0;
+ Cost input_startup_cost = 0;
+ Cost input_total_cost = 0;
Assert(subpath->parallel_safe);
Assert(pathkeys);
else
{
/* We'll need to insert a Sort node, so include cost for that */
- Path sort_path; /* dummy for result of cost_sort */
+ Path sort_path; /* dummy for result of cost_sort */
cost_sort(&sort_path,
root,
Index varno = rel->relid;
Relation relation;
TupleConstr *constr;
- List *pcqual;
+ List *pcqual;
/*
* We assume the relation has already been safely locked.
/*
* Pass top parent's relids down the inheritance hierarchy. If the parent
- * has top_parent_relids set, it's a direct or an indirect child of the top
- * parent indicated by top_parent_relids. By extension this child is also
- * an indirect child of that parent.
+ * has top_parent_relids set, it's a direct or an indirect child of the
+ * top parent indicated by top_parent_relids. By extension this child is
+ * also an indirect child of that parent.
*/
if (parent)
{
* Recursively transform the components of the tree.
*/
sostmt = castNode(SetOperationStmt,
- transformSetOperationTree(pstate, stmt, true, NULL));
+ transformSetOperationTree(pstate, stmt, true, NULL));
Assert(sostmt);
qry->setOperations = (Node *) sostmt;
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/*------
translator: %s is a SQL row locking clause such as FOR UPDATE */
- errmsg("%s cannot be applied to a named tuplestore",
- LCS_asString(lc->strength)),
+ errmsg("%s cannot be applied to a named tuplestore",
+ LCS_asString(lc->strength)),
parser_errposition(pstate, thisrel->location)));
break;
default:
static Node *transformJoinOnClause(ParseState *pstate, JoinExpr *j,
List *namespace);
static RangeTblEntry *getRTEForSpecialRelationTypes(ParseState *pstate,
- RangeVar *rv);
+ RangeVar *rv);
static RangeTblEntry *transformTableEntry(ParseState *pstate, RangeVar *r);
static RangeTblEntry *transformCTEReference(ParseState *pstate, RangeVar *r,
CommonTableExpr *cte, Index levelsup);
static RangeTblEntry *transformRangeFunction(ParseState *pstate,
RangeFunction *r);
static RangeTblEntry *transformRangeTableFunc(ParseState *pstate,
- RangeTableFunc *t);
+ RangeTableFunc *t);
static TableSampleClause *transformRangeTableSample(ParseState *pstate,
RangeTableSample *rts);
static Node *transformFromClauseItem(ParseState *pstate, Node *n,
/* Now create the lvar = rvar join condition */
e = makeSimpleA_Expr(AEXPR_OP, "=",
- (Node *) copyObject(lvar), (Node *) copyObject(rvar),
+ (Node *) copyObject(lvar), (Node *) copyObject(rvar),
-1);
/* Prepare to combine into an AND clause, if multiple join columns */
/* Transform and apply typecast to the row-generating expression ... */
Assert(rtf->rowexpr != NULL);
tf->rowexpr = coerce_to_specific_type(pstate,
- transformExpr(pstate, rtf->rowexpr, EXPR_KIND_FROM_FUNCTION),
+ transformExpr(pstate, rtf->rowexpr, EXPR_KIND_FROM_FUNCTION),
TEXTOID,
constructName);
assign_expr_collations(pstate, tf->rowexpr);
/* ... and to the document itself */
Assert(rtf->docexpr != NULL);
tf->docexpr = coerce_to_specific_type(pstate,
- transformExpr(pstate, rtf->docexpr, EXPR_KIND_FROM_FUNCTION),
+ transformExpr(pstate, rtf->docexpr, EXPR_KIND_FROM_FUNCTION),
docType,
constructName);
assign_expr_collations(pstate, tf->docexpr);
makeString(pstrdup(rawc->colname)));
/*
- * Determine the type and typmod for the new column. FOR
- * ORDINALITY columns are INTEGER per spec; the others are
- * user-specified.
+ * Determine the type and typmod for the new column. FOR ORDINALITY
+ * columns are INTEGER per spec; the others are user-specified.
*/
if (rawc->for_ordinality)
{
tf->coltypes = lappend_oid(tf->coltypes, typid);
tf->coltypmods = lappend_int(tf->coltypmods, typmod);
tf->colcollations = lappend_oid(tf->colcollations,
- type_is_collatable(typid) ? DEFAULT_COLLATION_OID : InvalidOid);
+ type_is_collatable(typid) ? DEFAULT_COLLATION_OID : InvalidOid);
/* Transform the PATH and DEFAULT expressions */
if (rawc->colexpr)
{
colexpr = coerce_to_specific_type(pstate,
- transformExpr(pstate, rawc->colexpr,
- EXPR_KIND_FROM_FUNCTION),
+ transformExpr(pstate, rawc->colexpr,
+ EXPR_KIND_FROM_FUNCTION),
TEXTOID,
constructName);
assign_expr_collations(pstate, colexpr);
if (rawc->coldefexpr)
{
coldefexpr = coerce_to_specific_type_typmod(pstate,
- transformExpr(pstate, rawc->coldefexpr,
- EXPR_KIND_FROM_FUNCTION),
+ transformExpr(pstate, rawc->coldefexpr,
+ EXPR_KIND_FROM_FUNCTION),
typid, typmod,
constructName);
assign_expr_collations(pstate, coldefexpr);
static RangeTblEntry *
getRTEForSpecialRelationTypes(ParseState *pstate, RangeVar *rv)
{
-
CommonTableExpr *cte;
Index levelsup;
RangeTblEntry *rte = NULL;
/* ROW() op ROW() is handled specially */
cmp = make_row_comparison_op(pstate,
a->name,
- copyObject(((RowExpr *) lexpr)->args),
+ copyObject(((RowExpr *) lexpr)->args),
((RowExpr *) rexpr)->args,
a->location);
}
*/
if (get_visible_ENR_metadata(pstate->p_queryEnv, relation->relname))
rel = NULL;
+
/*
* An unqualified name might have been meant as a reference to
* some not-yet-in-scope CTE. The bare "does not exist" message
default:
elog(ERROR, "unexpected enrtype: %d", enrmd->enrtype);
- return NULL; /* for fussy compilers */
+ return NULL; /* for fussy compilers */
}
/*
char **snamespace_p, char **sname_p)
{
ListCell *option;
- DefElem *nameEl = NULL;
+ DefElem *nameEl = NULL;
Oid snamespaceid;
char *snamespace;
char *sname;
* used by pg_dump. Else, generate a name.
*
* Although we use ChooseRelationName, it's not guaranteed that the
- * selected sequence name won't conflict; given sufficiently long
- * field names, two different serial columns in the same table could
- * be assigned the same sequence name, and we'd not notice since we
- * aren't creating the sequence quite yet. In practice this seems
- * quite unlikely to be a problem, especially since few people would
- * need two serial columns in one table.
+ * selected sequence name won't conflict; given sufficiently long field
+ * names, two different serial columns in the same table could be assigned
+ * the same sequence name, and we'd not notice since we aren't creating
+ * the sequence quite yet. In practice this seems quite unlikely to be a
+ * problem, especially since few people would need two serial columns in
+ * one table.
*/
foreach(option, seqoptions)
if (nameEl)
{
- RangeVar *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg));
+ RangeVar *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg));
+
snamespace = rv->schemaname;
sname = rv->relname;
seqoptions = list_delete_ptr(seqoptions, nameEl);
cxt->relation->relname, column->colname)));
/*
- * Build a CREATE SEQUENCE command to create the sequence object, and
- * add it to the list of things to be done before this CREATE/ALTER
- * TABLE.
+ * Build a CREATE SEQUENCE command to create the sequence object, and add
+ * it to the list of things to be done before this CREATE/ALTER TABLE.
*/
seqstmt = makeNode(CreateSeqStmt);
seqstmt->for_identity = for_identity;
seqstmt->sequence = makeRangeVar(snamespace, sname, -1);
seqstmt->options = seqoptions;
+
/*
* If a sequence data type was specified, add it to the options. Prepend
* to the list rather than append; in case a user supplied their own AS
seqstmt->options);
/*
- * If this is ALTER ADD COLUMN, make sure the sequence will be owned
- * by the table's owner. The current user might be someone else
- * (perhaps a superuser, or someone who's only a member of the owning
- * role), but the SEQUENCE OWNED BY mechanisms will bleat unless table
- * and sequence have exactly the same owning role.
+ * If this is ALTER ADD COLUMN, make sure the sequence will be owned by
+ * the table's owner. The current user might be someone else (perhaps a
+ * superuser, or someone who's only a member of the owning role), but the
+ * SEQUENCE OWNED BY mechanisms will bleat unless table and sequence have
+ * exactly the same owning role.
*/
if (cxt->rel)
seqstmt->ownerId = cxt->rel->rd_rel->relowner;
cxt->blist = lappend(cxt->blist, seqstmt);
/*
- * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence
- * as owned by this column, and add it to the list of things to be
- * done after this CREATE/ALTER TABLE.
+ * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence as
+ * owned by this column, and add it to the list of things to be done after
+ * this CREATE/ALTER TABLE.
*/
altseqstmt = makeNode(AlterSeqStmt);
altseqstmt->sequence = makeRangeVar(snamespace, sname, -1);
break;
case CONSTR_IDENTITY:
- {
- Type ctype;
- Oid typeOid;
+ {
+ Type ctype;
+ Oid typeOid;
- ctype = typenameType(cxt->pstate, column->typeName, NULL);
- typeOid = HeapTupleGetOid(ctype);
- ReleaseSysCache(ctype);
+ ctype = typenameType(cxt->pstate, column->typeName, NULL);
+ typeOid = HeapTupleGetOid(ctype);
+ ReleaseSysCache(ctype);
- if (saw_identity)
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("multiple identity specifications for column \"%s\" of table \"%s\"",
+ if (saw_identity)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("multiple identity specifications for column \"%s\" of table \"%s\"",
column->colname, cxt->relation->relname),
- parser_errposition(cxt->pstate,
- constraint->location)));
+ parser_errposition(cxt->pstate,
+ constraint->location)));
- generateSerialExtraStmts(cxt, column,
- typeOid, constraint->options, true,
- NULL, NULL);
+ generateSerialExtraStmts(cxt, column,
+ typeOid, constraint->options, true,
+ NULL, NULL);
- column->identity = constraint->generated_when;
- saw_identity = true;
- column->is_not_null = TRUE;
- break;
- }
+ column->identity = constraint->generated_when;
+ saw_identity = true;
+ column->is_not_null = TRUE;
+ break;
+ }
case CONSTR_CHECK:
cxt->ckconstraints = lappend(cxt->ckconstraints, constraint);
if (attribute->attidentity &&
(table_like_clause->options & CREATE_TABLE_LIKE_IDENTITY))
{
- Oid seq_relid;
+ Oid seq_relid;
List *seq_options;
/*
stmt->objtype = OBJECT_COLUMN;
stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname),
- makeString(cxt->relation->relname),
+ makeString(cxt->relation->relname),
makeString(def->colname));
stmt->comment = comment;
stmt->objtype = OBJECT_TABCONSTRAINT;
stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname),
- makeString(cxt->relation->relname),
+ makeString(cxt->relation->relname),
makeString(n->conname));
stmt->comment = comment;
* change the data type of the sequence.
*/
attnum = get_attnum(relid, cmd->name);
- /* if attribute not found, something will error about it later */
+
+ /*
+ * if attribute not found, something will error about it
+ * later
+ */
if (attnum != InvalidAttrNumber && get_attidentity(relid, attnum))
{
Oid seq_relid = getOwnedSequence(relid, attnum);
AlterSeqStmt *altseqstmt = makeNode(AlterSeqStmt);
altseqstmt->sequence = makeRangeVar(get_namespace_name(get_rel_namespace(seq_relid)),
- get_rel_name(seq_relid),
+ get_rel_name(seq_relid),
-1);
altseqstmt->options = list_make1(makeDefElem("as", (Node *) makeTypeNameFromOid(typeOid, -1), -1));
altseqstmt->for_identity = true;
case AT_AddIdentity:
{
- Constraint *def = castNode(Constraint, cmd->def);
- ColumnDef *newdef = makeNode(ColumnDef);
+ Constraint *def = castNode(Constraint, cmd->def);
+ ColumnDef *newdef = makeNode(ColumnDef);
AttrNumber attnum;
newdef->colname = cmd->name;
cmd->def = (Node *) newdef;
attnum = get_attnum(relid, cmd->name);
- /* if attribute not found, something will error about it later */
+
+ /*
+ * if attribute not found, something will error about it
+ * later
+ */
if (attnum != InvalidAttrNumber)
generateSerialExtraStmts(&cxt, newdef,
get_atttype(relid, attnum),
*/
foreach(lc, castNode(List, cmd->def))
{
- DefElem *def = lfirst_node(DefElem, lc);
+ DefElem *def = lfirst_node(DefElem, lc);
if (strcmp(def->defname, "generated") == 0)
newdef = lappend(newdef, def);
seqstmt = makeNode(AlterSeqStmt);
seq_relid = linitial_oid(seqlist);
seqstmt->sequence = makeRangeVar(get_namespace_name(get_rel_namespace(seq_relid)),
- get_rel_name(seq_relid), -1);
+ get_rel_name(seq_relid), -1);
seqstmt->options = newseqopts;
seqstmt->for_identity = true;
seqstmt->missing_ok = false;
cxt.alist = lappend(cxt.alist, seqstmt);
}
}
- /* If column was not found or was not an identity column, we
- * just let the ALTER TABLE command error out later. */
+
+ /*
+ * If column was not found or was not an identity column,
+ * we just let the ALTER TABLE command error out later.
+ */
cmd->def = (Node *) newdef;
newcmds = lappend(newcmds, cmd);
else if (seen_unbounded)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot specify finite value after UNBOUNDED"),
- parser_errposition(pstate, exprLocation((Node *) ldatum))));
+ errmsg("cannot specify finite value after UNBOUNDED"),
+ parser_errposition(pstate, exprLocation((Node *) ldatum))));
}
seen_unbounded = false;
foreach(cell1, spec->upperdatums)
else if (seen_unbounded)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot specify finite value after UNBOUNDED"),
- parser_errposition(pstate, exprLocation((Node *) rdatum))));
+ errmsg("cannot specify finite value after UNBOUNDED"),
+ parser_errposition(pstate, exprLocation((Node *) rdatum))));
}
i = j = 0;
* Attempt to create a new unnamed semaphore.
*/
static void
-PosixSemaphoreCreate(sem_t * sem)
+PosixSemaphoreCreate(sem_t *sem)
{
if (sem_init(sem, 1, 1) < 0)
elog(FATAL, "sem_init failed: %m");
* PosixSemaphoreKill - removes a semaphore
*/
static void
-PosixSemaphoreKill(sem_t * sem)
+PosixSemaphoreKill(sem_t *sem)
{
#ifdef USE_NAMED_POSIX_SEMAPHORES
/* Got to use sem_close for named semaphores */
{
RegisteredBgWorker *rw;
BackgroundWorkerSlot *slot;
- int notify_pid;
+ int notify_pid;
rw = slist_container(RegisteredBgWorker, rw_lnode, cur->cur);
* check whether there has been any WAL inserted since the last time
* we've logged a running xacts.
*
- * We do this logging in the bgwriter as it is the only process that is
- * run regularly and returns to its mainloop all the time. E.g.
+ * We do this logging in the bgwriter as it is the only process that
+ * is run regularly and returns to its mainloop all the time. E.g.
* Checkpointer, when active, is barely ever in its mainloop and thus
* makes it hard to log regularly.
*/
*/
rc = WaitLatch(MyLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
- BgWriterDelay /* ms */, WAIT_EVENT_BGWRITER_MAIN);
+ BgWriterDelay /* ms */ , WAIT_EVENT_BGWRITER_MAIN);
/*
* If no latch event and BgBufferSync says nothing's happening, extend
rc = WaitLatch(MyLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
- cur_timeout * 1000L /* convert to ms */,
+ cur_timeout * 1000L /* convert to ms */ ,
WAIT_EVENT_CHECKPOINTER_MAIN);
/*
*/
typedef struct TabStatHashEntry
{
- Oid t_id;
- PgStat_TableStatus* tsa_entry;
+ Oid t_id;
+ PgStat_TableStatus *tsa_entry;
} TabStatHashEntry;
/*
static PgStat_TableStatus *
get_tabstat_entry(Oid rel_id, bool isshared)
{
- TabStatHashEntry* hash_entry;
+ TabStatHashEntry *hash_entry;
PgStat_TableStatus *entry;
TabStatusArray *tsa;
- bool found;
+ bool found;
/*
* Create hash table if we don't have it already.
*/
if (pgStatTabHash == NULL)
{
- HASHCTL ctl;
+ HASHCTL ctl;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
PgStat_TableStatus *
find_tabstat_entry(Oid rel_id)
{
- TabStatHashEntry* hash_entry;
+ TabStatHashEntry *hash_entry;
/* If hashtable doesn't exist, there are no entries at all */
- if(!pgStatTabHash)
+ if (!pgStatTabHash)
return NULL;
hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_FIND, NULL);
- if(!hash_entry)
+ if (!hash_entry)
return NULL;
/* Note that this step could also return NULL, but that's correct */
break;
default:
elog(FATAL, "unrecognized process type: %d",
- (int) MyAuxProcType);
+ (int) MyAuxProcType);
proc_exit(1);
}
}
/* We have userid for client-backends, wal-sender and bgworker processes */
if (beentry->st_backendType == B_BACKEND
- || beentry->st_backendType == B_WAL_SENDER
- || beentry->st_backendType == B_BG_WORKER)
+ || beentry->st_backendType == B_WAL_SENDER
+ || beentry->st_backendType == B_BG_WORKER)
beentry->st_userid = GetSessionUserId();
else
beentry->st_userid = InvalidOid;
break;
case PG_WAIT_ACTIVITY:
{
- WaitEventActivity w = (WaitEventActivity) wait_event_info;
+ WaitEventActivity w = (WaitEventActivity) wait_event_info;
event_name = pgstat_get_wait_activity(w);
break;
}
case PG_WAIT_CLIENT:
{
- WaitEventClient w = (WaitEventClient) wait_event_info;
+ WaitEventClient w = (WaitEventClient) wait_event_info;
event_name = pgstat_get_wait_client(w);
break;
break;
case PG_WAIT_IPC:
{
- WaitEventIPC w = (WaitEventIPC) wait_event_info;
+ WaitEventIPC w = (WaitEventIPC) wait_event_info;
event_name = pgstat_get_wait_ipc(w);
break;
}
case PG_WAIT_TIMEOUT:
{
- WaitEventTimeout w = (WaitEventTimeout) wait_event_info;
+ WaitEventTimeout w = (WaitEventTimeout) wait_event_info;
event_name = pgstat_get_wait_timeout(w);
break;
case WAIT_EVENT_LOGICAL_APPLY_MAIN:
event_name = "LogicalApplyMain";
break;
- /* no default case, so that compiler will warn */
+ /* no default case, so that compiler will warn */
}
return event_name;
case WAIT_EVENT_WAL_SENDER_WRITE_DATA:
event_name = "WalSenderWriteData";
break;
- /* no default case, so that compiler will warn */
+ /* no default case, so that compiler will warn */
}
return event_name;
case WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE:
event_name = "LogicalSyncStateChange";
break;
- /* no default case, so that compiler will warn */
+ /* no default case, so that compiler will warn */
}
return event_name;
case WAIT_EVENT_RECOVERY_APPLY_DELAY:
event_name = "RecoveryApplyDelay";
break;
- /* no default case, so that compiler will warn */
+ /* no default case, so that compiler will warn */
}
return event_name;
return backendDesc;
}
+
/* ------------------------------------------------------------
* Local support functions follow
* ------------------------------------------------------------
wr = WaitLatchOrSocket(MyLatch,
WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
pgStatSock,
- 2 * 1000L /* msec */,
+ 2 * 1000L /* msec */ ,
WAIT_EVENT_PGSTAT_MAIN);
#endif
ereport(LOG,
(errcode_for_file_access(),
errmsg("could not remove file \"%s\": %m",
- LOG_METAINFO_DATAFILE)));
+ LOG_METAINFO_DATAFILE)));
/*
* If enabled, start up syslogger collection subprocess
int exitstatus) /* child's exit status */
{
char namebuf[MAXPGPATH];
- slist_mutable_iter iter;
+ slist_mutable_iter iter;
slist_foreach_modify(iter, &BackgroundWorkerList)
{
rw->rw_backend = NULL;
rw->rw_pid = 0;
rw->rw_child_slot = 0;
- ReportBackgroundWorkerExit(&iter); /* report child death */
+ ReportBackgroundWorkerExit(&iter); /* report child death */
LogChildExit(EXIT_STATUS_0(exitstatus) ? DEBUG1 : LOG,
namebuf, pid, exitstatus);
#ifdef HAVE_STRONG_RANDOM
return pg_strong_random((char *) cancel_key, sizeof(int32));
#else
+
/*
* If built with --disable-strong-random, use plain old erand48.
*
- * We cannot use pg_backend_random() in postmaster, because it stores
- * its state in shared memory.
+ * We cannot use pg_backend_random() in postmaster, because it stores its
+ * state in shared memory.
*/
static unsigned short seed[3];
if (canAcceptConnections() == CAC_OK)
{
/*
- * Compute the cancel key that will be assigned to this session.
- * We probably don't need cancel keys for autovac workers, but
- * we'd better have something random in the field to prevent
- * unfriendly people from sending cancels to them.
+ * Compute the cancel key that will be assigned to this session. We
+ * probably don't need cancel keys for autovac workers, but we'd
+ * better have something random in the field to prevent unfriendly
+ * people from sending cancels to them.
*/
if (!RandomCancelKey(&MyCancelKey))
{
static void
update_metainfo_datafile(void)
{
- FILE *fh;
+ FILE *fh;
if (!(Log_destination & LOG_DESTINATION_STDERR) &&
!(Log_destination & LOG_DESTINATION_CSVLOG))
ereport(LOG,
(errcode_for_file_access(),
errmsg("could not remove file \"%s\": %m",
- LOG_METAINFO_DATAFILE)));
+ LOG_METAINFO_DATAFILE)));
return;
}
ereport(LOG,
(errcode_for_file_access(),
errmsg("could not open file \"%s\": %m",
- LOG_METAINFO_DATAFILE_TMP)));
+ LOG_METAINFO_DATAFILE_TMP)));
return;
}
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write file \"%s\": %m",
+ errmsg("could not write file \"%s\": %m",
LOG_METAINFO_DATAFILE_TMP)));
fclose(fh);
return;
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write file \"%s\": %m",
+ errmsg("could not write file \"%s\": %m",
LOG_METAINFO_DATAFILE_TMP)));
fclose(fh);
return;
if (rename(LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE) != 0)
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not rename file \"%s\" to \"%s\": %m",
- LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE)));
+ errmsg("could not rename file \"%s\" to \"%s\": %m",
+ LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE)));
}
/* --------------------------------
static void sendFileWithContent(const char *filename, const char *content);
static int64 _tarWriteHeader(const char *filename, const char *linktarget,
struct stat * statbuf, bool sizeonly);
-static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
- bool sizeonly);
+static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf,
+ bool sizeonly);
static void send_int8_string(StringInfoData *buf, int64 intval);
static void SendBackupHeader(List *tablespaces);
static void base_backup_cleanup(int code, Datum arg);
{
/*
* Skip temporary statistics files. PG_STAT_TMP_DIR must be skipped even
- * when stats_temp_directory is set because PGSS_TEXT_FILE is always created
- * there.
+ * when stats_temp_directory is set because PGSS_TEXT_FILE is always
+ * created there.
*/
PG_STAT_TMP_DIR,
/*
- * It is generally not useful to backup the contents of this directory even
- * if the intention is to restore to another master. See backup.sgml for a
- * more detailed description.
+ * It is generally not useful to backup the contents of this directory
+ * even if the intention is to restore to another master. See backup.sgml
+ * for a more detailed description.
*/
"pg_replslot",
dir = AllocateDir("pg_wal");
if (!dir)
ereport(ERROR,
- (errmsg("could not open directory \"%s\": %m", "pg_wal")));
+ (errmsg("could not open directory \"%s\": %m", "pg_wal")));
while ((de = ReadDir(dir, "pg_wal")) != NULL)
{
/* Does it look like a WAL segment, and is it in the range? */
qsort(walFiles, nWalFiles, sizeof(char *), compareWalFileNames);
/*
- * There must be at least one xlog file in the pg_wal directory,
- * since we are doing backup-including-xlog.
+ * There must be at least one xlog file in the pg_wal directory, since
+ * we are doing backup-including-xlog.
*/
if (nWalFiles < 1)
ereport(ERROR,
if (strcmp(de->d_name, excludeDirContents[excludeIdx]) == 0)
{
elog(DEBUG1, "contents of directory \"%s\" excluded from backup", de->d_name);
- size += _tarWriteDir(pathbuf, basepathlen, &statbuf, sizeonly);
+ size += _tarWriteDir(pathbuf, basepathlen, &statbuf, sizeonly);
excludeFound = true;
break;
}
if (!sizeonly)
{
rc = tarCreateHeader(h, filename, linktarget, statbuf->st_size,
- statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
+ statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
statbuf->st_mtime);
switch (rc)
break;
case TAR_SYMLINK_TOO_LONG:
ereport(ERROR,
- (errmsg("symbolic link target too long for tar format: "
- "file name \"%s\", target \"%s\"",
- filename, linktarget)));
+ (errmsg("symbolic link target too long for tar format: "
+ "file name \"%s\", target \"%s\"",
+ filename, linktarget)));
break;
default:
elog(ERROR, "unrecognized tar error: %d", rc);
* write it as a directory anyway.
*/
static int64
-_tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
+_tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf,
bool sizeonly)
{
/* If symlink, write it as a directory anyway */
struct WalReceiverConn
{
/* Current connection to the primary, if any */
- PGconn *streamConn;
+ PGconn *streamConn;
/* Used to remember if the connection is logical or physical */
- bool logical;
+ bool logical;
/* Buffer for currently read records */
- char *recvBuf;
+ char *recvBuf;
};
/* Prototypes for interface functions */
static WalReceiverConn *libpqrcv_connect(const char *conninfo,
- bool logical, const char *appname,
- char **err);
+ bool logical, const char *appname,
+ char **err);
static void libpqrcv_check_conninfo(const char *conninfo);
static char *libpqrcv_get_conninfo(WalReceiverConn *conn);
static char *libpqrcv_identify_system(WalReceiverConn *conn,
- TimeLineID *primary_tli,
- int *server_version);
+ TimeLineID *primary_tli,
+ int *server_version);
static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
TimeLineID tli, char **filename,
char **content, int *len);
static bool libpqrcv_startstreaming(WalReceiverConn *conn,
- const WalRcvStreamOptions *options);
+ const WalRcvStreamOptions *options);
static void libpqrcv_endstreaming(WalReceiverConn *conn,
- TimeLineID *next_tli);
-static int libpqrcv_receive(WalReceiverConn *conn, char **buffer,
- pgsocket *wait_fd);
+ TimeLineID *next_tli);
+static int libpqrcv_receive(WalReceiverConn *conn, char **buffer,
+ pgsocket *wait_fd);
static void libpqrcv_send(WalReceiverConn *conn, const char *buffer,
- int nbytes);
+ int nbytes);
static char *libpqrcv_create_slot(WalReceiverConn *conn,
- const char *slotname,
- bool temporary,
- CRSSnapshotAction snapshot_action,
- XLogRecPtr *lsn);
+ const char *slotname,
+ bool temporary,
+ CRSSnapshotAction snapshot_action,
+ XLogRecPtr *lsn);
static WalRcvExecResult *libpqrcv_exec(WalReceiverConn *conn,
- const char *query,
- const int nRetTypes,
- const Oid *retTypes);
+ const char *query,
+ const int nRetTypes,
+ const Oid *retTypes);
static void libpqrcv_disconnect(WalReceiverConn *conn);
static WalReceiverFunctionsType PQWalReceiverFunctions = {
conn = palloc0(sizeof(WalReceiverConn));
conn->streamConn = PQconnectStartParams(keys, vals,
- /* expand_dbname = */ true);
+ /* expand_dbname = */ true);
if (PQstatus(conn->streamConn) == CONNECTION_BAD)
{
*err = pchomp(PQerrorMessage(conn->streamConn));
static void
libpqrcv_check_conninfo(const char *conninfo)
{
- PQconninfoOption *opts = NULL;
- char *err = NULL;
+ PQconninfoOption *opts = NULL;
+ char *err = NULL;
opts = PQconninfoParse(conninfo, &err);
if (opts == NULL)
*/
if (options->logical)
{
- char *pubnames_str;
- List *pubnames;
- char *pubnames_literal;
+ char *pubnames_str;
+ List *pubnames;
+ char *pubnames_literal;
appendStringInfoString(&cmd, " (");
* next timeline's ID, or just CommandComplete if the server was shut
* down.
*
- * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT
- * is also possible in case we aborted the copy in mid-stream.
+ * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT is
+ * also possible in case we aborted the copy in mid-stream.
*/
res = PQgetResult(conn->streamConn);
if (PQresultStatus(res) == PGRES_TUPLES_OK)
/*
* PQexec() silently discards any prior query results on the connection.
- * This is not required for this function as it's expected that the
- * caller (which is this library in all cases) will behave correctly and
- * we don't have to be backwards compatible with old libpq.
+ * This is not required for this function as it's expected that the caller
+ * (which is this library in all cases) will behave correctly and we don't
+ * have to be backwards compatible with old libpq.
*/
/*
bool temporary, CRSSnapshotAction snapshot_action,
XLogRecPtr *lsn)
{
- PGresult *res;
- StringInfoData cmd;
- char *snapshot;
+ PGresult *res;
+ StringInfoData cmd;
+ char *snapshot;
initStringInfo(&cmd);
}
*lsn = DatumGetLSN(DirectFunctionCall1Coll(pg_lsn_in, InvalidOid,
- CStringGetDatum(PQgetvalue(res, 0, 1))));
+ CStringGetDatum(PQgetvalue(res, 0, 1))));
if (!PQgetisnull(res, 0, 2))
snapshot = pstrdup(PQgetvalue(res, 0, 2));
else
*/
static void
libpqrcv_processTuples(PGresult *pgres, WalRcvExecResult *walres,
- const int nRetTypes, const Oid *retTypes)
+ const int nRetTypes, const Oid *retTypes)
{
- int tupn;
- int coln;
- int nfields = PQnfields(pgres);
- HeapTuple tuple;
- AttInMetadata *attinmeta;
- MemoryContext rowcontext;
- MemoryContext oldcontext;
+ int tupn;
+ int coln;
+ int nfields = PQnfields(pgres);
+ HeapTuple tuple;
+ AttInMetadata *attinmeta;
+ MemoryContext rowcontext;
+ MemoryContext oldcontext;
/* Make sure we got expected number of fields. */
if (nfields != nRetTypes)
/* Process returned rows. */
for (tupn = 0; tupn < PQntuples(pgres); tupn++)
{
- char *cstrs[MaxTupleAttributeNumber];
+ char *cstrs[MaxTupleAttributeNumber];
CHECK_FOR_INTERRUPTS();
if (MyDatabaseId == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("the query interface requires a database connection")));
+ errmsg("the query interface requires a database connection")));
pgres = libpqrcv_PQexec(conn->streamConn, query);
walres->status = WALRCV_OK_COMMAND;
break;
- /* Empty query is considered error. */
+ /* Empty query is considered error. */
case PGRES_EMPTY_QUERY:
walres->status = WALRCV_ERROR;
walres->err = _("empty query");
static char *
stringlist_to_identifierstr(PGconn *conn, List *strings)
{
- ListCell *lc;
+ ListCell *lc;
StringInfoData res;
- bool first = true;
+ bool first = true;
initStringInfo(&res);
- foreach (lc, strings)
+ foreach(lc, strings)
{
- char *val = strVal(lfirst(lc));
- char *val_escaped;
+ char *val = strVal(lfirst(lc));
+ char *val_escaped;
if (first)
first = false;
/* max sleep time between cycles (3min) */
#define DEFAULT_NAPTIME_PER_CYCLE 180000L
-int max_logical_replication_workers = 4;
-int max_sync_workers_per_subscription = 2;
+int max_logical_replication_workers = 4;
+int max_sync_workers_per_subscription = 2;
LogicalRepWorker *MyLogicalRepWorker = NULL;
pid_t launcher_pid;
/* Background workers. */
- LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER];
+ LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER];
} LogicalRepCtxStruct;
LogicalRepCtxStruct *LogicalRepCtx;
volatile sig_atomic_t got_SIGHUP = false;
volatile sig_atomic_t got_SIGTERM = false;
-static bool on_commit_launcher_wakeup = false;
+static bool on_commit_launcher_wakeup = false;
-Datum pg_stat_get_subscription(PG_FUNCTION_ARGS);
+Datum pg_stat_get_subscription(PG_FUNCTION_ARGS);
/*
while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection)))
{
Form_pg_subscription subform = (Form_pg_subscription) GETSTRUCT(tup);
- Subscription *sub;
- MemoryContext oldcxt;
+ Subscription *sub;
+ MemoryContext oldcxt;
/*
* Allocate our results in the caller's context, not the
LogicalRepWorker *
logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
{
- int i;
- LogicalRepWorker *res = NULL;
+ int i;
+ LogicalRepWorker *res = NULL;
Assert(LWLockHeldByMe(LogicalRepWorkerLock));
/* Search for attached worker for a given subscription id. */
for (i = 0; i < max_logical_replication_workers; i++)
{
- LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+ LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
if (w->in_use && w->subid == subid && w->relid == relid &&
(!only_running || w->proc))
{
logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid,
Oid relid)
{
- BackgroundWorker bgw;
+ BackgroundWorker bgw;
BackgroundWorkerHandle *bgw_handle;
- int i;
- int slot = 0;
- LogicalRepWorker *worker = NULL;
- int nsyncworkers;
- TimestampTz now;
+ int i;
+ int slot = 0;
+ LogicalRepWorker *worker = NULL;
+ int nsyncworkers;
+ TimestampTz now;
ereport(LOG,
- (errmsg("starting logical replication worker for subscription \"%s\"",
- subname)));
+ (errmsg("starting logical replication worker for subscription \"%s\"",
+ subname)));
/* Report this after the initial starting message for consistency. */
if (max_replication_slots == 0)
*/
if (worker == NULL || nsyncworkers >= max_sync_workers_per_subscription)
{
- bool did_cleanup = false;
+ bool did_cleanup = false;
for (i = 0; i < max_logical_replication_workers; i++)
{
/* Register the new dynamic worker. */
memset(&bgw, 0, sizeof(bgw));
- bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
+ bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
BGWORKER_BACKEND_DATABASE_CONNECTION;
bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
ereport(WARNING,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("out of background worker slots"),
- errhint("You might need to increase max_worker_processes.")));
+ errhint("You might need to increase max_worker_processes.")));
return;
}
logicalrep_worker_stop(Oid subid, Oid relid)
{
LogicalRepWorker *worker;
- uint16 generation;
+ uint16 generation;
LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
*/
while (worker->in_use && !worker->proc)
{
- int rc;
+ int rc;
LWLockRelease(LogicalRepWorkerLock);
/* ... and wait for it to die. */
for (;;)
{
- int rc;
+ int rc;
LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
if (!worker->proc || worker->generation != generation)
void
logicalrep_worker_wakeup(Oid subid, Oid relid)
{
- LogicalRepWorker *worker;
+ LogicalRepWorker *worker;
LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
worker = logicalrep_worker_find(subid, relid, true);
{
LWLockRelease(LogicalRepWorkerLock);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical replication worker slot %d is empty, cannot attach",
- slot)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical replication worker slot %d is empty, cannot attach",
+ slot)));
}
if (MyLogicalRepWorker->proc)
{
LWLockRelease(LogicalRepWorkerLock);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical replication worker slot %d is already used by "
- "another worker, cannot attach", slot)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("logical replication worker slot %d is already used by "
+ "another worker, cannot attach", slot)));
}
MyLogicalRepWorker->proc = MyProc;
void
logicalrep_worker_sigterm(SIGNAL_ARGS)
{
- int save_errno = errno;
+ int save_errno = errno;
got_SIGTERM = true;
void
logicalrep_worker_sighup(SIGNAL_ARGS)
{
- int save_errno = errno;
+ int save_errno = errno;
got_SIGHUP = true;
int
logicalrep_sync_worker_count(Oid subid)
{
- int i;
- int res = 0;
+ int i;
+ int res = 0;
Assert(LWLockHeldByMe(LogicalRepWorkerLock));
/* Search for attached worker for a given subscription id. */
for (i = 0; i < max_logical_replication_workers; i++)
{
- LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+ LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
if (w->subid == subid && OidIsValid(w->relid))
res++;
}
return;
memset(&bgw, 0, sizeof(bgw));
- bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
+ bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
BGWORKER_BACKEND_DATABASE_CONNECTION;
bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
if (!found)
{
- int slot;
+ int slot;
memset(LogicalRepCtx, 0, ApplyLauncherShmemSize());
void
ApplyLauncherMain(Datum main_arg)
{
- TimestampTz last_start_time = 0;
+ TimestampTz last_start_time = 0;
ereport(DEBUG1,
(errmsg("logical replication launcher started")));
int rc;
List *sublist;
ListCell *lc;
- MemoryContext subctx;
- MemoryContext oldctx;
- TimestampTz now;
- long wait_time = DEFAULT_NAPTIME_PER_CYCLE;
+ MemoryContext subctx;
+ MemoryContext oldctx;
+ TimestampTz now;
+ long wait_time = DEFAULT_NAPTIME_PER_CYCLE;
now = GetCurrentTimestamp();
{
/* Use temporary context for the database list and worker info. */
subctx = AllocSetContextCreate(TopMemoryContext,
- "Logical Replication Launcher sublist",
+ "Logical Replication Launcher sublist",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/* Start the missing workers for enabled subscriptions. */
foreach(lc, sublist)
{
- Subscription *sub = (Subscription *) lfirst(lc);
- LogicalRepWorker *w;
+ Subscription *sub = (Subscription *) lfirst(lc);
+ LogicalRepWorker *w;
LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
w = logicalrep_worker_find(sub->oid, InvalidOid, false);
{
/*
* The wait in previous cycle was interrupted in less than
- * wal_retrieve_retry_interval since last worker was started,
- * this usually means crash of the worker, so we should retry
- * in wal_retrieve_retry_interval again.
+ * wal_retrieve_retry_interval since last worker was started, this
+ * usually means crash of the worker, so we should retry in
+ * wal_retrieve_retry_interval again.
*/
wait_time = wal_retrieve_retry_interval;
}
Datum values[PG_STAT_GET_SUBSCRIPTION_COLS];
bool nulls[PG_STAT_GET_SUBSCRIPTION_COLS];
int worker_pid;
- LogicalRepWorker worker;
+ LogicalRepWorker worker;
memcpy(&worker, &LogicalRepCtx->workers[i],
sizeof(LogicalRepWorker));
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
- /* If only a single subscription was requested, and we found it, break. */
+ /*
+ * If only a single subscription was requested, and we found it,
+ * break.
+ */
if (OidIsValid(subid))
break;
}
XLogPageReadCB read_page,
LogicalOutputPluginWriterPrepareWrite prepare_write,
LogicalOutputPluginWriterWrite do_write,
- LogicalOutputPluginWriterUpdateProgress update_progress)
+ LogicalOutputPluginWriterUpdateProgress update_progress)
{
ReplicationSlot *slot;
MemoryContext context,
* plugin contains the name of the output plugin
* output_plugin_options contains options passed to the output plugin
* read_page, prepare_write, do_write, update_progress
- * callbacks that have to be filled to perform the use-case dependent,
- * actual, work.
+ * callbacks that have to be filled to perform the use-case dependent,
+ * actual, work.
*
* Needs to be called while in a memory context that's at least as long lived
* as the decoding context because further memory contexts will be created
XLogPageReadCB read_page,
LogicalOutputPluginWriterPrepareWrite prepare_write,
LogicalOutputPluginWriterWrite do_write,
- LogicalOutputPluginWriterUpdateProgress update_progress)
+ LogicalOutputPluginWriterUpdateProgress update_progress)
{
TransactionId xmin_horizon = InvalidTransactionId;
ReplicationSlot *slot;
if (ctx->reader->EndRecPtr != InvalidXLogRecPtr && confirm)
{
LogicalConfirmReceivedLocation(ctx->reader->EndRecPtr);
+
/*
* If only the confirmed_flush_lsn has changed the slot won't get
- * marked as dirty by the above. Callers on the walsender interface
- * are expected to keep track of their own progress and don't need
- * it written out. But SQL-interface users cannot specify their own
- * start positions and it's harder for them to keep track of their
- * progress, so we should make more of an effort to save it for them.
+ * marked as dirty by the above. Callers on the walsender
+ * interface are expected to keep track of their own progress and
+ * don't need it written out. But SQL-interface users cannot
+ * specify their own start positions and it's harder for them to
+ * keep track of their progress, so we should make more of an
+ * effort to save it for them.
*
- * Dirty the slot so it's written out at the next checkpoint. We'll
- * still lose its position on crash, as documented, but it's better
- * than always losing the position even on clean restart.
+ * Dirty the slot so it's written out at the next checkpoint.
+ * We'll still lose its position on crash, as documented, but it's
+ * better than always losing the position even on clean restart.
*/
ReplicationSlotMarkDirty();
}
static void logicalrep_write_attrs(StringInfo out, Relation rel);
static void logicalrep_write_tuple(StringInfo out, Relation rel,
- HeapTuple tuple);
+ HeapTuple tuple);
static void logicalrep_read_attrs(StringInfo in, LogicalRepRelation *rel);
static void logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple);
logicalrep_write_commit(StringInfo out, ReorderBufferTXN *txn,
XLogRecPtr commit_lsn)
{
- uint8 flags = 0;
+ uint8 flags = 0;
pq_sendbyte(out, 'C'); /* sending COMMIT */
logicalrep_read_commit(StringInfo in, LogicalRepCommitData *commit_data)
{
/* read flags (unused for now) */
- uint8 flags = pq_getmsgbyte(in);
+ uint8 flags = pq_getmsgbyte(in);
if (flags != 0)
elog(ERROR, "unrecognized flags %u in commit message", flags);
* Write INSERT to the output stream.
*/
void
-logicalrep_write_insert(StringInfo out, Relation rel, HeapTuple newtuple)
+logicalrep_write_insert(StringInfo out, Relation rel, HeapTuple newtuple)
{
pq_sendbyte(out, 'I'); /* action INSERT */
logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup)
{
char action;
- LogicalRepRelId relid;
+ LogicalRepRelId relid;
/* read the relation id */
relid = pq_getmsgint(in, 4);
*/
void
logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple,
- HeapTuple newtuple)
+ HeapTuple newtuple)
{
pq_sendbyte(out, 'U'); /* action UPDATE */
if (oldtuple != NULL)
{
if (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
- pq_sendbyte(out, 'O'); /* old tuple follows */
+ pq_sendbyte(out, 'O'); /* old tuple follows */
else
- pq_sendbyte(out, 'K'); /* old key follows */
+ pq_sendbyte(out, 'K'); /* old key follows */
logicalrep_write_tuple(out, rel, oldtuple);
}
LogicalRepTupleData *newtup)
{
char action;
- LogicalRepRelId relid;
+ LogicalRepRelId relid;
/* read the relation id */
relid = pq_getmsgint(in, 4);
logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup)
{
char action;
- LogicalRepRelId relid;
+ LogicalRepRelId relid;
/* read the relation id */
relid = pq_getmsgint(in, 4);
LogicalRepRelation *
logicalrep_read_rel(StringInfo in)
{
- LogicalRepRelation *rel = palloc(sizeof(LogicalRepRelation));
+ LogicalRepRelation *rel = palloc(sizeof(LogicalRepRelation));
rel->remoteid = pq_getmsgint(in, 4);
if (isnull[i])
{
- pq_sendbyte(out, 'n'); /* null column */
+ pq_sendbyte(out, 'n'); /* null column */
continue;
}
else if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i]))
{
- pq_sendbyte(out, 'u'); /* unchanged toast column */
+ pq_sendbyte(out, 'u'); /* unchanged toast column */
continue;
}
switch (kind)
{
- case 'n': /* null */
+ case 'n': /* null */
tuple->values[i] = NULL;
tuple->changed[i] = true;
break;
- case 'u': /* unchanged column */
+ case 'u': /* unchanged column */
/* we don't receive the value of an unchanged column */
tuple->values[i] = NULL;
break;
- case 't': /* text formatted value */
+ case 't': /* text formatted value */
{
int len;
tuple->changed[i] = true;
- len = pq_getmsgint(in, 4); /* read length */
+ len = pq_getmsgint(in, 4); /* read length */
/* and data */
tuple->values[i] = palloc(len + 1);
for (i = 0; i < desc->natts; i++)
{
Form_pg_attribute att = desc->attrs[i];
- uint8 flags = 0;
+ uint8 flags = 0;
if (att->attisdropped)
continue;
pq_sendbyte(out, '\0');
else
{
- char *nspname = get_namespace_name(nspid);
+ char *nspname = get_namespace_name(nspid);
if (nspname == NULL)
elog(ERROR, "cache lookup failed for namespace %u",
#include "utils/memutils.h"
#include "utils/syscache.h"
-static MemoryContext LogicalRepRelMapContext = NULL;
+static MemoryContext LogicalRepRelMapContext = NULL;
-static HTAB *LogicalRepRelMap = NULL;
-static HTAB *LogicalRepTypMap = NULL;
+static HTAB *LogicalRepRelMap = NULL;
+static HTAB *LogicalRepTypMap = NULL;
static void logicalrep_typmap_invalidate_cb(Datum arg, int cacheid,
- uint32 hashvalue);
+ uint32 hashvalue);
/*
* Relcache invalidation callback for our relation map cache.
static void
logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
{
- LogicalRepRelMapEntry *entry;
+ LogicalRepRelMapEntry *entry;
/* Just to be sure. */
if (LogicalRepRelMap == NULL)
/* This will usually be small. */
LogicalRepTypMap = hash_create("logicalrep type map cache", 2, &ctl,
- HASH_ELEM | HASH_BLOBS |HASH_CONTEXT);
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
/* Watch for invalidation events. */
CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb,
if (remoterel->natts > 0)
{
- int i;
+ int i;
for (i = 0; i < remoterel->natts; i++)
pfree(remoterel->attnames[i]);
void
logicalrep_relmap_update(LogicalRepRelation *remoterel)
{
- MemoryContext oldctx;
- LogicalRepRelMapEntry *entry;
- bool found;
- int i;
+ MemoryContext oldctx;
+ LogicalRepRelMapEntry *entry;
+ bool found;
+ int i;
if (LogicalRepRelMap == NULL)
logicalrep_relmap_init();
static int
logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname)
{
- int i;
+ int i;
for (i = 0; i < remoterel->natts; i++)
{
LogicalRepRelMapEntry *
logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
{
- LogicalRepRelMapEntry *entry;
+ LogicalRepRelMapEntry *entry;
bool found;
if (LogicalRepRelMap == NULL)
Bitmapset *idkey;
TupleDesc desc;
LogicalRepRelation *remoterel;
- MemoryContext oldctx;
+ MemoryContext oldctx;
+
remoterel = &entry->remoterel;
/* Try to find and lock the relation by name. */
/*
* Build the mapping of local attribute numbers to remote attribute
- * numbers and validate that we don't miss any replicated columns
- * as that would result in potentially unwanted data loss.
+ * numbers and validate that we don't miss any replicated columns as
+ * that would result in potentially unwanted data loss.
*/
desc = RelationGetDescr(entry->localrel);
oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);
found = 0;
for (i = 0; i < desc->natts; i++)
{
- int attnum = logicalrep_rel_att_by_name(remoterel,
- NameStr(desc->attrs[i]->attname));
+ int attnum = logicalrep_rel_att_by_name(remoterel,
+ NameStr(desc->attrs[i]->attname));
+
entry->attrmap[i] = attnum;
if (attnum >= 0)
found++;
if (found < remoterel->natts)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical replication target relation \"%s.%s\" is missing "
- "some replicated columns",
- remoterel->nspname, remoterel->relname)));
+ errmsg("logical replication target relation \"%s.%s\" is missing "
+ "some replicated columns",
+ remoterel->nspname, remoterel->relname)));
/*
* Check that replica identity matches. We allow for stricter replica
* but in the opposite scenario it will.
*
* Don't throw any error here just mark the relation entry as not
- * updatable, as replica identity is only for updates and deletes
- * but inserts can be replicated even without it.
+ * updatable, as replica identity is only for updates and deletes but
+ * inserts can be replicated even without it.
*/
entry->updatable = true;
idkey = RelationGetIndexAttrBitmap(entry->localrel,
{
idkey = RelationGetIndexAttrBitmap(entry->localrel,
INDEX_ATTR_BITMAP_PRIMARY_KEY);
+
/*
* If no replica identity index and no PK, the published table
* must have replica identity FULL.
i = -1;
while ((i = bms_next_member(idkey, i)) >= 0)
{
- int attnum = i + FirstLowInvalidHeapAttributeNumber;
+ int attnum = i + FirstLowInvalidHeapAttributeNumber;
if (!AttrNumberIsForUserDefinedAttr(attnum))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical replication target relation \"%s.%s\" uses "
- "system columns in REPLICA IDENTITY index",
- remoterel->nspname, remoterel->relname)));
+ errmsg("logical replication target relation \"%s.%s\" uses "
+ "system columns in REPLICA IDENTITY index",
+ remoterel->nspname, remoterel->relname)));
attnum = AttrNumberGetAttrOffset(attnum);
logicalrep_typmap_invalidate_cb(Datum arg, int cacheid, uint32 hashvalue)
{
HASH_SEQ_STATUS status;
- LogicalRepTyp *entry;
+ LogicalRepTyp *entry;
/* Just to be sure. */
if (LogicalRepTypMap == NULL)
void
logicalrep_typmap_update(LogicalRepTyp *remotetyp)
{
- MemoryContext oldctx;
- LogicalRepTyp *entry;
- bool found;
+ MemoryContext oldctx;
+ LogicalRepTyp *entry;
+ bool found;
if (LogicalRepTypMap == NULL)
logicalrep_relmap_init();
Oid
logicalrep_typmap_getid(Oid remoteid)
{
- LogicalRepTyp *entry;
- bool found;
- Oid nspoid;
+ LogicalRepTyp *entry;
+ bool found;
+ Oid nspoid;
/* Internal types are mapped directly. */
if (remoteid < FirstNormalObjectId)
* by the following graph describing the SnapBuild->state transitions:
*
* +-------------------------+
- * +----| START |-------------+
+ * +----| START |-------------+
* | +-------------------------+ |
* | | |
* | | |
* | | |
* | v |
* | +-------------------------+ v
- * | | BUILDING_SNAPSHOT |------------>|
+ * | | BUILDING_SNAPSHOT |------------>|
* | +-------------------------+ |
* | | |
* | | |
- * | running_xacts #2, xacts from #1 finished |
+ * | running_xacts #2, xacts from #1 finished |
* | | |
* | | |
* | v |
* | +-------------------------+ v
- * | | FULL_SNAPSHOT |------------>|
+ * | | FULL_SNAPSHOT |------------>|
* | +-------------------------+ |
* | | |
* running_xacts | saved snapshot
* with zero xacts | at running_xacts's lsn
* | | |
- * | running_xacts with xacts from #2 finished |
+ * | running_xacts with xacts from #2 finished |
* | | |
* | v |
* | +-------------------------+ |
TransactionId was_xmin;
TransactionId was_xmax;
- size_t was_xcnt; /* number of used xip entries */
- size_t was_xcnt_space; /* allocated size of xip */
- TransactionId *was_xip; /* running xacts array, xidComparator-sorted */
+ size_t was_xcnt; /* number of used xip entries */
+ size_t was_xcnt_space; /* allocated size of xip */
+ TransactionId *was_xip; /* running xacts array, xidComparator-sorted */
} was_running;
/*
{
if (newxcnt >= GetMaxSnapshotXidCount())
ereport(ERROR,
- (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("initial slot snapshot too large")));
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("initial slot snapshot too large")));
newxip[newxcnt++] = xid;
}
if (NormalTransactionIdFollows(subxid, xmax))
xmax = subxid;
}
+
/*
* If we're forcing timetravel we also need visibility information
* about subtransaction, so keep track of subtransaction's state, even
/*
* Adjust xmax of the snapshot builder, we only do that for committed,
- * catalog modifying, transactions, everything else isn't interesting
- * for us since we'll never look at the respective rows.
+ * catalog modifying, transactions, everything else isn't interesting for
+ * us since we'll never look at the respective rows.
*/
if (needs_timetravel &&
(!TransactionIdIsValid(builder->xmax) ||
running->oldestRunningXid);
/*
- * Increase shared memory limits, so vacuum can work on tuples we prevented
- * from being pruned till now.
+ * Increase shared memory limits, so vacuum can work on tuples we
+ * prevented from being pruned till now.
*/
LogicalIncreaseXminForSlot(lsn, running->oldestRunningXid);
* modifying transactions.
*
* c) First incrementally build a snapshot for catalog tuples
- * (BUILDING_SNAPSHOT), that requires all, already in-progress,
- * transactions to finish. Every transaction starting after that
- * (FULL_SNAPSHOT state), has enough information to be decoded. But
- * for older running transactions no viable snapshot exists yet, so
- * CONSISTENT will only be reached once all of those have finished.
+ * (BUILDING_SNAPSHOT), that requires all, already in-progress,
+ * transactions to finish. Every transaction starting after that
+ * (FULL_SNAPSHOT state), has enough information to be decoded. But
+ * for older running transactions no viable snapshot exists yet, so
+ * CONSISTENT will only be reached once all of those have finished.
* ---
*/
/* there won't be any state to cleanup */
return false;
}
+
/*
* c) transition from START to BUILDING_SNAPSHOT.
*
SnapBuildWaitSnapshot(running, running->nextXid);
}
+
/*
* c) transition from BUILDING_SNAPSHOT to FULL_SNAPSHOT.
*
SnapBuildStartNextPhaseAt(builder, running->nextXid);
ereport(LOG,
- (errmsg("logical decoding found initial consistent point at %X/%X",
- (uint32) (lsn >> 32), (uint32) lsn),
- errdetail("Waiting for transactions (approximately %d) older than %u to end.",
- running->xcnt, running->nextXid)));
+ (errmsg("logical decoding found initial consistent point at %X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail("Waiting for transactions (approximately %d) older than %u to end.",
+ running->xcnt, running->nextXid)));
SnapBuildWaitSnapshot(running, running->nextXid);
}
+
/*
* c) transition from FULL_SNAPSHOT to CONSISTENT.
*
*
* This isn't required for the correctness of decoding, but to:
* a) allow isolationtester to notice that we're currently waiting for
- * something.
+ * something.
* b) log a new xl_running_xacts record where it'd be helpful, without having
- * to write for bgwriter or checkpointer.
+ * to write for bgwriter or checkpointer.
* ---
*/
static void
TransactionId xid = running->xids[off];
/*
- * Upper layers should prevent that we ever need to wait on
- * ourselves. Check anyway, since failing to do so would either
- * result in an endless wait or an Assert() failure.
+ * Upper layers should prevent that we ever need to wait on ourselves.
+ * Check anyway, since failing to do so would either result in an
+ * endless wait or an Assert() failure.
*/
if (TransactionIdIsCurrentTransactionId(xid))
elog(ERROR, "waiting for ourselves");
char path[MAXPGPATH + 21];
/*
- * We start off with a minimum of the last redo pointer. No new replication
- * slot will start before that, so that's a safe upper bound for removal.
+ * We start off with a minimum of the last redo pointer. No new
+ * replication slot will start before that, so that's a safe upper bound
+ * for removal.
*/
redo = GetRedoRecPtr();
/*
* Exit routine for synchronization worker.
*/
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
finish_sync_worker(void)
{
/*
static bool
wait_for_sync_status_change(Oid relid, char origstate)
{
- int rc;
- char state = origstate;
+ int rc;
+ char state = origstate;
while (!got_SIGTERM)
{
- LogicalRepWorker *worker;
+ LogicalRepWorker *worker;
LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
worker = logicalrep_worker_find(MyLogicalRepWorker->subid,
struct tablesync_start_time_mapping
{
Oid relid;
- TimestampTz last_start_time;
+ TimestampTz last_start_time;
};
static List *table_states = NIL;
static HTAB *last_start_times = NULL;
/* We need up to date sync state info for subscription tables here. */
if (!table_states_valid)
{
- MemoryContext oldctx;
- List *rstates;
- ListCell *lc;
+ MemoryContext oldctx;
+ List *rstates;
+ ListCell *lc;
SubscriptionRelState *rstate;
/* Clean the old list. */
started_tx = true;
/* Fetch all non-ready tables. */
- rstates = GetSubscriptionNotReadyRelations(MySubscription->oid);
+ rstates = GetSubscriptionNotReadyRelations(MySubscription->oid);
/* Allocate the tracking info in a permanent memory context. */
oldctx = MemoryContextSwitchTo(CacheMemoryContext);
last_start_times = hash_create("Logical replication table sync worker start times",
256, &ctl, HASH_ELEM | HASH_BLOBS);
}
+
/*
* Clean up the hash table when we're done with all tables (just to
* release the bit of memory).
/* Process all tables that are being synchronized. */
foreach(lc, table_states)
{
- SubscriptionRelState *rstate = (SubscriptionRelState *)lfirst(lc);
+ SubscriptionRelState *rstate = (SubscriptionRelState *) lfirst(lc);
if (rstate->state == SUBREL_STATE_SYNCDONE)
{
/*
- * Apply has caught up to the position where the table sync
- * has finished. Time to mark the table as ready so that
- * apply will just continue to replicate it normally.
+ * Apply has caught up to the position where the table sync has
+ * finished. Time to mark the table as ready so that apply will
+ * just continue to replicate it normally.
*/
if (current_lsn >= rstate->lsn)
{
}
else
{
- LogicalRepWorker *syncworker;
- int nsyncworkers = 0;
+ LogicalRepWorker *syncworker;
+ int nsyncworkers = 0;
LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
syncworker = logicalrep_worker_find(MyLogicalRepWorker->subid,
SpinLockRelease(&syncworker->relmutex);
}
else
+
/*
* If no sync worker for this table yet, count running sync
* workers for this subscription, while we have the lock, for
* There are three possible synchronization situations here.
*
* a) Apply is in front of the table sync: We tell the table
- * sync to CATCHUP.
+ * sync to CATCHUP.
*
* b) Apply is behind the table sync: We tell the table sync
- * to mark the table as SYNCDONE and finish.
-
+ * to mark the table as SYNCDONE and finish.
+ *
* c) Apply and table sync are at the same position: We tell
- * table sync to mark the table as READY and finish.
+ * table sync to mark the table as READY and finish.
*
- * In any case we'll need to wait for table sync to change
- * the state in catalog and only then continue ourselves.
+ * In any case we'll need to wait for table sync to change the
+ * state in catalog and only then continue ourselves.
*/
if (current_lsn > rstate->lsn)
{
logicalrep_worker_wakeup_ptr(syncworker);
/*
- * Enter busy loop and wait for synchronization status
- * change.
+ * Enter busy loop and wait for synchronization status change.
*/
wait_for_sync_status_change(rstate->relid, rstate->state);
}
/*
- * If there is no sync worker registered for the table and
- * there is some free sync worker slot, start new sync worker
- * for the table.
+ * If there is no sync worker registered for the table and there
+ * is some free sync worker slot, start new sync worker for the
+ * table.
*/
else if (!syncworker && nsyncworkers < max_sync_workers_per_subscription)
{
- TimestampTz now = GetCurrentTimestamp();
+ TimestampTz now = GetCurrentTimestamp();
struct tablesync_start_time_mapping *hentry;
bool found;
for (i = 0; i < desc->natts; i++)
{
- int remoteattnum = rel->attrmap[i];
+ int remoteattnum = rel->attrmap[i];
/* Skip dropped attributes. */
if (desc->attrs[i]->attisdropped)
continue;
attnamelist = lappend(attnamelist,
- makeString(rel->remoterel.attnames[remoteattnum]));
+ makeString(rel->remoterel.attnames[remoteattnum]));
}
return attnamelist;
static int
copy_read_data(void *outbuf, int minread, int maxread)
{
- int bytesread = 0;
- int avail;
+ int bytesread = 0;
+ int avail;
/* If there are some leftover data from previous read, use them. */
avail = copybuf->len - copybuf->cursor;
fetch_remote_table_info(char *nspname, char *relname,
LogicalRepRelation *lrel)
{
- WalRcvExecResult *res;
- StringInfoData cmd;
- TupleTableSlot *slot;
- Oid tableRow[2] = {OIDOID, CHAROID};
- Oid attrRow[4] = {TEXTOID, OIDOID, INT4OID, BOOLOID};
- bool isnull;
- int natt;
+ WalRcvExecResult *res;
+ StringInfoData cmd;
+ TupleTableSlot *slot;
+ Oid tableRow[2] = {OIDOID, CHAROID};
+ Oid attrRow[4] = {TEXTOID, OIDOID, INT4OID, BOOLOID};
+ bool isnull;
+ int natt;
lrel->nspname = nspname;
lrel->relname = relname;
/* First fetch Oid and replica identity. */
initStringInfo(&cmd);
appendStringInfo(&cmd, "SELECT c.oid, c.relreplident"
- " FROM pg_catalog.pg_class c"
- " INNER JOIN pg_catalog.pg_namespace n"
- " ON (c.relnamespace = n.oid)"
- " WHERE n.nspname = %s"
- " AND c.relname = %s"
- " AND c.relkind = 'r'",
- quote_literal_cstr(nspname),
- quote_literal_cstr(relname));
+ " FROM pg_catalog.pg_class c"
+ " INNER JOIN pg_catalog.pg_namespace n"
+ " ON (c.relnamespace = n.oid)"
+ " WHERE n.nspname = %s"
+ " AND c.relname = %s"
+ " AND c.relkind = 'r'",
+ quote_literal_cstr(nspname),
+ quote_literal_cstr(relname));
res = walrcv_exec(wrconn, cmd.data, 2, tableRow);
if (res->status != WALRCV_OK_TUPLES)
" a.attnum = ANY(i.indkey)"
" FROM pg_catalog.pg_attribute a"
" LEFT JOIN pg_catalog.pg_index i"
- " ON (i.indexrelid = pg_get_replica_identity_index(%u))"
+ " ON (i.indexrelid = pg_get_replica_identity_index(%u))"
" WHERE a.attnum > 0::pg_catalog.int2"
" AND NOT a.attisdropped"
" AND a.attrelid = %u"
/* Should never happen. */
if (++natt >= MaxTupleAttributeNumber)
elog(ERROR, "too many columns in remote table \"%s.%s\"",
- nspname, relname);
+ nspname, relname);
ExecClearTuple(slot);
}
copy_table(Relation rel)
{
LogicalRepRelMapEntry *relmapentry;
- LogicalRepRelation lrel;
- WalRcvExecResult *res;
- StringInfoData cmd;
+ LogicalRepRelation lrel;
+ WalRcvExecResult *res;
+ StringInfoData cmd;
CopyState cstate;
List *attnamelist;
ParseState *pstate;
char *
LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
{
- char *slotname;
- char *err;
+ char *slotname;
+ char *err;
char relstate;
XLogRecPtr relstate_lsn;
* NAMEDATALEN on the remote that matters, but this scheme will also work
* reasonably if that is different.)
*/
- StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small"); /* for sanity */
+ StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small"); /* for sanity */
slotname = psprintf("%.*s_%u_sync_%u",
NAMEDATALEN - 28,
MySubscription->slotname,
case SUBREL_STATE_DATASYNC:
{
Relation rel;
- WalRcvExecResult *res;
+ WalRcvExecResult *res;
SpinLockAcquire(&MyLogicalRepWorker->relmutex);
MyLogicalRepWorker->relstate = SUBREL_STATE_DATASYNC;
pgstat_report_stat(false);
/*
- * We want to do the table data sync in single
- * transaction.
+ * We want to do the table data sync in single transaction.
*/
StartTransactionCommand();
/*
* Use standard write lock here. It might be better to
- * disallow access to table while it's being synchronized.
- * But we don't want to block the main apply process from
- * working and it has to open relation in RowExclusiveLock
- * when remapping remote relation id to local one.
+ * disallow access to table while it's being synchronized. But
+ * we don't want to block the main apply process from working
+ * and it has to open relation in RowExclusiveLock when
+ * remapping remote relation id to local one.
*/
rel = heap_open(MyLogicalRepWorker->relid, RowExclusiveLock);
/*
- * Create temporary slot for the sync process.
- * We do this inside transaction so that we can use the
- * snapshot made by the slot to get existing data.
+ * Create temporary slot for the sync process. We do this
+ * inside transaction so that we can use the snapshot made by
+ * the slot to get existing data.
*/
res = walrcv_exec(wrconn,
"BEGIN READ ONLY ISOLATION LEVEL "
/*
* Create new temporary logical decoding slot.
*
- * We'll use slot for data copy so make sure the snapshot
- * is used for the transaction, that way the COPY will get
- * data that is consistent with the lsn used by the slot
- * to start decoding.
+ * We'll use slot for data copy so make sure the snapshot is
+ * used for the transaction, that way the COPY will get data
+ * that is consistent with the lsn used by the slot to start
+ * decoding.
*/
walrcv_create_slot(wrconn, slotname, true,
CRS_USE_SNAPSHOT, origin_startpos);
CommandCounterIncrement();
/*
- * We are done with the initial data synchronization,
- * update the state.
+ * We are done with the initial data synchronization, update
+ * the state.
*/
SpinLockAcquire(&MyLogicalRepWorker->relmutex);
MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT;
SpinLockRelease(&MyLogicalRepWorker->relmutex);
/*
- * Wait for main apply worker to either tell us to
- * catchup or that we are done.
+ * Wait for main apply worker to either tell us to catchup or
+ * that we are done.
*/
wait_for_sync_status_change(MyLogicalRepWorker->relid,
MyLogicalRepWorker->relstate);
typedef struct FlushPosition
{
- dlist_node node;
- XLogRecPtr local_end;
- XLogRecPtr remote_end;
+ dlist_node node;
+ XLogRecPtr local_end;
+ XLogRecPtr remote_end;
} FlushPosition;
static dlist_head lsn_mapping = DLIST_STATIC_INIT(lsn_mapping);
typedef struct SlotErrCallbackArg
{
- LogicalRepRelation *rel;
+ LogicalRepRelation *rel;
int attnum;
} SlotErrCallbackArg;
-static MemoryContext ApplyMessageContext = NULL;
-MemoryContext ApplyContext = NULL;
+static MemoryContext ApplyMessageContext = NULL;
+MemoryContext ApplyContext = NULL;
-WalReceiverConn *wrconn = NULL;
+WalReceiverConn *wrconn = NULL;
-Subscription *MySubscription = NULL;
-bool MySubscriptionValid = false;
+Subscription *MySubscription = NULL;
+bool MySubscriptionValid = false;
-bool in_remote_transaction = false;
-static XLogRecPtr remote_final_lsn = InvalidXLogRecPtr;
+bool in_remote_transaction = false;
+static XLogRecPtr remote_final_lsn = InvalidXLogRecPtr;
static void send_feedback(XLogRecPtr recvpos, bool force, bool requestReply);
*/
static void
slot_fill_defaults(LogicalRepRelMapEntry *rel, EState *estate,
- TupleTableSlot *slot)
+ TupleTableSlot *slot)
{
TupleDesc desc = RelationGetDescr(rel->localrel);
int num_phys_attrs = desc->natts;
static void
slot_store_error_callback(void *arg)
{
- SlotErrCallbackArg *errarg = (SlotErrCallbackArg *) arg;
- Oid remotetypoid,
- localtypoid;
+ SlotErrCallbackArg *errarg = (SlotErrCallbackArg *) arg;
+ Oid remotetypoid,
+ localtypoid;
if (errarg->attnum < 0)
return;
*/
static void
slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
- char **values)
+ char **values)
{
- int natts = slot->tts_tupleDescriptor->natts;
- int i;
- SlotErrCallbackArg errarg;
- ErrorContextCallback errcallback;
+ int natts = slot->tts_tupleDescriptor->natts;
+ int i;
+ SlotErrCallbackArg errarg;
+ ErrorContextCallback errcallback;
ExecClearTuple(slot);
/* Call the "in" function for each non-dropped attribute */
for (i = 0; i < natts; i++)
{
- Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i];
- int remoteattnum = rel->attrmap[i];
+ Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i];
+ int remoteattnum = rel->attrmap[i];
if (!att->attisdropped && remoteattnum >= 0 &&
values[remoteattnum] != NULL)
{
- Oid typinput;
- Oid typioparam;
+ Oid typinput;
+ Oid typioparam;
errarg.attnum = remoteattnum;
*/
static void
slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
- char **values, bool *replaces)
+ char **values, bool *replaces)
{
- int natts = slot->tts_tupleDescriptor->natts;
- int i;
- SlotErrCallbackArg errarg;
- ErrorContextCallback errcallback;
+ int natts = slot->tts_tupleDescriptor->natts;
+ int i;
+ SlotErrCallbackArg errarg;
+ ErrorContextCallback errcallback;
slot_getallattrs(slot);
ExecClearTuple(slot);
/* Call the "in" function for each replaced attribute */
for (i = 0; i < natts; i++)
{
- Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i];
- int remoteattnum = rel->attrmap[i];
+ Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i];
+ int remoteattnum = rel->attrmap[i];
if (remoteattnum >= 0 && !replaces[remoteattnum])
continue;
if (remoteattnum >= 0 && values[remoteattnum] != NULL)
{
- Oid typinput;
- Oid typioparam;
+ Oid typinput;
+ Oid typioparam;
errarg.attnum = remoteattnum;
static void
apply_handle_begin(StringInfo s)
{
- LogicalRepBeginData begin_data;
+ LogicalRepBeginData begin_data;
logicalrep_read_begin(s, &begin_data);
static void
apply_handle_commit(StringInfo s)
{
- LogicalRepCommitData commit_data;
+ LogicalRepCommitData commit_data;
logicalrep_read_commit(s, &commit_data);
apply_handle_origin(StringInfo s)
{
/*
- * ORIGIN message can only come inside remote transaction and before
- * any actual writes.
+ * ORIGIN message can only come inside remote transaction and before any
+ * actual writes.
*/
if (!in_remote_transaction ||
(IsTransactionState() && !am_tablesync_worker()))
static void
apply_handle_relation(StringInfo s)
{
- LogicalRepRelation *rel;
+ LogicalRepRelation *rel;
rel = logicalrep_read_rel(s);
logicalrep_relmap_update(rel);
static void
apply_handle_type(StringInfo s)
{
- LogicalRepTyp typ;
+ LogicalRepTyp typ;
logicalrep_read_typ(s, &typ);
logicalrep_typmap_update(&typ);
static Oid
GetRelationIdentityOrPK(Relation rel)
{
- Oid idxoid;
+ Oid idxoid;
idxoid = RelationGetReplicaIndex(rel);
apply_handle_insert(StringInfo s)
{
LogicalRepRelMapEntry *rel;
- LogicalRepTupleData newtup;
- LogicalRepRelId relid;
- EState *estate;
- TupleTableSlot *remoteslot;
- MemoryContext oldctx;
+ LogicalRepTupleData newtup;
+ LogicalRepRelId relid;
+ EState *estate;
+ TupleTableSlot *remoteslot;
+ MemoryContext oldctx;
ensure_transaction();
return;
/*
- * We are in error mode so it's fine this is somewhat slow.
- * It's better to give user correct error.
+ * We are in error mode so it's fine this is somewhat slow. It's better to
+ * give user correct error.
*/
if (OidIsValid(GetRelationIdentityOrPK(rel->localrel)))
{
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("publisher does not send replica identity column "
- "expected by the logical replication target relation \"%s.%s\"",
+ "expected by the logical replication target relation \"%s.%s\"",
rel->remoterel.nspname, rel->remoterel.relname)));
}
apply_handle_update(StringInfo s)
{
LogicalRepRelMapEntry *rel;
- LogicalRepRelId relid;
- Oid idxoid;
- EState *estate;
- EPQState epqstate;
- LogicalRepTupleData oldtup;
- LogicalRepTupleData newtup;
- bool has_oldtup;
- TupleTableSlot *localslot;
- TupleTableSlot *remoteslot;
- bool found;
- MemoryContext oldctx;
+ LogicalRepRelId relid;
+ Oid idxoid;
+ EState *estate;
+ EPQState epqstate;
+ LogicalRepTupleData oldtup;
+ LogicalRepTupleData newtup;
+ bool has_oldtup;
+ TupleTableSlot *localslot;
+ TupleTableSlot *remoteslot;
+ bool found;
+ MemoryContext oldctx;
ensure_transaction();
MemoryContextSwitchTo(oldctx);
/*
- * Try to find tuple using either replica identity index, primary key
- * or if needed, sequential scan.
+ * Try to find tuple using either replica identity index, primary key or
+ * if needed, sequential scan.
*/
idxoid = GetRelationIdentityOrPK(rel->localrel);
Assert(OidIsValid(idxoid) ||
apply_handle_delete(StringInfo s)
{
LogicalRepRelMapEntry *rel;
- LogicalRepTupleData oldtup;
- LogicalRepRelId relid;
- Oid idxoid;
- EState *estate;
- EPQState epqstate;
- TupleTableSlot *remoteslot;
- TupleTableSlot *localslot;
- bool found;
- MemoryContext oldctx;
+ LogicalRepTupleData oldtup;
+ LogicalRepRelId relid;
+ Oid idxoid;
+ EState *estate;
+ EPQState epqstate;
+ TupleTableSlot *remoteslot;
+ TupleTableSlot *localslot;
+ bool found;
+ MemoryContext oldctx;
ensure_transaction();
MemoryContextSwitchTo(oldctx);
/*
- * Try to find tuple using either replica identity index, primary key
- * or if needed, sequential scan.
+ * Try to find tuple using either replica identity index, primary key or
+ * if needed, sequential scan.
*/
idxoid = GetRelationIdentityOrPK(rel->localrel);
Assert(OidIsValid(idxoid) ||
}
else
{
- /* The tuple to be deleted could not be found.*/
+ /* The tuple to be deleted could not be found. */
ereport(DEBUG1,
(errmsg("logical replication could not find row for delete "
"in replication target %s",
static void
apply_dispatch(StringInfo s)
{
- char action = pq_getmsgbyte(s);
+ char action = pq_getmsgbyte(s);
switch (action)
{
- /* BEGIN */
+ /* BEGIN */
case 'B':
apply_handle_begin(s);
break;
- /* COMMIT */
+ /* COMMIT */
case 'C':
apply_handle_commit(s);
break;
- /* INSERT */
+ /* INSERT */
case 'I':
apply_handle_insert(s);
break;
- /* UPDATE */
+ /* UPDATE */
case 'U':
apply_handle_update(s);
break;
- /* DELETE */
+ /* DELETE */
case 'D':
apply_handle_delete(s);
break;
- /* RELATION */
+ /* RELATION */
case 'R':
apply_handle_relation(s);
break;
- /* TYPE */
+ /* TYPE */
case 'Y':
apply_handle_type(s);
break;
- /* ORIGIN */
+ /* ORIGIN */
case 'O':
apply_handle_origin(s);
break;
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid logical replication message type %c", action)));
+ errmsg("invalid logical replication message type %c", action)));
}
}
dlist_foreach_modify(iter, &lsn_mapping)
{
FlushPosition *pos =
- dlist_container(FlushPosition, node, iter.cur);
+ dlist_container(FlushPosition, node, iter.cur);
*write = pos->remote_end;
LogicalRepApplyLoop(XLogRecPtr last_received)
{
/*
- * Init the ApplyMessageContext which we clean up after each
- * replication protocol message.
+ * Init the ApplyMessageContext which we clean up after each replication
+ * protocol message.
*/
ApplyMessageContext = AllocSetContextCreate(ApplyContext,
- "ApplyMessageContext",
- ALLOCSET_DEFAULT_SIZES);
+ "ApplyMessageContext",
+ ALLOCSET_DEFAULT_SIZES);
/* mark as idle, before starting to loop */
pgstat_report_activity(STATE_IDLE, NULL);
}
else
{
- int c;
+ int c;
StringInfoData s;
/* Reset timeout. */
{
/*
* If we didn't get any transactions for a while there might be
- * unconsumed invalidation messages in the queue, consume them now.
+ * unconsumed invalidation messages in the queue, consume them
+ * now.
*/
AcceptInvalidationMessages();
if (!MySubscriptionValid)
if (endofstream)
{
TimeLineID tli;
+
walrcv_endstreaming(wrconn, &tli);
break;
}
if (rc & WL_TIMEOUT)
{
/*
- * We didn't receive anything new. If we haven't heard
- * anything from the server for more than
- * wal_receiver_timeout / 2, ping the server. Also, if
- * it's been longer than wal_receiver_status_interval
- * since the last update we sent, send a status update to
- * the master anyway, to report any progress in applying
- * WAL.
+ * We didn't receive anything new. If we haven't heard anything
+ * from the server for more than wal_receiver_timeout / 2, ping
+ * the server. Also, if it's been longer than
+ * wal_receiver_status_interval since the last update we sent,
+ * send a status update to the master anyway, to report any
+ * progress in applying WAL.
*/
bool requestReply = false;
/*
- * Check if time since last receive from standby has
- * reached the configured limit.
+ * Check if time since last receive from standby has reached the
+ * configured limit.
*/
if (wal_receiver_timeout > 0)
{
(errmsg("terminating logical replication worker due to timeout")));
/*
- * We didn't receive anything new, for half of
- * receiver replication timeout. Ping the server.
+ * We didn't receive anything new, for half of receiver
+ * replication timeout. Ping the server.
*/
if (!ping_sent)
{
timeout = TimestampTzPlusMilliseconds(last_recv_timestamp,
- (wal_receiver_timeout / 2));
+ (wal_receiver_timeout / 2));
if (now >= timeout)
{
requestReply = true;
static void
send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
{
- static StringInfo reply_message = NULL;
- static TimestampTz send_time = 0;
+ static StringInfo reply_message = NULL;
+ static TimestampTz send_time = 0;
static XLogRecPtr last_recvpos = InvalidXLogRecPtr;
static XLogRecPtr last_writepos = InvalidXLogRecPtr;
static XLogRecPtr last_flushpos = InvalidXLogRecPtr;
- XLogRecPtr writepos;
- XLogRecPtr flushpos;
+ XLogRecPtr writepos;
+ XLogRecPtr flushpos;
TimestampTz now;
- bool have_pending_txes;
+ bool have_pending_txes;
/*
* If the user doesn't want status to be reported to the publisher, be
get_flush_position(&writepos, &flushpos, &have_pending_txes);
/*
- * No outstanding transactions to flush, we can report the latest
- * received position. This is important for synchronous replication.
+ * No outstanding transactions to flush, we can report the latest received
+ * position. This is important for synchronous replication.
*/
if (!have_pending_txes)
flushpos = writepos = recvpos;
if (!reply_message)
{
- MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext);
+ MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext);
+
reply_message = makeStringInfo();
MemoryContextSwitchTo(oldctx);
}
pq_sendint64(reply_message, recvpos); /* write */
pq_sendint64(reply_message, flushpos); /* flush */
pq_sendint64(reply_message, writepos); /* apply */
- pq_sendint64(reply_message, now); /* sendTime */
+ pq_sendint64(reply_message, now); /* sendTime */
pq_sendbyte(reply_message, requestReply); /* replyRequested */
elog(DEBUG2, "sending feedback (force %d) to recv %X/%X, write %X/%X, flush %X/%X",
static void
reread_subscription(void)
{
- MemoryContext oldctx;
- Subscription *newsub;
- bool started_tx = false;
+ MemoryContext oldctx;
+ Subscription *newsub;
+ bool started_tx = false;
/* This function might be called inside or outside of transaction. */
if (!IsTransactionState())
newsub = GetSubscription(MyLogicalRepWorker->subid, true);
/*
- * Exit if the subscription was removed.
- * This normally should not happen as the worker gets killed
- * during DROP SUBSCRIPTION.
+ * Exit if the subscription was removed. This normally should not happen
+ * as the worker gets killed during DROP SUBSCRIPTION.
*/
if (!newsub)
{
ereport(LOG,
- (errmsg("logical replication worker for subscription \"%s\" will "
- "stop because the subscription was removed",
- MySubscription->name)));
+ (errmsg("logical replication worker for subscription \"%s\" will "
+ "stop because the subscription was removed",
+ MySubscription->name)));
walrcv_disconnect(wrconn);
proc_exit(0);
}
/*
- * Exit if the subscription was disabled.
- * This normally should not happen as the worker gets killed
- * during ALTER SUBSCRIPTION ... DISABLE.
+ * Exit if the subscription was disabled. This normally should not happen
+ * as the worker gets killed during ALTER SUBSCRIPTION ... DISABLE.
*/
if (!newsub->enabled)
{
ereport(LOG,
- (errmsg("logical replication worker for subscription \"%s\" will "
- "stop because the subscription was disabled",
- MySubscription->name)));
+ (errmsg("logical replication worker for subscription \"%s\" will "
+ "stop because the subscription was disabled",
+ MySubscription->name)));
walrcv_disconnect(wrconn);
proc_exit(0);
}
/*
- * Exit if connection string was changed. The launcher will start
- * new worker.
+ * Exit if connection string was changed. The launcher will start new
+ * worker.
*/
if (strcmp(newsub->conninfo, MySubscription->conninfo) != 0)
{
ereport(LOG,
- (errmsg("logical replication worker for subscription \"%s\" will "
- "restart because the connection information was changed",
- MySubscription->name)));
+ (errmsg("logical replication worker for subscription \"%s\" will "
+ "restart because the connection information was changed",
+ MySubscription->name)));
walrcv_disconnect(wrconn);
proc_exit(0);
if (strcmp(newsub->name, MySubscription->name) != 0)
{
ereport(LOG,
- (errmsg("logical replication worker for subscription \"%s\" will "
- "restart because subscription was renamed",
- MySubscription->name)));
+ (errmsg("logical replication worker for subscription \"%s\" will "
+ "restart because subscription was renamed",
+ MySubscription->name)));
walrcv_disconnect(wrconn);
proc_exit(0);
Assert(newsub->slotname);
/*
- * We need to make new connection to new slot if slot name has changed
- * so exit here as well if that's the case.
+ * We need to make new connection to new slot if slot name has changed so
+ * exit here as well if that's the case.
*/
if (strcmp(newsub->slotname, MySubscription->slotname) != 0)
{
ereport(LOG,
- (errmsg("logical replication worker for subscription \"%s\" will "
- "restart because the replication slot name was changed",
- MySubscription->name)));
+ (errmsg("logical replication worker for subscription \"%s\" will "
+ "restart because the replication slot name was changed",
+ MySubscription->name)));
walrcv_disconnect(wrconn);
proc_exit(0);
}
/*
- * Exit if publication list was changed. The launcher will start
- * new worker.
+ * Exit if publication list was changed. The launcher will start new
+ * worker.
*/
if (!equal(newsub->publications, MySubscription->publications))
{
ereport(LOG,
- (errmsg("logical replication worker for subscription \"%s\" will "
- "restart because subscription's publications were changed",
- MySubscription->name)));
+ (errmsg("logical replication worker for subscription \"%s\" will "
+ "restart because subscription's publications were changed",
+ MySubscription->name)));
walrcv_disconnect(wrconn);
proc_exit(0);
void
ApplyWorkerMain(Datum main_arg)
{
- int worker_slot = DatumGetInt32(main_arg);
- MemoryContext oldctx;
- char originname[NAMEDATALEN];
- XLogRecPtr origin_startpos;
- char *myslotname;
+ int worker_slot = DatumGetInt32(main_arg);
+ MemoryContext oldctx;
+ char originname[NAMEDATALEN];
+ XLogRecPtr origin_startpos;
+ char *myslotname;
WalRcvStreamOptions options;
/* Attach to slot */
/* Load the subscription into persistent memory context. */
ApplyContext = AllocSetContextCreate(TopMemoryContext,
- "ApplyContext",
- ALLOCSET_DEFAULT_SIZES);
+ "ApplyContext",
+ ALLOCSET_DEFAULT_SIZES);
StartTransactionCommand();
oldctx = MemoryContextSwitchTo(ApplyContext);
MySubscription = GetSubscription(MyLogicalRepWorker->subid, false);
if (!MySubscription->enabled)
{
ereport(LOG,
- (errmsg("logical replication worker for subscription \"%s\" will not "
- "start because the subscription was disabled during startup",
- MySubscription->name)));
+ (errmsg("logical replication worker for subscription \"%s\" will not "
+ "start because the subscription was disabled during startup",
+ MySubscription->name)));
proc_exit(0);
}
if (am_tablesync_worker())
{
- char *syncslotname;
+ char *syncslotname;
/* This is table synchroniation worker, call initial sync. */
syncslotname = LogicalRepSyncTableStart(&origin_startpos);
else
{
/* This is main apply worker */
- RepOriginId originid;
- TimeLineID startpointTLI;
- char *err;
- int server_version;
+ RepOriginId originid;
+ TimeLineID startpointTLI;
+ char *err;
+ int server_version;
myslotname = MySubscription->slotname;
(errmsg("could not connect to the publisher: %s", err)));
/*
- * We don't really use the output identify_system for anything
- * but it does some initializations on the upstream so let's still
- * call it.
+ * We don't really use the output identify_system for anything but it
+ * does some initializations on the upstream so let's still call it.
*/
(void) walrcv_identify_system(wrconn, &startpointTLI,
&server_version);
}
/*
- * Setup callback for syscache so that we know when something
- * changes in the subscription relation state.
+ * Setup callback for syscache so that we know when something changes in
+ * the subscription relation state.
*/
CacheRegisterSyscacheCallback(SUBSCRIPTIONRELMAP,
invalidate_syncing_table_states,
extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
-static void pgoutput_startup(LogicalDecodingContext * ctx,
- OutputPluginOptions *opt, bool is_init);
-static void pgoutput_shutdown(LogicalDecodingContext * ctx);
+static void pgoutput_startup(LogicalDecodingContext *ctx,
+ OutputPluginOptions *opt, bool is_init);
+static void pgoutput_shutdown(LogicalDecodingContext *ctx);
static void pgoutput_begin_txn(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn);
+ ReorderBufferTXN *txn);
static void pgoutput_commit_txn(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
+ ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
static void pgoutput_change(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn, Relation rel,
- ReorderBufferChange *change);
+ ReorderBufferTXN *txn, Relation rel,
+ ReorderBufferChange *change);
static bool pgoutput_origin_filter(LogicalDecodingContext *ctx,
- RepOriginId origin_id);
+ RepOriginId origin_id);
static bool publications_valid;
static List *LoadPublications(List *pubnames);
static void publication_invalidation_cb(Datum arg, int cacheid,
- uint32 hashvalue);
+ uint32 hashvalue);
/* Entry in the map used to remember which relation schemas we sent. */
typedef struct RelationSyncEntry
{
- Oid relid; /* relation oid */
- bool schema_sent; /* did we send the schema? */
- bool replicate_valid;
+ Oid relid; /* relation oid */
+ bool schema_sent; /* did we send the schema? */
+ bool replicate_valid;
PublicationActions pubactions;
} RelationSyncEntry;
static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data, Oid relid);
static void rel_sync_cache_relation_cb(Datum arg, Oid relid);
static void rel_sync_cache_publication_cb(Datum arg, int cacheid,
- uint32 hashvalue);
+ uint32 hashvalue);
/*
* Specify output plugin callbacks
if (!SplitIdentifierString(strVal(defel->arg), ',',
publication_names))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_NAME),
- errmsg("invalid publication_names syntax")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_NAME),
+ errmsg("invalid publication_names syntax")));
}
else
elog(ERROR, "unrecognized pgoutput option: %s", defel->defname);
* Initialize this plugin
*/
static void
-pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
- bool is_init)
+pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
+ bool is_init)
{
- PGOutputData *data = palloc0(sizeof(PGOutputData));
+ PGOutputData *data = palloc0(sizeof(PGOutputData));
/* Create our memory context for private allocations. */
data->context = AllocSetContextCreate(ctx->context,
- "logical replication output context",
+ "logical replication output context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/* Check if we support requested protocol */
if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM)
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("client sent proto_version=%d but we only support protocol %d or lower",
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("client sent proto_version=%d but we only support protocol %d or lower",
data->protocol_version, LOGICALREP_PROTO_VERSION_NUM)));
if (data->protocol_version < LOGICALREP_PROTO_MIN_VERSION_NUM)
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("client sent proto_version=%d but we only support protocol %d or higher",
- data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM)));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("client sent proto_version=%d but we only support protocol %d or higher",
+ data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM)));
if (list_length(data->publication_names) < 1)
ereport(ERROR,
static void
pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
- bool send_replication_origin = txn->origin_id != InvalidRepOriginId;
+ bool send_replication_origin = txn->origin_id != InvalidRepOriginId;
OutputPluginPrepareWrite(ctx, !send_replication_origin);
logicalrep_write_begin(ctx->out, txn);
if (send_replication_origin)
{
- char *origin;
+ char *origin;
/* Message boundary */
OutputPluginWrite(ctx, false);
* XXX: which behaviour do we want here?
*
* Alternatives:
- * - don't send origin message if origin name not found
- * (that's what we do now)
- * - throw error - that will break replication, not good
- * - send some special "unknown" origin
+ * - don't send origin message if origin name not found
+ * (that's what we do now)
+ * - throw error - that will break replication, not good
+ * - send some special "unknown" origin
*----------
*/
if (replorigin_by_oid(txn->origin_id, true, &origin))
*/
static void
pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
- XLogRecPtr commit_lsn)
+ XLogRecPtr commit_lsn)
{
OutputPluginUpdateProgress(ctx);
pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
Relation relation, ReorderBufferChange *change)
{
- PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
- MemoryContext old;
- RelationSyncEntry *relentry;
+ PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
+ MemoryContext old;
+ RelationSyncEntry *relentry;
relentry = get_rel_sync_entry(data, RelationGetRelid(relation));
break;
case REORDER_BUFFER_CHANGE_UPDATE:
{
- HeapTuple oldtuple = change->data.tp.oldtuple ?
- &change->data.tp.oldtuple->tuple : NULL;
+ HeapTuple oldtuple = change->data.tp.oldtuple ?
+ &change->data.tp.oldtuple->tuple : NULL;
OutputPluginPrepareWrite(ctx, true);
logicalrep_write_update(ctx->out, relation, oldtuple,
*/
static bool
pgoutput_origin_filter(LogicalDecodingContext *ctx,
- RepOriginId origin_id)
+ RepOriginId origin_id)
{
return false;
}
* of the ctx->context so it will be cleaned up by logical decoding machinery.
*/
static void
-pgoutput_shutdown(LogicalDecodingContext * ctx)
+pgoutput_shutdown(LogicalDecodingContext *ctx)
{
if (RelationSyncCache)
{
List *result = NIL;
ListCell *lc;
- foreach (lc, pubnames)
+ foreach(lc, pubnames)
{
- char *pubname = (char *) lfirst(lc);
- Publication *pub = GetPublicationByName(pubname, false);
+ char *pubname = (char *) lfirst(lc);
+ Publication *pub = GetPublicationByName(pubname, false);
result = lappend(result, pub);
}
publications_valid = false;
/*
- * Also invalidate per-relation cache so that next time the filtering
- * info is checked it will be updated with the new publication
- * settings.
+ * Also invalidate per-relation cache so that next time the filtering info
+ * is checked it will be updated with the new publication settings.
*/
rel_sync_cache_publication_cb(arg, cacheid, hashvalue);
}
static void
init_rel_sync_cache(MemoryContext cachectx)
{
- HASHCTL ctl;
+ HASHCTL ctl;
MemoryContext old_ctxt;
if (RelationSyncCache != NULL)
static RelationSyncEntry *
get_rel_sync_entry(PGOutputData *data, Oid relid)
{
- RelationSyncEntry *entry;
- bool found;
- MemoryContext oldctx;
+ RelationSyncEntry *entry;
+ bool found;
+ MemoryContext oldctx;
Assert(RelationSyncCache != NULL);
}
/*
- * Build publication cache. We can't use one provided by relcache
- * as relcache considers all publications given relation is in, but
- * here we only need to consider ones that the subscriber requested.
+ * Build publication cache. We can't use one provided by relcache as
+ * relcache considers all publications given relation is in, but here
+ * we only need to consider ones that the subscriber requested.
*/
entry->pubactions.pubinsert = entry->pubactions.pubupdate =
entry->pubactions.pubdelete = false;
static void
rel_sync_cache_relation_cb(Datum arg, Oid relid)
{
- RelationSyncEntry *entry;
+ RelationSyncEntry *entry;
/*
* We can get here if the plugin was used in SQL interface as the
* safe point.
*
* Getting invalidations for relations that aren't in the table is
- * entirely normal, since there's no way to unregister for an
- * invalidation event. So we don't care if it's found or not.
+ * entirely normal, since there's no way to unregister for an invalidation
+ * event. So we don't care if it's found or not.
*/
entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
HASH_FIND, NULL);
/*
- * Reset schema sent status as the relation definition may have
- * changed.
+ * Reset schema sent status as the relation definition may have changed.
*/
if (entry != NULL)
entry->schema_sent = false;
static void
rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
{
- HASH_SEQ_STATUS status;
- RelationSyncEntry *entry;
+ HASH_SEQ_STATUS status;
+ RelationSyncEntry *entry;
/*
* We can get here if the plugin was used in SQL interface as the
return;
/*
- * There is no way to find which entry in our cache the hash belongs to
- * so mark the whole cache as invalid.
+ * There is no way to find which entry in our cache the hash belongs to so
+ * mark the whole cache as invalid.
*/
hash_seq_init(&status, RelationSyncCache);
while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
/*
* Rename the slot directory on disk, so that we'll no longer recognize
* this as a valid slot. Note that if this fails, we've got to mark the
- * slot inactive before bailing out. If we're dropping an ephemeral or
- * a temporary slot, we better never fail hard as the caller won't expect
+ * slot inactive before bailing out. If we're dropping an ephemeral or a
+ * temporary slot, we better never fail hard as the caller won't expect
* the slot to survive and this might get called during error handling.
*/
if (rename(path, tmppath) == 0)
for (i = 0; i < max_replication_slots; i++)
{
ReplicationSlot *s;
- char *slotname;
- int active_pid;
+ char *slotname;
+ int active_pid;
s = &ReplicationSlotCtl->replication_slots[i];
/*
* Acquire a logical decoding slot, this will check for conflicting names.
- * Initially create persistent slot as ephemeral - that allows us to nicely
- * handle errors during initialization because it'll get dropped if this
- * transaction fails. We'll make it persistent at the end.
- * Temporary slots can be created as temporary from beginning as they get
- * dropped on error as well.
+ * Initially create persistent slot as ephemeral - that allows us to
+ * nicely handle errors during initialization because it'll get dropped if
+ * this transaction fails. We'll make it persistent at the end. Temporary
+ * slots can be created as temporary from beginning as they get dropped on
+ * error as well.
*/
ReplicationSlotCreate(NameStr(*name), true,
temporary ? RS_TEMPORARY : RS_EPHEMERAL);
* Create logical decoding context, to build the initial snapshot.
*/
ctx = CreateInitDecodingContext(NameStr(*plugin), NIL,
- false, /* do not build snapshot */
+ false, /* do not build snapshot */
logical_read_local_xlog_page, NULL, NULL,
NULL);
Datum values[PG_GET_REPLICATION_SLOTS_COLS];
bool nulls[PG_GET_REPLICATION_SLOTS_COLS];
- ReplicationSlotPersistency persistency;
+ ReplicationSlotPersistency persistency;
TransactionId xmin;
TransactionId catalog_xmin;
XLogRecPtr restart_lsn;
static int SyncRepWakeQueue(bool all, int mode);
static bool SyncRepGetSyncRecPtr(XLogRecPtr *writePtr,
- XLogRecPtr *flushPtr,
- XLogRecPtr *applyPtr,
- bool *am_sync);
+ XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr,
+ bool *am_sync);
static void SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr,
- XLogRecPtr *flushPtr,
- XLogRecPtr *applyPtr,
- List *sync_standbys);
+ XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr,
+ List *sync_standbys);
static void SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr,
- XLogRecPtr *flushPtr,
- XLogRecPtr *applyPtr,
- List *sync_standbys, uint8 nth);
+ XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr,
+ List *sync_standbys, uint8 nth);
static int SyncRepGetStandbyPriority(void);
static List *SyncRepGetSyncStandbysPriority(bool *am_sync);
static List *SyncRepGetSyncStandbysQuorum(bool *am_sync);
if (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY)
ereport(LOG,
(errmsg("standby \"%s\" is now a synchronous standby with priority %u",
- application_name, MyWalSnd->sync_standby_priority)));
+ application_name, MyWalSnd->sync_standby_priority)));
else
ereport(LOG,
(errmsg("standby \"%s\" is now a candidate for quorum synchronous standby",
*/
static bool
SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
- XLogRecPtr *applyPtr, bool *am_sync)
+ XLogRecPtr *applyPtr, bool *am_sync)
{
List *sync_standbys;
* oldest ones among sync standbys. In a quorum-based, they are the Nth
* latest ones.
*
- * SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest positions.
- * But we use SyncRepGetOldestSyncRecPtr() for that calculation because
- * it's a bit more efficient.
+ * SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest
+ * positions. But we use SyncRepGetOldestSyncRecPtr() for that calculation
+ * because it's a bit more efficient.
*
* XXX If the numbers of current and requested sync standbys are the same,
* we can use SyncRepGetOldestSyncRecPtr() to calculate the synced
SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
XLogRecPtr *applyPtr, List *sync_standbys)
{
- ListCell *cell;
+ ListCell *cell;
/*
- * Scan through all sync standbys and calculate the oldest
- * Write, Flush and Apply positions.
+ * Scan through all sync standbys and calculate the oldest Write, Flush
+ * and Apply positions.
*/
- foreach (cell, sync_standbys)
+ foreach(cell, sync_standbys)
{
- WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
+ WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
XLogRecPtr write;
XLogRecPtr flush;
XLogRecPtr apply;
*/
static void
SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
- XLogRecPtr *applyPtr, List *sync_standbys, uint8 nth)
+ XLogRecPtr *applyPtr, List *sync_standbys, uint8 nth)
{
- ListCell *cell;
- XLogRecPtr *write_array;
- XLogRecPtr *flush_array;
- XLogRecPtr *apply_array;
- int len;
- int i = 0;
+ ListCell *cell;
+ XLogRecPtr *write_array;
+ XLogRecPtr *flush_array;
+ XLogRecPtr *apply_array;
+ int len;
+ int i = 0;
len = list_length(sync_standbys);
write_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
flush_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
apply_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
- foreach (cell, sync_standbys)
+ foreach(cell, sync_standbys)
{
- WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
+ WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
SpinLockAcquire(&walsnd->mutex);
write_array[i] = walsnd->write;
static int
cmp_lsn(const void *a, const void *b)
{
- XLogRecPtr lsn1 = *((const XLogRecPtr *) a);
- XLogRecPtr lsn2 = *((const XLogRecPtr *) b);
+ XLogRecPtr lsn1 = *((const XLogRecPtr *) a);
+ XLogRecPtr lsn2 = *((const XLogRecPtr *) b);
if (lsn1 > lsn2)
return -1;
* sync standby. Otherwise it's set to false.
*/
List *
-SyncRepGetSyncStandbys(bool *am_sync)
+SyncRepGetSyncStandbys(bool *am_sync)
{
/* Set default result */
if (am_sync != NULL)
static List *
SyncRepGetSyncStandbysQuorum(bool *am_sync)
{
- List *result = NIL;
- int i;
+ List *result = NIL;
+ int i;
volatile WalSnd *walsnd; /* Use volatile pointer to prevent code
* rearrangement */
continue;
/*
- * Consider this standby as a candidate for quorum sync standbys
- * and append it to the result.
+ * Consider this standby as a candidate for quorum sync standbys and
+ * append it to the result.
*/
result = lappend_int(result, i);
if (am_sync != NULL && walsnd == MyWalSnd)
return 0;
/*
- * In quorum-based sync replication, all the standbys in the list
- * have the same priority, one.
+ * In quorum-based sync replication, all the standbys in the list have the
+ * same priority, one.
*/
return (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY) ? priority : 1;
}
{
TimestampTz now;
TransactionId nextXid;
- uint32 xmin_epoch, catalog_xmin_epoch;
- TransactionId xmin, catalog_xmin;
+ uint32 xmin_epoch,
+ catalog_xmin_epoch;
+ TransactionId xmin,
+ catalog_xmin;
static TimestampTz sendTime = 0;
+
/* initially true so we always send at least one feedback message */
static bool master_has_standby_xmin = true;
*
* Bailing out here also ensures that we don't send feedback until we've
* read our own replication slot state, so we don't tell the master to
- * discard needed xmin or catalog_xmin from any slots that may exist
- * on this replica.
+ * discard needed xmin or catalog_xmin from any slots that may exist on
+ * this replica.
*/
if (!HotStandbyActive())
return;
* excludes the catalog_xmin.
*/
xmin = GetOldestXmin(NULL,
- PROCARRAY_FLAGS_DEFAULT|PROCARRAY_SLOTS_XMIN);
+ PROCARRAY_FLAGS_DEFAULT | PROCARRAY_SLOTS_XMIN);
ProcArrayGetReplicationSlotXmin(&slot_xmin, &catalog_xmin);
GetNextXidAndEpoch(&nextXid, &xmin_epoch);
catalog_xmin_epoch = xmin_epoch;
if (nextXid < xmin)
- xmin_epoch --;
+ xmin_epoch--;
if (nextXid < catalog_xmin)
- catalog_xmin_epoch --;
+ catalog_xmin_epoch--;
elog(DEBUG2, "sending hot standby feedback xmin %u epoch %u catalog_xmin %u catalog_xmin_epoch %u",
xmin, xmin_epoch, catalog_xmin, catalog_xmin_epoch);
/* A sample associating a WAL location with the time it was written. */
typedef struct
{
- XLogRecPtr lsn;
+ XLogRecPtr lsn;
TimestampTz time;
} WalTimeSample;
/* A mechanism for tracking replication lag. */
static struct
{
- XLogRecPtr last_lsn;
+ XLogRecPtr last_lsn;
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE];
- int write_head;
- int read_heads[NUM_SYNC_REP_WAIT_MODE];
+ int write_head;
+ int read_heads[NUM_SYNC_REP_WAIT_MODE];
WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
-} LagTracker;
+} LagTracker;
/* Signal handlers */
static void WalSndSigHupHandler(SIGNAL_ARGS);
if (ThisTimeLineID == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("IDENTIFY_SYSTEM has not been run before START_REPLICATION")));
+ errmsg("IDENTIFY_SYSTEM has not been run before START_REPLICATION")));
/*
* We assume here that we're logging enough information in the WAL for
sendTimeLineIsHistoric = true;
/*
- * Check that the timeline the client requested exists, and
- * the requested start location is on that timeline.
+ * Check that the timeline the client requested exists, and the
+ * requested start location is on that timeline.
*/
timeLineHistory = readTimeLineHistory(ThisTimeLineID);
switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
* request to start replication from the beginning of the WAL
* segment that contains switchpoint, but on the new timeline, so
* that it doesn't end up with a partial segment. If you ask for
- * too old a starting point, you'll get an error later when we fail
- * to find the requested WAL segment in pg_wal.
+ * too old a starting point, you'll get an error later when we
+ * fail to find the requested WAL segment in pg_wal.
*
* XXX: we could be more strict here and only allow a startpoint
* that's older than the switchpoint, if it's still in the same
MemSet(nulls, false, sizeof(nulls));
/*
- * Need a tuple descriptor representing two columns.
- * int8 may seem like a surprising data type for this, but in theory
- * int4 would not be wide enough for this, as TimeLineID is unsigned.
+ * Need a tuple descriptor representing two columns. int8 may seem
+ * like a surprising data type for this, but in theory int4 would not
+ * be wide enough for this, as TimeLineID is unsigned.
*/
tupdesc = CreateTemplateTupleDesc(2, false);
TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli",
bool reserve_wal_given = false;
/* Parse options */
- foreach (lc, cmd->options)
+ foreach(lc, cmd->options)
{
DefElem *defel = (DefElem *) lfirst(lc);
if (cmd->kind == REPLICATION_KIND_LOGICAL)
{
LogicalDecodingContext *ctx;
- bool need_full_snapshot = false;
+ bool need_full_snapshot = false;
/*
* Do options check early so that we can bail before calling the
TimestampTz now = GetCurrentTimestamp();
/*
- * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS
- * to avoid flooding the lag tracker when we commit frequently.
+ * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to
+ * avoid flooding the lag tracker when we commit frequently.
*/
-#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000
+#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000
if (!TimestampDifferenceExceeds(sendTime, now,
WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS))
return;
SnapBuildClearExportedSnapshot();
/*
- * For aborted transactions, don't allow anything except pure SQL,
- * the exec_simple_query() will handle it correctly.
+ * For aborted transactions, don't allow anything except pure SQL, the
+ * exec_simple_query() will handle it correctly.
*/
if (IsAbortedTransactionBlockState() && !IsA(cmd_node, SQLCmd))
ereport(ERROR,
bool clearLagTimes;
TimestampTz now;
- static bool fullyAppliedLastTime = false;
+ static bool fullyAppliedLastTime = false;
/* the caller already consumed the msgtype byte */
writePtr = pq_getmsgint64(&reply_message);
}
if (!TransactionIdPrecedesOrEquals(xid, nextXid))
- return false; /* epoch OK, but it's wrapped around */
+ return false; /* epoch OK, but it's wrapped around */
return true;
}
*
* If we're using a replication slot we reserve the xmin via that,
* otherwise via the walsender's PGXACT entry. We can only track the
- * catalog xmin separately when using a slot, so we store the least
- * of the two provided when not using a slot.
+ &nbs