Post-PG 10 beta1 pgindent run
author: Bruce Momjian
Wed, 17 May 2017 20:31:56 +0000 (16:31 -0400)
committer: Bruce Momjian
Wed, 17 May 2017 20:31:56 +0000 (16:31 -0400)
perltidy run not included.

310 files changed:
contrib/bloom/blinsert.c
contrib/bloom/blutils.c
contrib/btree_gin/btree_gin.c
contrib/btree_gist/btree_cash.c
contrib/btree_gist/btree_date.c
contrib/btree_gist/btree_enum.c
contrib/btree_gist/btree_float4.c
contrib/btree_gist/btree_float8.c
contrib/btree_gist/btree_inet.c
contrib/btree_gist/btree_int2.c
contrib/btree_gist/btree_int4.c
contrib/btree_gist/btree_int8.c
contrib/btree_gist/btree_interval.c
contrib/btree_gist/btree_oid.c
contrib/btree_gist/btree_time.c
contrib/btree_gist/btree_ts.c
contrib/btree_gist/btree_utils_num.h
contrib/btree_gist/btree_utils_var.c
contrib/btree_gist/btree_utils_var.h
contrib/btree_gist/btree_uuid.c
contrib/dblink/dblink.c
contrib/oid2name/oid2name.c
contrib/pageinspect/brinfuncs.c
contrib/pageinspect/hashfuncs.c
contrib/pageinspect/rawpage.c
contrib/pg_standby/pg_standby.c
contrib/pg_visibility/pg_visibility.c
contrib/pgcrypto/openssl.c
contrib/pgcrypto/pgcrypto.c
contrib/pgrowlocks/pgrowlocks.c
contrib/pgstattuple/pgstatapprox.c
contrib/pgstattuple/pgstatindex.c
contrib/postgres_fdw/deparse.c
contrib/postgres_fdw/postgres_fdw.c
contrib/postgres_fdw/postgres_fdw.h
src/backend/access/brin/brin.c
src/backend/access/brin/brin_revmap.c
src/backend/access/brin/brin_xlog.c
src/backend/access/common/printsimple.c
src/backend/access/gin/ginvacuum.c
src/backend/access/hash/hash.c
src/backend/access/hash/hash_xlog.c
src/backend/access/hash/hashinsert.c
src/backend/access/hash/hashpage.c
src/backend/access/hash/hashutil.c
src/backend/access/heap/heapam.c
src/backend/access/nbtree/nbtree.c
src/backend/access/rmgrdesc/brindesc.c
src/backend/access/rmgrdesc/clogdesc.c
src/backend/access/rmgrdesc/gindesc.c
src/backend/access/spgist/spginsert.c
src/backend/access/transam/clog.c
src/backend/access/transam/commit_ts.c
src/backend/access/transam/subtrans.c
src/backend/access/transam/twophase.c
src/backend/access/transam/varsup.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xlogfuncs.c
src/backend/access/transam/xloginsert.c
src/backend/access/transam/xlogreader.c
src/backend/access/transam/xlogutils.c
src/backend/catalog/dependency.c
src/backend/catalog/heap.c
src/backend/catalog/objectaddress.c
src/backend/catalog/pg_collation.c
src/backend/catalog/pg_depend.c
src/backend/catalog/pg_inherits.c
src/backend/catalog/pg_namespace.c
src/backend/catalog/pg_publication.c
src/backend/catalog/pg_subscription.c
src/backend/commands/alter.c
src/backend/commands/analyze.c
src/backend/commands/collationcmds.c
src/backend/commands/copy.c
src/backend/commands/dbcommands.c
src/backend/commands/define.c
src/backend/commands/dropcmds.c
src/backend/commands/event_trigger.c
src/backend/commands/foreigncmds.c
src/backend/commands/publicationcmds.c
src/backend/commands/sequence.c
src/backend/commands/statscmds.c
src/backend/commands/subscriptioncmds.c
src/backend/commands/tablecmds.c
src/backend/commands/trigger.c
src/backend/commands/tsearchcmds.c
src/backend/commands/user.c
src/backend/commands/vacuumlazy.c
src/backend/commands/view.c
src/backend/executor/execAmi.c
src/backend/executor/execGrouping.c
src/backend/executor/execMain.c
src/backend/executor/execParallel.c
src/backend/executor/execProcnode.c
src/backend/executor/execReplication.c
src/backend/executor/execUtils.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeAppend.c
src/backend/executor/nodeBitmapHeapscan.c
src/backend/executor/nodeGather.c
src/backend/executor/nodeGatherMerge.c
src/backend/executor/nodeMergeAppend.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeProjectSet.c
src/backend/executor/nodeSetOp.c
src/backend/executor/nodeTableFuncscan.c
src/backend/executor/spi.c
src/backend/lib/rbtree.c
src/backend/libpq/auth.c
src/backend/libpq/crypt.c
src/backend/libpq/hba.c
src/backend/libpq/pqcomm.c
src/backend/nodes/copyfuncs.c
src/backend/nodes/nodeFuncs.c
src/backend/nodes/outfuncs.c
src/backend/nodes/tidbitmap.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/path/indxpath.c
src/backend/optimizer/plan/createplan.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/plan/setrefs.c
src/backend/optimizer/prep/prepunion.c
src/backend/optimizer/util/pathnode.c
src/backend/optimizer/util/plancat.c
src/backend/optimizer/util/relnode.c
src/backend/parser/analyze.c
src/backend/parser/parse_clause.c
src/backend/parser/parse_expr.c
src/backend/parser/parse_relation.c
src/backend/parser/parse_utilcmd.c
src/backend/port/posix_sema.c
src/backend/postmaster/bgworker.c
src/backend/postmaster/bgwriter.c
src/backend/postmaster/checkpointer.c
src/backend/postmaster/pgstat.c
src/backend/postmaster/postmaster.c
src/backend/postmaster/syslogger.c
src/backend/replication/basebackup.c
src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
src/backend/replication/logical/launcher.c
src/backend/replication/logical/logical.c
src/backend/replication/logical/logicalfuncs.c
src/backend/replication/logical/proto.c
src/backend/replication/logical/relation.c
src/backend/replication/logical/snapbuild.c
src/backend/replication/logical/tablesync.c
src/backend/replication/logical/worker.c
src/backend/replication/pgoutput/pgoutput.c
src/backend/replication/slot.c
src/backend/replication/slotfuncs.c
src/backend/replication/syncrep.c
src/backend/replication/walreceiver.c
src/backend/replication/walsender.c
src/backend/rewrite/rewriteDefine.c
src/backend/rewrite/rewriteHandler.c
src/backend/statistics/dependencies.c
src/backend/statistics/extended_stats.c
src/backend/statistics/mvdistinct.c
src/backend/storage/file/fd.c
src/backend/storage/lmgr/condition_variable.c
src/backend/storage/lmgr/lwlock.c
src/backend/storage/smgr/md.c
src/backend/tcop/utility.c
src/backend/tsearch/to_tsany.c
src/backend/tsearch/wparser.c
src/backend/utils/adt/cash.c
src/backend/utils/adt/dbsize.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/genfile.c
src/backend/utils/adt/json.c
src/backend/utils/adt/jsonb.c
src/backend/utils/adt/jsonfuncs.c
src/backend/utils/adt/like.c
src/backend/utils/adt/mac.c
src/backend/utils/adt/mac8.c
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/selfuncs.c
src/backend/utils/adt/txid.c
src/backend/utils/adt/varlena.c
src/backend/utils/adt/xml.c
src/backend/utils/cache/inval.c
src/backend/utils/cache/lsyscache.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/syscache.c
src/backend/utils/fmgr/dfmgr.c
src/backend/utils/fmgr/fmgr.c
src/backend/utils/mb/conv.c
src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
src/backend/utils/mb/encnames.c
src/backend/utils/misc/backend_random.c
src/backend/utils/misc/guc.c
src/backend/utils/sort/tuplesort.c
src/backend/utils/time/snapmgr.c
src/bin/initdb/findtimezone.c
src/bin/initdb/initdb.c
src/bin/pg_archivecleanup/pg_archivecleanup.c
src/bin/pg_basebackup/pg_basebackup.c
src/bin/pg_basebackup/pg_receivewal.c
src/bin/pg_basebackup/receivelog.c
src/bin/pg_basebackup/walmethods.c
src/bin/pg_basebackup/walmethods.h
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_dump/pg_backup.h
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_archiver.h
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dump.h
src/bin/pg_dump/pg_dumpall.c
src/bin/pg_resetwal/pg_resetwal.c
src/bin/pg_test_timing/pg_test_timing.c
src/bin/pg_upgrade/exec.c
src/bin/pg_upgrade/info.c
src/bin/pg_upgrade/option.c
src/bin/pg_waldump/pg_waldump.c
src/bin/psql/conditional.h
src/bin/psql/describe.c
src/bin/psql/describe.h
src/bin/psql/tab-complete.c
src/common/file_utils.c
src/common/saslprep.c
src/common/scram-common.c
src/common/sha2_openssl.c
src/common/unicode_norm.c
src/include/access/brin.h
src/include/access/brin_tuple.h
src/include/access/brin_xlog.h
src/include/access/clog.h
src/include/access/hash.h
src/include/access/hash_xlog.h
src/include/access/relscan.h
src/include/access/twophase.h
src/include/access/xact.h
src/include/access/xlog.h
src/include/access/xlogreader.h
src/include/access/xlogutils.h
src/include/c.h
src/include/catalog/dependency.h
src/include/catalog/pg_authid.h
src/include/catalog/pg_collation.h
src/include/catalog/pg_operator.h
src/include/catalog/pg_policy.h
src/include/catalog/pg_proc.h
src/include/catalog/pg_publication.h
src/include/catalog/pg_publication_rel.h
src/include/catalog/pg_sequence.h
src/include/catalog/pg_statistic_ext.h
src/include/catalog/pg_subscription.h
src/include/catalog/pg_subscription_rel.h
src/include/commands/createas.h
src/include/commands/explain.h
src/include/commands/subscriptioncmds.h
src/include/common/file_utils.h
src/include/common/scram-common.h
src/include/executor/executor.h
src/include/executor/nodeGatherMerge.h
src/include/executor/spi.h
src/include/executor/tablefunc.h
src/include/lib/simplehash.h
src/include/libpq/hba.h
src/include/mb/pg_wchar.h
src/include/nodes/execnodes.h
src/include/nodes/makefuncs.h
src/include/nodes/parsenodes.h
src/include/nodes/plannodes.h
src/include/nodes/relation.h
src/include/optimizer/cost.h
src/include/optimizer/pathnode.h
src/include/optimizer/paths.h
src/include/parser/parse_func.h
src/include/parser/parse_node.h
src/include/parser/parse_oper.h
src/include/replication/logical.h
src/include/replication/logicallauncher.h
src/include/replication/logicalproto.h
src/include/replication/logicalrelation.h
src/include/replication/pgoutput.h
src/include/replication/snapbuild.h
src/include/replication/syncrep.h
src/include/replication/walreceiver.h
src/include/replication/worker_internal.h
src/include/statistics/extended_stats_internal.h
src/include/storage/condition_variable.h
src/include/storage/proc.h
src/include/storage/procarray.h
src/include/tcop/utility.h
src/include/utils/jsonapi.h
src/include/utils/lsyscache.h
src/include/utils/pg_locale.h
src/include/utils/plancache.h
src/include/utils/queryenvironment.h
src/include/utils/regproc.h
src/include/utils/rel.h
src/include/utils/varlena.h
src/interfaces/libpq/fe-auth-scram.c
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/fe-secure-openssl.c
src/interfaces/libpq/libpq-fe.h
src/pl/plperl/plperl.c
src/pl/plpgsql/src/plpgsql.h
src/pl/plpython/plpy_exec.c
src/pl/plpython/plpy_typeio.c
src/pl/tcl/pltcl.c
src/port/dirmod.c
src/test/regress/regress.c
src/tools/testint128.c

index 913f1f8a51870667a26c7339dd5b5af255375b52..0d506e3c1ad6afa79133e02804aabfe1fa45c6b9 100644 (file)
@@ -165,11 +165,11 @@ blbuildempty(Relation index)
    BloomFillMetapage(index, metapage);
 
    /*
-    * Write the page and log it.  It might seem that an immediate sync
-    * would be sufficient to guarantee that the file exists on disk, but
-    * recovery itself might remove it while replaying, for example, an
-    * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record.  Therefore, we
-    * need this even when wal_level=minimal.
+    * Write the page and log it.  It might seem that an immediate sync would
+    * be sufficient to guarantee that the file exists on disk, but recovery
+    * itself might remove it while replaying, for example, an
+    * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record.  Therefore, we need
+    * this even when wal_level=minimal.
     */
    PageSetChecksumInplace(metapage, BLOOM_METAPAGE_BLKNO);
    smgrwrite(index->rd_smgr, INIT_FORKNUM, BLOOM_METAPAGE_BLKNO,
index f2eda67e0aeb87c5e61cdb93617e3d86938276b6..00a65875b03a711f8320b18deb09c7466bd56f18 100644 (file)
@@ -75,7 +75,7 @@ _PG_init(void)
        bl_relopt_tab[i + 1].optname = MemoryContextStrdup(TopMemoryContext,
                                                           buf);
        bl_relopt_tab[i + 1].opttype = RELOPT_TYPE_INT;
-       bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) + sizeof(int) * i;
+       bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) +sizeof(int) * i;
    }
 }
 
index 7191fbf54f7dc8eed034f5be987391320c292ce8..6f0c752b2e89e0a7d8229b17c5246f0207846840 100644 (file)
@@ -112,13 +112,13 @@ gin_btree_compare_prefix(FunctionCallInfo fcinfo)
                cmp;
 
    cmp = DatumGetInt32(CallerFInfoFunctionCall2(
-                           data->typecmp,
-                           fcinfo->flinfo,
-                           PG_GET_COLLATION(),
-                           (data->strategy == BTLessStrategyNumber ||
-                            data->strategy == BTLessEqualStrategyNumber)
-                           ? data->datum : a,
-                           b));
+                                                data->typecmp,
+                                                fcinfo->flinfo,
+                                                PG_GET_COLLATION(),
+                                  (data->strategy == BTLessStrategyNumber ||
+                                data->strategy == BTLessEqualStrategyNumber)
+                                                ? data->datum : a,
+                                                b));
 
    switch (data->strategy)
    {
@@ -438,16 +438,16 @@ GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
  */
 
 
-#define ENUM_IS_LEFTMOST(x)    ((x) == InvalidOid)
+#define ENUM_IS_LEFTMOST(x) ((x) == InvalidOid)
 
 PG_FUNCTION_INFO_V1(gin_enum_cmp);
 
 Datum
 gin_enum_cmp(PG_FUNCTION_ARGS)
 {
-   Oid     a = PG_GETARG_OID(0);
-   Oid     b = PG_GETARG_OID(1);
-   int     res = 0;
+   Oid         a = PG_GETARG_OID(0);
+   Oid         b = PG_GETARG_OID(1);
+   int         res = 0;
 
    if (ENUM_IS_LEFTMOST(a))
    {
@@ -460,11 +460,11 @@ gin_enum_cmp(PG_FUNCTION_ARGS)
    else
    {
        res = DatumGetInt32(CallerFInfoFunctionCall2(
-                               enum_cmp,
-                               fcinfo->flinfo,
-                               PG_GET_COLLATION(),
-                               ObjectIdGetDatum(a),
-                               ObjectIdGetDatum(b)));
+                                                    enum_cmp,
+                                                    fcinfo->flinfo,
+                                                    PG_GET_COLLATION(),
+                                                    ObjectIdGetDatum(a),
+                                                    ObjectIdGetDatum(b)));
    }
 
    PG_RETURN_INT32(res);
index ca0c86b5d82df8c42b4aef6c9bc94d67dbd390f4..1116ca084f3ab544985dd33ee1e7dbd8c2b225aa 100644 (file)
@@ -170,7 +170,7 @@ gbt_cash_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index c9daf340976285f664020b5b7bb5d5006c5816d9..28c7c2ac8611bc216f06f42e6bfcd5df7ac3aa55 100644 (file)
@@ -182,7 +182,7 @@ gbt_date_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index 5e46e782befe90de308d196a5587b50519d3502a..8bbadfe860c1a976bf8d5ed73d400bc3125857ae 100644 (file)
@@ -32,14 +32,14 @@ static bool
 gbt_enumgt(const void *a, const void *b, FmgrInfo *flinfo)
 {
    return DatumGetBool(
-       CallerFInfoFunctionCall2(enum_gt, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
+                       CallerFInfoFunctionCall2(enum_gt, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
        );
 }
 static bool
 gbt_enumge(const void *a, const void *b, FmgrInfo *flinfo)
 {
    return DatumGetBool(
-       CallerFInfoFunctionCall2(enum_ge, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
+                       CallerFInfoFunctionCall2(enum_ge, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
        );
 }
 static bool
@@ -74,12 +74,12 @@ gbt_enumkey_cmp(const void *a, const void *b, FmgrInfo *flinfo)
            return 0;
 
        return DatumGetInt32(
-           CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->upper), ObjectIdGetDatum(ib->upper))
+                            CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->upper), ObjectIdGetDatum(ib->upper))
            );
    }
 
    return DatumGetInt32(
-       CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->lower), ObjectIdGetDatum(ib->lower))
+                        CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->lower), ObjectIdGetDatum(ib->lower))
        );
 }
 
@@ -94,7 +94,7 @@ static const gbtree_ninfo tinfo =
    gbt_enumle,
    gbt_enumlt,
    gbt_enumkey_cmp,
-   NULL /* no KNN support at least for now */
+   NULL                        /* no KNN support at least for now */
 };
 
 
index 46b3edbab3b7497b938531bcd4d77c903dcfdca6..fe6993c226cda3fa78556ca48bd588252d000321 100644 (file)
@@ -163,7 +163,7 @@ gbt_float4_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index 7d653075c571db1a8722fb836cce13dcd8608612..13153d811fdfeccb986bc18bd0a777a22c074bc9 100644 (file)
@@ -170,7 +170,7 @@ gbt_float8_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index 7c95ee6814525d2fd69db4bdea84d010c5884495..e1561b37b73d7f9f007ef8cbdd4dc5f5396aa92d 100644 (file)
@@ -133,7 +133,7 @@ gbt_inet_consistent(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_BOOL(gbt_num_consistent(&key, (void *) &query,
-                                     &strategy, GIST_LEAF(entry), &tinfo, fcinfo->flinfo));
+                      &strategy, GIST_LEAF(entry), &tinfo, fcinfo->flinfo));
 }
 
 
index 3dae5e7c61dc8a3223e47d7f2266ae44bc532898..0a4498a693a028167e98444dd785073b61215a77 100644 (file)
@@ -170,7 +170,7 @@ gbt_int2_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index 213bfa3323f82b5329989cbb86e21d8ec627e50f..b29cbc81a3e66ee7475054f8342965d4d3627529 100644 (file)
@@ -171,7 +171,7 @@ gbt_int4_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index 62b079bba698816b1970a358d050c0c723a7b5aa..df1f5338c845023e2d50f30c0d63179a0cb46cf2 100644 (file)
@@ -171,7 +171,7 @@ gbt_int8_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index f41f471bf6e20bd86b9dcc2b3f4cd09956be650c..e4dd9e4238a0e7ab04a4604875e49433c161d7c7 100644 (file)
@@ -245,7 +245,7 @@ gbt_intv_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-            gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index e588faa15a33740bde8e7f9f3f667ac6a396a237..e0d6f2adf18236ea5ae100abfd15858ec2c0217f 100644 (file)
@@ -171,7 +171,7 @@ gbt_oid_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index a4a1ad5aebc17317dbadb6726ef960fb471d76db..5eec8323f553ad133d9435bb75ca756db5c50a0e 100644 (file)
@@ -235,7 +235,7 @@ gbt_time_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index 13bc39424b256c01f85ec3621715b927da769bfa..592466c948add31978d0abf47b3347645fee0909 100644 (file)
@@ -283,7 +283,7 @@ gbt_ts_distance(PG_FUNCTION_ARGS)
    key.upper = (GBT_NUMKEY *) &kkk->upper;
 
    PG_RETURN_FLOAT8(
-           gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
@@ -328,7 +328,7 @@ gbt_tstz_distance(PG_FUNCTION_ARGS)
    qqq = tstz_to_ts_gmt(query);
 
    PG_RETURN_FLOAT8(
-             gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                    gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index 17561fa9e4ef888a8e459a0c0330e0044a7e8c5f..8aab19396c494075162cf296922143b306a95c6b 100644 (file)
@@ -42,13 +42,13 @@ typedef struct
 
    /* Methods */
 
-   bool        (*f_gt) (const void *, const void *, FmgrInfo *);   /* greater than */
-   bool        (*f_ge) (const void *, const void *, FmgrInfo *);   /* greater or equal */
-   bool        (*f_eq) (const void *, const void *, FmgrInfo *);   /* equal */
-   bool        (*f_le) (const void *, const void *, FmgrInfo *);   /* less or equal */
-   bool        (*f_lt) (const void *, const void *, FmgrInfo *);   /* less than */
-   int         (*f_cmp) (const void *, const void *, FmgrInfo *);  /* key compare function */
-   float8      (*f_dist) (const void *, const void *, FmgrInfo *); /* key distance function */
+   bool        (*f_gt) (const void *, const void *, FmgrInfo *);       /* greater than */
+   bool        (*f_ge) (const void *, const void *, FmgrInfo *);       /* greater or equal */
+   bool        (*f_eq) (const void *, const void *, FmgrInfo *);       /* equal */
+   bool        (*f_le) (const void *, const void *, FmgrInfo *);       /* less or equal */
+   bool        (*f_lt) (const void *, const void *, FmgrInfo *);       /* less than */
+   int         (*f_cmp) (const void *, const void *, FmgrInfo *);      /* key compare function */
+   float8      (*f_dist) (const void *, const void *, FmgrInfo *);     /* key distance function */
 } gbtree_ninfo;
 
 
index e0b4b377796a5b3a74a2f48f57744065c7941936..3648adccef7b6b5552bf1b9f6190491e30cd31f9 100644 (file)
@@ -25,7 +25,7 @@ typedef struct
 {
    const gbtree_vinfo *tinfo;
    Oid         collation;
-   FmgrInfo *flinfo;
+   FmgrInfo   *flinfo;
 } gbt_vsrt_arg;
 
 
@@ -402,8 +402,8 @@ gbt_var_penalty(float *res, const GISTENTRY *o, const GISTENTRY *n,
        *res = 0.0;
    else if (!(((*tinfo->f_cmp) (nk.lower, ok.lower, collation, flinfo) >= 0 ||
                gbt_bytea_pf_match(ok.lower, nk.lower, tinfo)) &&
-              ((*tinfo->f_cmp) (nk.upper, ok.upper, collation, flinfo) <= 0 ||
-               gbt_bytea_pf_match(ok.upper, nk.upper, tinfo))))
+            ((*tinfo->f_cmp) (nk.upper, ok.upper, collation, flinfo) <= 0 ||
+             gbt_bytea_pf_match(ok.upper, nk.upper, tinfo))))
    {
        Datum       d = PointerGetDatum(0);
        double      dres;
index fbc76ce738564369c7217f4d3d1d9347f776ae94..04a356276bf23b73331f059c3a76e7c393b2fe68 100644 (file)
@@ -34,12 +34,12 @@ typedef struct
 
    /* Methods */
 
-   bool        (*f_gt) (const void *, const void *, Oid, FmgrInfo *);      /* greater than */
-   bool        (*f_ge) (const void *, const void *, Oid, FmgrInfo *);      /* greater equal */
-   bool        (*f_eq) (const void *, const void *, Oid, FmgrInfo *);      /* equal */
-   bool        (*f_le) (const void *, const void *, Oid, FmgrInfo *);      /* less equal */
-   bool        (*f_lt) (const void *, const void *, Oid, FmgrInfo *);      /* less than */
-   int32       (*f_cmp) (const void *, const void *, Oid, FmgrInfo *);     /* compare */
+   bool        (*f_gt) (const void *, const void *, Oid, FmgrInfo *);  /* greater than */
+   bool        (*f_ge) (const void *, const void *, Oid, FmgrInfo *);  /* greater equal */
+   bool        (*f_eq) (const void *, const void *, Oid, FmgrInfo *);  /* equal */
+   bool        (*f_le) (const void *, const void *, Oid, FmgrInfo *);  /* less equal */
+   bool        (*f_lt) (const void *, const void *, Oid, FmgrInfo *);  /* less than */
+   int32       (*f_cmp) (const void *, const void *, Oid, FmgrInfo *); /* compare */
    GBT_VARKEY *(*f_l2n) (GBT_VARKEY *, FmgrInfo *flinfo);      /* convert leaf to node */
 } gbtree_vinfo;
 
index 5ed80925d3e3ad979293b54c260219aa955af072..e67b8cc989894266dd44505d94181afd47e800b1 100644 (file)
@@ -150,7 +150,7 @@ gbt_uuid_consistent(PG_FUNCTION_ARGS)
 
    PG_RETURN_BOOL(
                   gbt_num_consistent(&key, (void *) query, &strategy,
-                                     GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                   GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
        );
 }
 
index 44b67daedba699a82de7f23f3670b78f9e852d4c..a6a3c09ff8e5c2499b26e636b58a4ac9b10e1583 100644 (file)
@@ -113,7 +113,7 @@ static char *generate_relation_name(Relation rel);
 static void dblink_connstr_check(const char *connstr);
 static void dblink_security_check(PGconn *conn, remoteConn *rconn);
 static void dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
-                            const char *dblink_context_msg, bool fail);
+                const char *dblink_context_msg, bool fail);
 static char *get_connect_string(const char *servername);
 static char *escape_param_str(const char *from);
 static void validate_pkattnums(Relation rel,
@@ -152,16 +152,19 @@ xpstrdup(const char *in)
    return pstrdup(in);
 }
 
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 dblink_res_internalerror(PGconn *conn, PGresult *res, const char *p2)
 {
    char       *msg = pchomp(PQerrorMessage(conn));
+
    if (res)
        PQclear(res);
    elog(ERROR, "%s: %s", p2, msg);
 }
 
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 dblink_conn_not_avail(const char *conname)
 {
    if (conname)
@@ -176,7 +179,7 @@ dblink_conn_not_avail(const char *conname)
 
 static void
 dblink_get_conn(char *conname_or_str,
-               PGconn * volatile *conn_p, char **conname_p, volatile bool *freeconn_p)
+     PGconn *volatile * conn_p, char **conname_p, volatile bool *freeconn_p)
 {
    remoteConn *rconn = getConnectionByName(conname_or_str);
    PGconn     *conn;
@@ -201,11 +204,12 @@ dblink_get_conn(char *conname_or_str,
        if (PQstatus(conn) == CONNECTION_BAD)
        {
            char       *msg = pchomp(PQerrorMessage(conn));
+
            PQfinish(conn);
            ereport(ERROR,
-                   (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
-                    errmsg("could not establish connection"),
-                    errdetail_internal("%s", msg)));
+              (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
+               errmsg("could not establish connection"),
+               errdetail_internal("%s", msg)));
        }
        dblink_security_check(conn, rconn);
        if (PQclientEncoding(conn) != GetDatabaseEncoding())
@@ -223,11 +227,12 @@ static PGconn *
 dblink_get_named_conn(const char *conname)
 {
    remoteConn *rconn = getConnectionByName(conname);
+
    if (rconn)
        return rconn->conn;
 
    dblink_conn_not_avail(conname);
-   return NULL;        /* keep compiler quiet */
+   return NULL;                /* keep compiler quiet */
 }
 
 static void
@@ -2699,9 +2704,9 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
    message_context = xpstrdup(pg_diag_context);
 
    /*
-    * If we don't get a message from the PGresult, try the PGconn.  This
-    * is needed because for connection-level failures, PQexec may just
-    * return NULL, not a PGresult at all.
+    * If we don't get a message from the PGresult, try the PGconn.  This is
+    * needed because for connection-level failures, PQexec may just return
+    * NULL, not a PGresult at all.
     */
    if (message_primary == NULL)
        message_primary = pchomp(PQerrorMessage(conn));
@@ -2732,7 +2737,7 @@ get_connect_string(const char *servername)
    ForeignServer *foreign_server = NULL;
    UserMapping *user_mapping;
    ListCell   *cell;
-   StringInfoData  buf;
+   StringInfoData buf;
    ForeignDataWrapper *fdw;
    AclResult   aclresult;
    char       *srvname;
@@ -2820,7 +2825,7 @@ static char *
 escape_param_str(const char *str)
 {
    const char *cp;
-   StringInfoData  buf;
+   StringInfoData buf;
 
    initStringInfo(&buf);
 
index ec93e4b8d0a9498bc58c2aa5855cadaf0ac5d293..aab71aed2faa889523c4555fa1cfcd99e67c45fa 100644 (file)
@@ -507,7 +507,7 @@ sql_exec_searchtables(PGconn *conn, struct options * opts)
    todo = psprintf(
                    "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
                    "FROM pg_catalog.pg_class c\n"
-                   "   LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
+        "  LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
                    "   LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
                    "   pg_catalog.pg_tablespace t\n"
                    "WHERE relkind IN (" CppAsString2(RELKIND_RELATION) ","
index dc9cc2d09aa3c67b2e1b2c7d544b1de91c2b136c..d52807dcdd6ab8964f16656dcc493d7009e88950 100644 (file)
@@ -226,8 +226,8 @@ brin_page_items(PG_FUNCTION_ARGS)
            if (ItemIdIsUsed(itemId))
            {
                dtup = brin_deform_tuple(bdesc,
-                                   (BrinTuple *) PageGetItem(page, itemId),
-                                   NULL);
+                                    (BrinTuple *) PageGetItem(page, itemId),
+                                        NULL);
                attno = 1;
                unusedItem = false;
            }
index 6e52969fd3447f7674fe93aaa523ab1547ab2348..228a147c9e81efc38ce3c8ba9da79a65a26fb799 100644 (file)
@@ -34,10 +34,10 @@ PG_FUNCTION_INFO_V1(hash_metapage_info);
  */
 typedef struct HashPageStat
 {
-   int     live_items;
-   int     dead_items;
-   int     page_size;
-   int     free_size;
+   int         live_items;
+   int         dead_items;
+   int         page_size;
+   int         free_size;
 
    /* opaque data */
    BlockNumber hasho_prevblkno;
@@ -45,7 +45,7 @@ typedef struct HashPageStat
    Bucket      hasho_bucket;
    uint16      hasho_flag;
    uint16      hasho_page_id;
-}  HashPageStat;
+} HashPageStat;
 
 
 /*
@@ -99,7 +99,7 @@ verify_hash_page(bytea *raw_page, int flags)
            case LH_BUCKET_PAGE | LH_OVERFLOW_PAGE:
                ereport(ERROR,
                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                        errmsg("page is not a hash bucket or overflow page")));
+                     errmsg("page is not a hash bucket or overflow page")));
            case LH_OVERFLOW_PAGE:
                ereport(ERROR,
                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -107,7 +107,7 @@ verify_hash_page(bytea *raw_page, int flags)
            default:
                elog(ERROR,
                     "hash page of type %08x not in mask %08x",
-                   pagetype, flags);
+                    pagetype, flags);
        }
    }
 
@@ -143,7 +143,7 @@ verify_hash_page(bytea *raw_page, int flags)
  * -------------------------------------------------
  */
 static void
-GetHashPageStatistics(Page page, HashPageStat * stat)
+GetHashPageStatistics(Page page, HashPageStat *stat)
 {
    OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
    HashPageOpaque opaque = (HashPageOpaque) PageGetSpecialPointer(page);
@@ -515,8 +515,8 @@ hash_metapage_info(PG_FUNCTION_ARGS)
                j;
    Datum       values[16];
    bool        nulls[16];
-   Datum       spares[HASH_MAX_SPLITPOINTS];
-   Datum       mapp[HASH_MAX_BITMAPS];
+   Datum       spares[HASH_MAX_SPLITPOINTS];
+   Datum       mapp[HASH_MAX_BITMAPS];
 
    if (!superuser())
        ereport(ERROR,
index 631e435a939c2a56c30ea3d7ac28bd623e69c681..f273dfa7cb2f5e877c64f22c4e1559a440cb087e 100644 (file)
@@ -311,9 +311,9 @@ page_checksum(PG_FUNCTION_ARGS)
    if (raw_page_size != BLCKSZ)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                errmsg("incorrect size of input page (%d bytes)", raw_page_size)));
+         errmsg("incorrect size of input page (%d bytes)", raw_page_size)));
 
    page = (PageHeader) VARDATA(raw_page);
 
-   PG_RETURN_INT16(pg_checksum_page((char *)page, blkno));
+   PG_RETURN_INT16(pg_checksum_page((char *) page, blkno));
 }
index 5703032397915ab11bd758a65c75393339fbae08..c37eaa395dae51c6a7917ed92e6509571417cf8b 100644 (file)
@@ -57,7 +57,7 @@ char     *xlogFilePath;       /* where we are going to restore to */
 char      *nextWALFileName;    /* the file we need to get from archive */
 char      *restartWALFileName; /* the file from which we can restart restore */
 char      *priorWALFileName;   /* the file we need to get from archive */
-char       WALFilePath[MAXPGPATH * 2];     /* the file path including archive */
+char       WALFilePath[MAXPGPATH * 2]; /* the file path including archive */
 char       restoreCommand[MAXPGPATH];  /* run this to restore */
 char       exclusiveCleanupFileName[MAXFNAMELEN];      /* the file we need to
                                                         * get from archive */
index ee3936e09a9ae4046e8e153b0f166e60a176b410..480f917d0871d528a599937f8dda0da35ebcaf30 100644 (file)
@@ -774,6 +774,6 @@ check_relation_relkind(Relation rel)
        rel->rd_rel->relkind != RELKIND_TOASTVALUE)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                errmsg("\"%s\" is not a table, materialized view, or TOAST table",
-                       RelationGetRelationName(rel))));
+          errmsg("\"%s\" is not a table, materialized view, or TOAST table",
+                 RelationGetRelationName(rel))));
 }
index 8063f34227043f0022283327a407671582347074..f71a933407d600f0fa81d37b20d5dfc437cdef7e 100644 (file)
@@ -238,7 +238,7 @@ px_find_digest(const char *name, PX_MD **res)
  * prototype for the EVP functions that return an algorithm, e.g.
  * EVP_aes_128_cbc().
  */
-typedef const EVP_CIPHER *(*ossl_EVP_cipher_func)(void);
+typedef const EVP_CIPHER *(*ossl_EVP_cipher_func) (void);
 
 /*
  * ossl_cipher contains the static information about each cipher.
@@ -706,13 +706,15 @@ static const struct ossl_cipher ossl_cast_cbc = {
 
 static const struct ossl_cipher ossl_aes_ecb = {
    ossl_aes_ecb_init,
-   NULL, /* EVP_aes_XXX_ecb(), determined in init function */
+   NULL,                       /* EVP_aes_XXX_ecb(), determined in init
+                                * function */
    128 / 8, 256 / 8
 };
 
 static const struct ossl_cipher ossl_aes_cbc = {
    ossl_aes_cbc_init,
-   NULL, /* EVP_aes_XXX_cbc(), determined in init function */
+   NULL,                       /* EVP_aes_XXX_cbc(), determined in init
+                                * function */
    128 / 8, 256 / 8
 };
 
index ccfdc20ed72c8cfd73475c2456ca5e869e6c4ac5..4e3516a86adfff3ae4e856ecdca74d6731a69fdc 100644 (file)
@@ -454,8 +454,8 @@ pg_random_uuid(PG_FUNCTION_ARGS)
    uint8      *buf = (uint8 *) palloc(UUID_LEN);
 
    /*
-    * Generate random bits. pg_backend_random() will do here, we don't
-    * promis UUIDs to be cryptographically random, when built with
+    * Generate random bits. pg_backend_random() will do here, we don't promis
+    * UUIDs to be cryptographically random, when built with
     * --disable-strong-random.
     */
    if (!pg_backend_random((char *) buf, UUID_LEN))
index 8dd561c02ad437692e720efdbb093e936148999e..00e2015c5c956b691417238686f4037692161678 100644 (file)
@@ -99,7 +99,10 @@ pgrowlocks(PG_FUNCTION_ARGS)
        relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
        rel = heap_openrv(relrv, AccessShareLock);
 
-       /* check permissions: must have SELECT on table or be in pg_stat_scan_tables */
+       /*
+        * check permissions: must have SELECT on table or be in
+        * pg_stat_scan_tables
+        */
        aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
                                      ACL_SELECT);
        if (aclresult != ACLCHECK_OK)
index 46c167a96a5e67d6bb39140dbff42126b80fbe2d..9facf6513784625be428c3bb4faf1cbadd00ea76 100644 (file)
@@ -31,7 +31,7 @@
 PG_FUNCTION_INFO_V1(pgstattuple_approx);
 PG_FUNCTION_INFO_V1(pgstattuple_approx_v1_5);
 
-Datum pgstattuple_approx_internal(Oid relid, FunctionCallInfo fcinfo);
+Datum      pgstattuple_approx_internal(Oid relid, FunctionCallInfo fcinfo);
 
 typedef struct output_type
 {
index 15aedec1bfd4f939952822970567357275b7e01f..03b387f6b6bf3fc20bfc3700108dc1a26702c7bd 100644 (file)
@@ -64,7 +64,7 @@ PG_FUNCTION_INFO_V1(pg_relpages_v1_5);
 PG_FUNCTION_INFO_V1(pg_relpagesbyid_v1_5);
 PG_FUNCTION_INFO_V1(pgstatginindex_v1_5);
 
-Datum pgstatginindex_internal(Oid relid, FunctionCallInfo fcinfo);
+Datum      pgstatginindex_internal(Oid relid, FunctionCallInfo fcinfo);
 
 #define IS_INDEX(r) ((r)->rd_rel->relkind == RELKIND_INDEX)
 #define IS_BTREE(r) ((r)->rd_rel->relam == BTREE_AM_OID)
@@ -113,17 +113,17 @@ typedef struct GinIndexStat
  */
 typedef struct HashIndexStat
 {
-   int32   version;
-   int32   space_per_page;
+   int32       version;
+   int32       space_per_page;
 
-   BlockNumber bucket_pages;
+   BlockNumber bucket_pages;
    BlockNumber overflow_pages;
    BlockNumber bitmap_pages;
    BlockNumber unused_pages;
 
-   int64   live_items;
-   int64   dead_items;
-   uint64  free_space;
+   int64       live_items;
+   int64       dead_items;
+   uint64      free_space;
 } HashIndexStat;
 
 static Datum pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo);
@@ -581,8 +581,8 @@ Datum
 pgstathashindex(PG_FUNCTION_ARGS)
 {
    Oid         relid = PG_GETARG_OID(0);
-   BlockNumber nblocks;
-   BlockNumber blkno;
+   BlockNumber nblocks;
+   BlockNumber blkno;
    Relation    rel;
    HashIndexStat stats;
    BufferAccessStrategy bstrategy;
@@ -591,7 +591,7 @@ pgstathashindex(PG_FUNCTION_ARGS)
    Datum       values[8];
    bool        nulls[8];
    Buffer      metabuf;
-   HashMetaPage    metap;
+   HashMetaPage metap;
    float8      free_percent;
    uint64      total_space;
 
@@ -648,13 +648,13 @@ pgstathashindex(PG_FUNCTION_ARGS)
                 MAXALIGN(sizeof(HashPageOpaqueData)))
            ereport(ERROR,
                    (errcode(ERRCODE_INDEX_CORRUPTED),
-                    errmsg("index \"%s\" contains corrupted page at block %u",
-                           RelationGetRelationName(rel),
-                           BufferGetBlockNumber(buf))));
+                  errmsg("index \"%s\" contains corrupted page at block %u",
+                         RelationGetRelationName(rel),
+                         BufferGetBlockNumber(buf))));
        else
        {
-           HashPageOpaque  opaque;
-           int     pagetype;
+           HashPageOpaque opaque;
+           int         pagetype;
 
            opaque = (HashPageOpaque) PageGetSpecialPointer(page);
            pagetype = opaque->hasho_flag & LH_PAGE_TYPE;
@@ -676,9 +676,9 @@ pgstathashindex(PG_FUNCTION_ARGS)
            else
                ereport(ERROR,
                        (errcode(ERRCODE_INDEX_CORRUPTED),
-                   errmsg("unexpected page type 0x%04X in HASH index \"%s\" block %u",
+                        errmsg("unexpected page type 0x%04X in HASH index \"%s\" block %u",
                            opaque->hasho_flag, RelationGetRelationName(rel),
-                           BufferGetBlockNumber(buf))));
+                               BufferGetBlockNumber(buf))));
        }
        UnlockReleaseBuffer(buf);
    }
@@ -735,12 +735,12 @@ static void
 GetHashPageStats(Page page, HashIndexStat *stats)
 {
    OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
-   int off;
+   int         off;
 
    /* count live and dead tuples, and free space */
    for (off = FirstOffsetNumber; off <= maxoff; off++)
    {
-       ItemId      id = PageGetItemId(page, off);
+       ItemId      id = PageGetItemId(page, off);
 
        if (!ItemIdIsDead(id))
            stats->live_items++;
index 1d5aa837635e84005b3b900a2fe3422b2e69ab10..482a3dd3016dbc2afc0c99a83205a48134dc7358 100644 (file)
@@ -171,8 +171,8 @@ static void deparseFromExprForRel(StringInfo buf, PlannerInfo *root,
                    RelOptInfo *joinrel, bool use_alias, List **params_list);
 static void deparseFromExpr(List *quals, deparse_expr_cxt *context);
 static void deparseRangeTblRef(StringInfo buf, PlannerInfo *root,
-                              RelOptInfo *foreignrel, bool make_subquery,
-                              List **params_list);
+                  RelOptInfo *foreignrel, bool make_subquery,
+                  List **params_list);
 static void deparseAggref(Aggref *node, deparse_expr_cxt *context);
 static void appendGroupByClause(List *tlist, deparse_expr_cxt *context);
 static void appendAggOrderBy(List *orderList, List *targetList,
@@ -185,9 +185,9 @@ static Node *deparseSortGroupClause(Index ref, List *tlist,
  * Helper functions
  */
 static bool is_subquery_var(Var *node, RelOptInfo *foreignrel,
-                           int *relno, int *colno);
+               int *relno, int *colno);
 static void get_relation_column_alias_ids(Var *node, RelOptInfo *foreignrel,
-                                         int *relno, int *colno);
+                             int *relno, int *colno);
 
 
 /*
@@ -1017,8 +1017,8 @@ deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs,
    {
        /*
         * For a relation that is deparsed as a subquery, emit expressions
-        * specified in the relation's reltarget.  Note that since this is
-        * for the subquery, no need to care about *retrieved_attrs.
+        * specified in the relation's reltarget.  Note that since this is for
+        * the subquery, no need to care about *retrieved_attrs.
         */
        deparseSubqueryTargetList(context);
    }
@@ -2189,8 +2189,8 @@ deparseVar(Var *node, deparse_expr_cxt *context)
 
    /*
     * If the Var belongs to the foreign relation that is deparsed as a
-    * subquery, use the relation and column alias to the Var provided
-    * by the subquery, instead of the remote name.
+    * subquery, use the relation and column alias to the Var provided by the
+    * subquery, instead of the remote name.
     */
    if (is_subquery_var(node, context->scanrel, &relno, &colno))
    {
index 18b4b01cfa429b99b897aff76fd487fb193c8c21..080cb0a0742b364bbf36dc987cf328b1c3b05aca 100644 (file)
@@ -417,8 +417,8 @@ static void add_foreign_grouping_paths(PlannerInfo *root,
 static void apply_server_options(PgFdwRelationInfo *fpinfo);
 static void apply_table_options(PgFdwRelationInfo *fpinfo);
 static void merge_fdw_options(PgFdwRelationInfo *fpinfo,
-                             const PgFdwRelationInfo *fpinfo_o,
-                             const PgFdwRelationInfo *fpinfo_i);
+                 const PgFdwRelationInfo *fpinfo_o,
+                 const PgFdwRelationInfo *fpinfo_i);
 
 
 /*
@@ -4170,8 +4170,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
    fpinfo->jointype = jointype;
 
    /*
-    * By default, both the input relations are not required to be deparsed
-    * as subqueries, but there might be some relations covered by the input
+    * By default, both the input relations are not required to be deparsed as
+    * subqueries, but there might be some relations covered by the input
     * relations that are required to be deparsed as subqueries, so save the
     * relids of those relations for later use by the deparser.
     */
@@ -4227,8 +4227,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
        case JOIN_FULL:
 
            /*
-            * In this case, if any of the input relations has conditions,
-            * we need to deparse that relation as a subquery so that the
+            * In this case, if any of the input relations has conditions, we
+            * need to deparse that relation as a subquery so that the
             * conditions can be evaluated before the join.  Remember it in
             * the fpinfo of this relation so that the deparser can take
             * appropriate action.  Also, save the relids of base relations
@@ -4305,7 +4305,7 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
     * Note that since this joinrel is at the end of the join_rel_list list
     * when we are called, we can get the position by list_length.
     */
-   Assert(fpinfo->relation_index == 0);    /* shouldn't be set yet */
+   Assert(fpinfo->relation_index == 0);        /* shouldn't be set yet */
    fpinfo->relation_index =
        list_length(root->parse->rtable) + list_length(root->join_rel_list);
 
@@ -4354,7 +4354,7 @@ add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel,
 static void
 apply_server_options(PgFdwRelationInfo *fpinfo)
 {
-   ListCell *lc;
+   ListCell   *lc;
 
    foreach(lc, fpinfo->server->options)
    {
@@ -4382,7 +4382,7 @@ apply_server_options(PgFdwRelationInfo *fpinfo)
 static void
 apply_table_options(PgFdwRelationInfo *fpinfo)
 {
-   ListCell *lc;
+   ListCell   *lc;
 
    foreach(lc, fpinfo->table->options)
    {
@@ -4439,7 +4439,7 @@ merge_fdw_options(PgFdwRelationInfo *fpinfo,
         * best.
         */
        fpinfo->use_remote_estimate = fpinfo_o->use_remote_estimate ||
-                                     fpinfo_i->use_remote_estimate;
+           fpinfo_i->use_remote_estimate;
 
        /*
         * Set fetch size to maximum of the joining sides, since we are
@@ -4869,7 +4869,7 @@ add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
    fpinfo->table = ifpinfo->table;
    fpinfo->server = ifpinfo->server;
    fpinfo->user = ifpinfo->user;
-   merge_fdw_options(fpinfo, ifpinfo , NULL);
+   merge_fdw_options(fpinfo, ifpinfo, NULL);
 
    /* Assess if it is safe to push down aggregation and grouping. */
    if (!foreign_grouping_ok(root, grouped_rel))
index 2bae799ccfe6c59a6c26852b8221475ddf6b20f1..25c950dd768f0974c3b405933218f2b641f3188c 100644 (file)
@@ -96,9 +96,9 @@ typedef struct PgFdwRelationInfo
    List       *grouped_tlist;
 
    /* Subquery information */
-   bool        make_outerrel_subquery; /* do we deparse outerrel as a
+   bool        make_outerrel_subquery; /* do we deparse outerrel as a
                                         * subquery? */
-   bool        make_innerrel_subquery; /* do we deparse innerrel as a
+   bool        make_innerrel_subquery; /* do we deparse innerrel as a
                                         * subquery? */
    Relids      lower_subquery_rels;    /* all relids appearing in lower
                                         * subqueries */
index 2594407754696bf2f839d88474d94242ed32c541..442a46140db6e24e6c948d2a3777541d87a05438 100644 (file)
@@ -364,7 +364,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
    MemoryContext oldcxt;
    MemoryContext perRangeCxt;
    BrinMemTuple *dtup;
-   BrinTuple    *btup = NULL;
+   BrinTuple  *btup = NULL;
    Size        btupsz = 0;
 
    opaque = (BrinOpaque *) scan->opaque;
@@ -920,13 +920,13 @@ brin_summarize_range(PG_FUNCTION_ARGS)
 Datum
 brin_desummarize_range(PG_FUNCTION_ARGS)
 {
-   Oid     indexoid = PG_GETARG_OID(0);
-   int64   heapBlk64 = PG_GETARG_INT64(1);
+   Oid         indexoid = PG_GETARG_OID(0);
+   int64       heapBlk64 = PG_GETARG_INT64(1);
    BlockNumber heapBlk;
-   Oid     heapoid;
-   Relation heapRel;
-   Relation indexRel;
-   bool    done;
+   Oid         heapoid;
+   Relation    heapRel;
+   Relation    indexRel;
+   bool        done;
 
    if (heapBlk64 > MaxBlockNumber || heapBlk64 < 0)
    {
@@ -977,7 +977,8 @@ brin_desummarize_range(PG_FUNCTION_ARGS)
                        RelationGetRelationName(indexRel))));
 
    /* the revmap does the hard work */
-   do {
+   do
+   {
        done = brinRevmapDesummarizeRange(indexRel, heapBlk);
    }
    while (!done);
index 9ed279bf42fa2b8ac1f4fee151f63ef766c3d5ff..fc8b10ab396709f684d56e02da9de5ea0dc26f39 100644 (file)
@@ -318,11 +318,11 @@ bool
 brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
 {
    BrinRevmap *revmap;
-   BlockNumber pagesPerRange;
+   BlockNumber pagesPerRange;
    RevmapContents *contents;
    ItemPointerData *iptr;
-   ItemPointerData invalidIptr;
-   BlockNumber revmapBlk;
+   ItemPointerData invalidIptr;
+   BlockNumber revmapBlk;
    Buffer      revmapBuf;
    Buffer      regBuf;
    Page        revmapPg;
@@ -415,7 +415,7 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
    if (RelationNeedsWAL(idxrel))
    {
        xl_brin_desummarize xlrec;
-       XLogRecPtr      recptr;
+       XLogRecPtr  recptr;
 
        xlrec.pagesPerRange = revmap->rm_pagesPerRange;
        xlrec.heapBlk = heapBlk;
index 8f5b5ceb3f29d4ddf96b795fc479f433c8b9084b..dff7198a39e6116bad787b7204dfa49cfc0ef7b2 100644 (file)
@@ -268,7 +268,7 @@ brin_xlog_desummarize_page(XLogReaderState *record)
    action = XLogReadBufferForRedo(record, 0, &buffer);
    if (action == BLK_NEEDS_REDO)
    {
-       ItemPointerData iptr;
+       ItemPointerData iptr;
 
        ItemPointerSetInvalid(&iptr);
        brinSetHeapBlockItemptr(buffer, xlrec->pagesPerRange, xlrec->heapBlk, iptr);
@@ -283,7 +283,7 @@ brin_xlog_desummarize_page(XLogReaderState *record)
    action = XLogReadBufferForRedo(record, 1, &buffer);
    if (action == BLK_NEEDS_REDO)
    {
-       Page    regPg = BufferGetPage(buffer);
+       Page        regPg = BufferGetPage(buffer);
 
        PageIndexTupleDeleteNoCompact(regPg, xlrec->regOffset);
 
index 5fe1c72da8385690b4f8a222319133d4730eb09d..851c3bf4debc775ab6f1db0e757950c50e35f7c2 100644 (file)
@@ -102,8 +102,8 @@ printsimple(TupleTableSlot *slot, DestReceiver *self)
 
            case INT4OID:
                {
-                   int32   num = DatumGetInt32(value);
-                   char    str[12];    /* sign, 10 digits and '\0' */
+                   int32       num = DatumGetInt32(value);
+                   char        str[12];        /* sign, 10 digits and '\0' */
 
                    pg_ltoa(num, str);
                    pq_sendcountedtext(&buf, str, strlen(str), false);
@@ -112,8 +112,8 @@ printsimple(TupleTableSlot *slot, DestReceiver *self)
 
            case INT8OID:
                {
-                   int64   num = DatumGetInt64(value);
-                   char    str[23];    /* sign, 21 digits and '\0' */
+                   int64       num = DatumGetInt64(value);
+                   char        str[23];        /* sign, 21 digits and '\0' */
 
                    pg_lltoa(num, str);
                    pq_sendcountedtext(&buf, str, strlen(str), false);
index 26c077a7bb9f025cc2b00f5c55006d895cd754f5..27e502a36064619e29f2ad3ce4f3f529bf6a7757 100644 (file)
@@ -140,9 +140,9 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
     * exclusive cleanup lock. This guarantees that no insertions currently
     * happen in this subtree. Caller also acquire Exclusive lock on deletable
     * page and is acquiring and releasing exclusive lock on left page before.
-    * Left page was locked and released. Then parent and this page are locked.
-    * We acquire left page lock here only to mark page dirty after changing
-    * right pointer.
+    * Left page was locked and released. Then parent and this page are
+    * locked. We acquire left page lock here only to mark page dirty after
+    * changing right pointer.
     */
    lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
                                 RBM_NORMAL, gvs->strategy);
@@ -258,7 +258,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
    buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
                                RBM_NORMAL, gvs->strategy);
 
-   if(!isRoot)
+   if (!isRoot)
        LockBuffer(buffer, GIN_EXCLUSIVE);
 
    page = BufferGetPage(buffer);
@@ -295,8 +295,8 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
        }
    }
 
-   if(!isRoot)
-           LockBuffer(buffer, GIN_UNLOCK);
+   if (!isRoot)
+       LockBuffer(buffer, GIN_UNLOCK);
 
    ReleaseBuffer(buffer);
 
@@ -326,7 +326,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
                                RBM_NORMAL, gvs->strategy);
    page = BufferGetPage(buffer);
 
-   ginTraverseLock(buffer,false);
+   ginTraverseLock(buffer, false);
 
    Assert(GinPageIsData(page));
 
@@ -347,15 +347,15 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
    }
    else
    {
-       OffsetNumber    i;
-       bool            hasEmptyChild = FALSE;
-       bool            hasNonEmptyChild = FALSE;
-       OffsetNumber    maxoff = GinPageGetOpaque(page)->maxoff;
-       BlockNumber*    children = palloc(sizeof(BlockNumber) * (maxoff + 1));
+       OffsetNumber i;
+       bool        hasEmptyChild = FALSE;
+       bool        hasNonEmptyChild = FALSE;
+       OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
+       BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1));
 
        /*
-        * Read all children BlockNumbers.
-        * Not sure it is safe if there are many concurrent vacuums.
+        * Read all children BlockNumbers. Not sure it is safe if there are
+        * many concurrent vacuums.
         */
 
        for (i = FirstOffsetNumber; i <= maxoff; i++)
@@ -380,26 +380,26 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
        vacuum_delay_point();
 
        /*
-        * All subtree is empty - just return TRUE to indicate that parent must
-        * do a cleanup. Unless we are ROOT an there is way to go upper.
+        * All subtree is empty - just return TRUE to indicate that parent
+        * must do a cleanup. Unless we are ROOT an there is way to go upper.
         */
 
-       if(hasEmptyChild && !hasNonEmptyChild && !isRoot)
+       if (hasEmptyChild && !hasNonEmptyChild && !isRoot)
            return TRUE;
 
-       if(hasEmptyChild)
+       if (hasEmptyChild)
        {
            DataPageDeleteStack root,
                       *ptr,
                       *tmp;
 
            buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
-                                           RBM_NORMAL, gvs->strategy);
+                                       RBM_NORMAL, gvs->strategy);
            LockBufferForCleanup(buffer);
 
            memset(&root, 0, sizeof(DataPageDeleteStack));
-               root.leftBlkno = InvalidBlockNumber;
-               root.isRoot = TRUE;
+           root.leftBlkno = InvalidBlockNumber;
+           root.isRoot = TRUE;
 
            ginScanToDelete(gvs, blkno, TRUE, &root, InvalidOffsetNumber);
 
index df54638f3e06f4b1e51aef9046028bd872355224..d0b0547491f7c34df3dca328eef3e1eb2851c3d0 100644 (file)
@@ -333,12 +333,12 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
        if (scan->kill_prior_tuple)
        {
            /*
-            * Yes, so remember it for later. (We'll deal with all such
-            * tuples at once right after leaving the index page or at
-            * end of scan.) In case if caller reverses the indexscan
-            * direction it is quite possible that the same item might
-            * get entered multiple times. But, we don't detect that;
-            * instead, we just forget any excess entries.
+            * Yes, so remember it for later. (We'll deal with all such tuples
+            * at once right after leaving the index page or at end of scan.)
+            * In case if caller reverses the indexscan direction it is quite
+            * possible that the same item might get entered multiple times.
+            * But, we don't detect that; instead, we just forget any excess
+            * entries.
             */
            if (so->killedItems == NULL)
                so->killedItems = palloc(MaxIndexTuplesPerPage *
@@ -348,7 +348,7 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
            {
                so->killedItems[so->numKilled].heapTid = so->hashso_heappos;
                so->killedItems[so->numKilled].indexOffset =
-                           ItemPointerGetOffsetNumber(&(so->hashso_curpos));
+                   ItemPointerGetOffsetNumber(&(so->hashso_curpos));
                so->numKilled++;
            }
        }
@@ -477,9 +477,8 @@ hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
    Relation    rel = scan->indexRelation;
 
    /*
-    * Before leaving current page, deal with any killed items.
-    * Also, ensure that we acquire lock on current page before
-    * calling _hash_kill_items.
+    * Before leaving current page, deal with any killed items. Also, ensure
+    * that we acquire lock on current page before calling _hash_kill_items.
     */
    if (so->numKilled > 0)
    {
@@ -516,9 +515,8 @@ hashendscan(IndexScanDesc scan)
    Relation    rel = scan->indexRelation;
 
    /*
-    * Before leaving current page, deal with any killed items.
-    * Also, ensure that we acquire lock on current page before
-    * calling _hash_kill_items.
+    * Before leaving current page, deal with any killed items. Also, ensure
+    * that we acquire lock on current page before calling _hash_kill_items.
     */
    if (so->numKilled > 0)
    {
@@ -889,8 +887,8 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 
            /*
             * Let us mark the page as clean if vacuum removes the DEAD tuples
-            * from an index page. We do this by clearing LH_PAGE_HAS_DEAD_TUPLES
-            * flag.
+            * from an index page. We do this by clearing
+            * LH_PAGE_HAS_DEAD_TUPLES flag.
             */
            if (tuples_removed && *tuples_removed > 0 &&
                H_HAS_DEAD_TUPLES(opaque))
index d1c0e6904fcd58a8d0febf63ab70f6f6bf86faad..0ea11b2e7422b20cc5a8d6ff3d3c3cc66937b94a 100644 (file)
@@ -950,22 +950,22 @@ hash_xlog_update_meta_page(XLogReaderState *record)
 static TransactionId
 hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 {
-   xl_hash_vacuum_one_page *xlrec;
-   OffsetNumber    *unused;
+   xl_hash_vacuum_one_page *xlrec;
+   OffsetNumber *unused;
    Buffer      ibuffer,
                hbuffer;
    Page        ipage,
                hpage;
-   RelFileNode rnode;
-   BlockNumber blkno;
+   RelFileNode rnode;
+   BlockNumber blkno;
    ItemId      iitemid,
                hitemid;
    IndexTuple  itup;
-   HeapTupleHeader htuphdr;
-   BlockNumber hblkno;
-   OffsetNumber    hoffnum;
-   TransactionId   latestRemovedXid = InvalidTransactionId;
-   int     i;
+   HeapTupleHeader htuphdr;
+   BlockNumber hblkno;
+   OffsetNumber hoffnum;
+   TransactionId latestRemovedXid = InvalidTransactionId;
+   int         i;
 
    xlrec = (xl_hash_vacuum_one_page *) XLogRecGetData(record);
 
@@ -984,9 +984,9 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
        return latestRemovedXid;
 
    /*
-    * Check if WAL replay has reached a consistent database state. If not,
-    * we must PANIC. See the definition of btree_xlog_delete_get_latestRemovedXid
-    * for more details.
+    * Check if WAL replay has reached a consistent database state. If not, we
+    * must PANIC. See the definition of
+    * btree_xlog_delete_get_latestRemovedXid for more details.
     */
    if (!reachedConsistency)
        elog(PANIC, "hash_xlog_vacuum_get_latestRemovedXid: cannot operate with inconsistent data");
@@ -1098,11 +1098,11 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 static void
 hash_xlog_vacuum_one_page(XLogReaderState *record)
 {
-   XLogRecPtr lsn = record->EndRecPtr;
+   XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_vacuum_one_page *xldata;
-   Buffer buffer;
-   Buffer metabuf;
-   Page page;
+   Buffer      buffer;
+   Buffer      metabuf;
+   Page        page;
    XLogRedoAction action;
    HashPageOpaque pageopaque;
 
@@ -1123,7 +1123,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
    if (InHotStandby)
    {
        TransactionId latestRemovedXid =
-                   hash_xlog_vacuum_get_latestRemovedXid(record);
+       hash_xlog_vacuum_get_latestRemovedXid(record);
        RelFileNode rnode;
 
        XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
@@ -1146,8 +1146,8 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
        }
 
        /*
-        * Mark the page as not containing any LP_DEAD items. See comments
-        * in _hash_vacuum_one_page() for details.
+        * Mark the page as not containing any LP_DEAD items. See comments in
+        * _hash_vacuum_one_page() for details.
         */
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
        pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
@@ -1160,7 +1160,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
 
    if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
    {
-       Page metapage;
+       Page        metapage;
        HashMetaPage metap;
 
        metapage = BufferGetPage(metabuf);
index 8699b5bc30b46bb41569c24d6b30d4a764bb5fdc..01c8d8006c0322bb371b6fc1b16c8655968a675a 100644 (file)
@@ -24,7 +24,7 @@
 #include "storage/buf_internals.h"
 
 static void _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
-                                 RelFileNode hnode);
+                     RelFileNode hnode);
 
 /*
  * _hash_doinsert() -- Handle insertion of a single index tuple.
@@ -63,8 +63,8 @@ restart_insert:
 
    /*
     * Read the metapage.  We don't lock it yet; HashMaxItemSize() will
-    * examine pd_pagesize_version, but that can't change so we can examine
-    * it without a lock.
+    * examine pd_pagesize_version, but that can't change so we can examine it
+    * without a lock.
     */
    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
    metapage = BufferGetPage(metabuf);
@@ -126,10 +126,9 @@ restart_insert:
        BlockNumber nextblkno;
 
        /*
-        * Check if current page has any DEAD tuples. If yes,
-        * delete these tuples and see if we can get a space for
-        * the new item to be inserted before moving to the next
-        * page in the bucket chain.
+        * Check if current page has any DEAD tuples. If yes, delete these
+        * tuples and see if we can get a space for the new item to be
+        * inserted before moving to the next page in the bucket chain.
         */
        if (H_HAS_DEAD_TUPLES(pageopaque))
        {
@@ -139,7 +138,7 @@ restart_insert:
                _hash_vacuum_one_page(rel, metabuf, buf, heapRel->rd_node);
 
                if (PageGetFreeSpace(page) >= itemsz)
-                   break;              /* OK, now we have enough space */
+                   break;      /* OK, now we have enough space */
            }
        }
 
@@ -337,13 +336,13 @@ static void
 _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
                      RelFileNode hnode)
 {
-   OffsetNumber    deletable[MaxOffsetNumber];
-   int ndeletable = 0;
+   OffsetNumber deletable[MaxOffsetNumber];
+   int         ndeletable = 0;
    OffsetNumber offnum,
-                maxoff;
-   Page    page = BufferGetPage(buf);
-   HashPageOpaque  pageopaque;
-   HashMetaPage    metap;
+               maxoff;
+   Page        page = BufferGetPage(buf);
+   HashPageOpaque pageopaque;
+   HashMetaPage metap;
 
    /* Scan each tuple in page to see if it is marked as LP_DEAD */
    maxoff = PageGetMaxOffsetNumber(page);
@@ -351,7 +350,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
         offnum <= maxoff;
         offnum = OffsetNumberNext(offnum))
    {
-       ItemId  itemId = PageGetItemId(page, offnum);
+       ItemId      itemId = PageGetItemId(page, offnum);
 
        if (ItemIdIsDead(itemId))
            deletable[ndeletable++] = offnum;
@@ -360,8 +359,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
    if (ndeletable > 0)
    {
        /*
-        * Write-lock the meta page so that we can decrement
-        * tuple count.
+        * Write-lock the meta page so that we can decrement tuple count.
         */
        LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 
@@ -374,8 +372,8 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
         * Mark the page as not containing any LP_DEAD items. This is not
         * certainly true (there might be some that have recently been marked,
         * but weren't included in our target-item list), but it will almost
-        * always be true and it doesn't seem worth an additional page scan
-        * to check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
+        * always be true and it doesn't seem worth an additional page scan to
+        * check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
         * anyway.
         */
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
@@ -390,7 +388,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
        /* XLOG stuff */
        if (RelationNeedsWAL(rel))
        {
-           xl_hash_vacuum_one_page xlrec;
+           xl_hash_vacuum_one_page xlrec;
            XLogRecPtr  recptr;
 
            xlrec.hnode = hnode;
@@ -401,12 +399,12 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
            XLogRegisterData((char *) &xlrec, SizeOfHashVacuumOnePage);
 
            /*
-            * We need the target-offsets array whether or not we store the whole
-            * buffer, to allow us to find the latestRemovedXid on a standby
-            * server.
+            * We need the target-offsets array whether or not we store the
+            * whole buffer, to allow us to find the latestRemovedXid on a
+            * standby server.
             */
            XLogRegisterData((char *) deletable,
-                       ndeletable * sizeof(OffsetNumber));
+                            ndeletable * sizeof(OffsetNumber));
 
            XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
 
@@ -417,9 +415,10 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
        }
 
        END_CRIT_SECTION();
+
        /*
-        * Releasing write lock on meta page as we have updated
-        * the tuple count.
+        * Releasing write lock on meta page as we have updated the tuple
+        * count.
         */
        LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
    }
index bf1ffff4e8c31382b00cb21db0d142545a1b74de..4544889294a80c37ba8d2758f3d154693092ad1f 100644 (file)
@@ -177,8 +177,8 @@ _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
    pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 
    /*
-    * Set hasho_prevblkno with current hashm_maxbucket. This value will
-    * be used to validate cached HashMetaPageData. See
+    * Set hasho_prevblkno with current hashm_maxbucket. This value will be
+    * used to validate cached HashMetaPageData. See
     * _hash_getbucketbuf_from_hashkey().
     */
    pageopaque->hasho_prevblkno = max_bucket;
@@ -509,8 +509,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
     * Choose the number of initial bucket pages to match the fill factor
     * given the estimated number of tuples.  We round up the result to the
     * total number of buckets which has to be allocated before using its
-    * _hashm_spare element. However always force at least 2 bucket pages.
-    * The upper limit is determined by considerations explained in
+    * _hashm_spare element. However always force at least 2 bucket pages. The
+    * upper limit is determined by considerations explained in
     * _hash_expandtable().
     */
    dnumbuckets = num_tuples / ffactor;
@@ -568,8 +568,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
    metap->hashm_maxbucket = num_buckets - 1;
 
    /*
-    * Set highmask as next immediate ((2 ^ x) - 1), which should be sufficient
-    * to cover num_buckets.
+    * Set highmask as next immediate ((2 ^ x) - 1), which should be
+    * sufficient to cover num_buckets.
     */
    metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
    metap->hashm_lowmask = (metap->hashm_highmask >> 1);
@@ -748,8 +748,8 @@ restart_expand:
    {
        /*
         * Copy bucket mapping info now; refer to the comment in code below
-        * where we copy this information before calling _hash_splitbucket
-        * to see why this is okay.
+        * where we copy this information before calling _hash_splitbucket to
+        * see why this is okay.
         */
        maxbucket = metap->hashm_maxbucket;
        highmask = metap->hashm_highmask;
@@ -792,8 +792,7 @@ restart_expand:
         * We treat allocation of buckets as a separate WAL-logged action.
         * Even if we fail after this operation, won't leak bucket pages;
         * rather, the next split will consume this space. In any case, even
-        * without failure we don't use all the space in one split
-        * operation.
+        * without failure we don't use all the space in one split operation.
         */
        buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
        if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
@@ -870,10 +869,9 @@ restart_expand:
 
    /*
     * Mark the old bucket to indicate that split is in progress.  (At
-    * operation end, we will clear the split-in-progress flag.)  Also,
-    * for a primary bucket page, hasho_prevblkno stores the number of
-    * buckets that existed as of the last split, so we must update that
-    * value here.
+    * operation end, we will clear the split-in-progress flag.)  Also, for a
+    * primary bucket page, hasho_prevblkno stores the number of buckets that
+    * existed as of the last split, so we must update that value here.
     */
    oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
    oopaque->hasho_prevblkno = maxbucket;
@@ -1008,8 +1006,8 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
 
    /*
     * Initialize the page.  Just zeroing the page won't work; see
-    * _hash_freeovflpage for similar usage.  We take care to make the
-    * special space valid for the benefit of tools such as pageinspect.
+    * _hash_freeovflpage for similar usage.  We take care to make the special
+    * space valid for the benefit of tools such as pageinspect.
     */
    _hash_pageinit(page, BLCKSZ);
 
@@ -1462,11 +1460,11 @@ log_split_page(Relation rel, Buffer buf)
  * _hash_getcachedmetap() -- Returns cached metapage data.
  *
  * If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on
- *  the metapage.  If not set, we'll set it before returning if we have to
- *  refresh the cache, and return with a pin but no lock on it; caller is
- *  responsible for releasing the pin.
+ * the metapage.  If not set, we'll set it before returning if we have to
+ * refresh the cache, and return with a pin but no lock on it; caller is
+ * responsible for releasing the pin.
  *
- *  We refresh the cache if it's not initialized yet or force_refresh is true.
+ * We refresh the cache if it's not initialized yet or force_refresh is true.
  */
 HashMetaPage
 _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
@@ -1476,13 +1474,13 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
    Assert(metabuf);
    if (force_refresh || rel->rd_amcache == NULL)
    {
-       char   *cache = NULL;
+       char       *cache = NULL;
 
        /*
-        * It's important that we don't set rd_amcache to an invalid
-        * value.  Either MemoryContextAlloc or _hash_getbuf could fail,
-        * so don't install a pointer to the newly-allocated storage in the
-        * actual relcache entry until both have succeeeded.
+        * It's important that we don't set rd_amcache to an invalid value.
+        * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
+        * install a pointer to the newly-allocated storage in the actual
+        * relcache entry until both have succeeeded.
         */
        if (rel->rd_amcache == NULL)
            cache = MemoryContextAlloc(rel->rd_indexcxt,
@@ -1517,7 +1515,7 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
  * us an opportunity to use the previously saved metapage contents to reach
  * the target bucket buffer, instead of reading from the metapage every time.
  * This saves one buffer access every time we want to reach the target bucket
- *  buffer, which is very helpful savings in bufmgr traffic and contention.
+ * buffer, which is very helpful savings in bufmgr traffic and contention.
  *
  * The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
  * bucket buffer has to be locked for reading or writing.
index 9f832f2544fcfadfe8b3d768ee4391793369b032..c513c3b842ed6ddaff13118ef5015c2a2e866309 100644 (file)
@@ -528,20 +528,21 @@ _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket,
 void
 _hash_kill_items(IndexScanDesc scan)
 {
-   HashScanOpaque  so = (HashScanOpaque) scan->opaque;
-   Page    page;
-   HashPageOpaque  opaque;
-   OffsetNumber    offnum, maxoff;
-   int numKilled = so->numKilled;
-   int     i;
-   bool    killedsomething = false;
+   HashScanOpaque so = (HashScanOpaque) scan->opaque;
+   Page        page;
+   HashPageOpaque opaque;
+   OffsetNumber offnum,
+               maxoff;
+   int         numKilled = so->numKilled;
+   int         i;
+   bool        killedsomething = false;
 
    Assert(so->numKilled > 0);
    Assert(so->killedItems != NULL);
 
    /*
-    * Always reset the scan state, so we don't look for same
-    * items on other pages.
+    * Always reset the scan state, so we don't look for same items on other
+    * pages.
     */
    so->numKilled = 0;
 
@@ -555,7 +556,7 @@ _hash_kill_items(IndexScanDesc scan)
 
        while (offnum <= maxoff)
        {
-           ItemId  iid = PageGetItemId(page, offnum);
+           ItemId      iid = PageGetItemId(page, offnum);
            IndexTuple  ituple = (IndexTuple) PageGetItem(page, iid);
 
            if (ItemPointerEquals(&ituple->t_tid, &so->killedItems[i].heapTid))
@@ -563,15 +564,15 @@ _hash_kill_items(IndexScanDesc scan)
                /* found the item */
                ItemIdMarkDead(iid);
                killedsomething = true;
-               break;      /* out of inner search loop */
+               break;          /* out of inner search loop */
            }
            offnum = OffsetNumberNext(offnum);
        }
    }
 
    /*
-    * Since this can be redone later if needed, mark as dirty hint.
-    * Whenever we mark anything LP_DEAD, we also set the page's
+    * Since this can be redone later if needed, mark as dirty hint. Whenever
+    * we mark anything LP_DEAD, we also set the page's
     * LH_PAGE_HAS_DEAD_TUPLES flag, which is likewise just a hint.
     */
    if (killedsomething)
index 0c3e2b065a0f23d845f67af6d6f7dc284bb5616b..e890e08c9ab4d835c50b4ee45660c2dbf99d98b3 100644 (file)
@@ -3518,10 +3518,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
     *
     * For HOT considerations, this is wasted effort if we fail to update or
     * have to put the new tuple on a different page.  But we must compute the
-    * list before obtaining buffer lock --- in the worst case, if we are doing
-    * an update on one of the relevant system catalogs, we could deadlock if
-    * we try to fetch the list later.  In any case, the relcache caches the
-    * data so this is usually pretty cheap.
+    * list before obtaining buffer lock --- in the worst case, if we are
+    * doing an update on one of the relevant system catalogs, we could
+    * deadlock if we try to fetch the list later.  In any case, the relcache
+    * caches the data so this is usually pretty cheap.
     *
     * We also need columns used by the replica identity and columns that are
     * considered the "key" of rows in the table.
@@ -3540,15 +3540,16 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
    page = BufferGetPage(buffer);
 
    interesting_attrs = NULL;
+
    /*
     * If the page is already full, there is hardly any chance of doing a HOT
     * update on this page. It might be wasteful effort to look for index
-    * column updates only to later reject HOT updates for lack of space in the
-    * same page. So we be conservative and only fetch hot_attrs if the page is
-    * not already full. Since we are already holding a pin on the buffer,
-    * there is no chance that the buffer can get cleaned up concurrently and
-    * even if that was possible, in the worst case we lose a chance to do a
-    * HOT update.
+    * column updates only to later reject HOT updates for lack of space in
+    * the same page. So we be conservative and only fetch hot_attrs if the
+    * page is not already full. Since we are already holding a pin on the
+    * buffer, there is no chance that the buffer can get cleaned up
+    * concurrently and even if that was possible, in the worst case we lose a
+    * chance to do a HOT update.
     */
    if (!PageIsFull(page))
    {
@@ -4176,7 +4177,7 @@ l2:
     * logged.
     */
    old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
-                                          bms_overlap(modified_attrs, id_attrs),
+                                      bms_overlap(modified_attrs, id_attrs),
                                           &old_key_copied);
 
    /* NO EREPORT(ERROR) from here till changes are logged */
@@ -4422,17 +4423,17 @@ static Bitmapset *
 HeapDetermineModifiedColumns(Relation relation, Bitmapset *interesting_cols,
                             HeapTuple oldtup, HeapTuple newtup)
 {
-   int     attnum;
-   Bitmapset *modified = NULL;
+   int         attnum;
+   Bitmapset  *modified = NULL;
 
    while ((attnum = bms_first_member(interesting_cols)) >= 0)
    {
        attnum += FirstLowInvalidHeapAttributeNumber;
 
        if (!heap_tuple_attr_equals(RelationGetDescr(relation),
-                                  attnum, oldtup, newtup))
+                                   attnum, oldtup, newtup))
            modified = bms_add_member(modified,
-                                     attnum - FirstLowInvalidHeapAttributeNumber);
+                               attnum - FirstLowInvalidHeapAttributeNumber);
    }
 
    return modified;
index 775f2ff1f8c343232681aab6a79f7364e09a1684..116f5f32f6ea8f509772f19f180b1ee8e424f5f7 100644 (file)
@@ -100,7 +100,7 @@ typedef struct BTParallelScanDescData
                                         * scan */
    slock_t     btps_mutex;     /* protects above variables */
    ConditionVariable btps_cv;  /* used to synchronize parallel scan */
-} BTParallelScanDescData;
+}  BTParallelScanDescData;
 
 typedef struct BTParallelScanDescData *BTParallelScanDesc;
 
@@ -289,11 +289,11 @@ btbuildempty(Relation index)
    _bt_initmetapage(metapage, P_NONE, 0);
 
    /*
-    * Write the page and log it.  It might seem that an immediate sync
-    * would be sufficient to guarantee that the file exists on disk, but
-    * recovery itself might remove it while replaying, for example, an
-    * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record.  Therefore, we
-    * need this even when wal_level=minimal.
+    * Write the page and log it.  It might seem that an immediate sync would
+    * be sufficient to guarantee that the file exists on disk, but recovery
+    * itself might remove it while replaying, for example, an
+    * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record.  Therefore, we need
+    * this even when wal_level=minimal.
     */
    PageSetChecksumInplace(metapage, BTREE_METAPAGE);
    smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
index 8eb5275a8b4833b26aee0d963f54887edeced780..637ebf30f8594382991bc4887429801f07042952 100644 (file)
@@ -66,7 +66,7 @@ brin_desc(StringInfo buf, XLogReaderState *record)
        xl_brin_desummarize *xlrec = (xl_brin_desummarize *) rec;
 
        appendStringInfo(buf, "pagesPerRange %u, heapBlk %u, page offset %u",
-                        xlrec->pagesPerRange, xlrec->heapBlk, xlrec->regOffset);
+                    xlrec->pagesPerRange, xlrec->heapBlk, xlrec->regOffset);
    }
 }
 
index ef268c5ab3003657c555d31c4e6e4b32d57b0a43..9181154ffd81c1d4e5f77bd35416e092155d3c6f 100644 (file)
@@ -36,7 +36,7 @@ clog_desc(StringInfo buf, XLogReaderState *record)
 
        memcpy(&xlrec, rec, sizeof(xl_clog_truncate));
        appendStringInfo(buf, "page %d; oldestXact %u",
-           xlrec.pageno, xlrec.oldestXact);
+                        xlrec.pageno, xlrec.oldestXact);
    }
 }
 
index b22fdd48f3e55deede5770459649bba475a272b8..df51f3ce1f50f37e5f3d91f8edc43219e13b3152 100644 (file)
@@ -117,18 +117,18 @@ gin_desc(StringInfo buf, XLogReaderState *record)
 
                    if (!(xlrec->flags & GIN_INSERT_ISDATA))
                        appendStringInfo(buf, " isdelete: %c",
-                        (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F');
+                                        (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F');
                    else if (xlrec->flags & GIN_INSERT_ISLEAF)
                        desc_recompress_leaf(buf, (ginxlogRecompressDataLeaf *) payload);
                    else
                    {
                        ginxlogInsertDataInternal *insertData =
-                           (ginxlogInsertDataInternal *) payload;
+                       (ginxlogInsertDataInternal *) payload;
 
                        appendStringInfo(buf, " pitem: %u-%u/%u",
-                                        PostingItemGetBlockNumber(&insertData->newitem),
-                                        ItemPointerGetBlockNumber(&insertData->newitem.key),
-                                        ItemPointerGetOffsetNumber(&insertData->newitem.key));
+                            PostingItemGetBlockNumber(&insertData->newitem),
+                        ItemPointerGetBlockNumber(&insertData->newitem.key),
+                       ItemPointerGetOffsetNumber(&insertData->newitem.key));
                    }
                }
            }
@@ -159,7 +159,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
                else
                {
                    ginxlogVacuumDataLeafPage *xlrec =
-                       (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
+                   (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
 
                    desc_recompress_leaf(buf, &xlrec->data);
                }
index 00a0ab4438647164b4b2df5fbbdced2f46d3c371..9a3725991648d2b6b29e54d1b14ddbb2c2af9bf8 100644 (file)
@@ -164,10 +164,10 @@ spgbuildempty(Relation index)
 
    /*
     * Write the page and log it unconditionally.  This is important
-    * particularly for indexes created on tablespaces and databases
-    * whose creation happened after the last redo pointer as recovery
-    * removes any of their existing content when the corresponding
-    * create records are replayed.
+    * particularly for indexes created on tablespaces and databases whose
+    * creation happened after the last redo pointer as recovery removes any
+    * of their existing content when the corresponding create records are
+    * replayed.
     */
    PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
    smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
index 7a007a6ba50349395ef6643d4c78dbf549a02812..bece57589e80ebceaacca0db0f8775a9ed3ba8f6 100644 (file)
@@ -84,7 +84,7 @@ static int    ZeroCLOGPage(int pageno, bool writeXlog);
 static bool CLOGPagePrecedes(int page1, int page2);
 static void WriteZeroPageXlogRec(int pageno);
 static void WriteTruncateXlogRec(int pageno, TransactionId oldestXact,
-                                Oid oldestXidDb);
+                    Oid oldestXidDb);
 static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
                           TransactionId *subxids, XidStatus status,
                           XLogRecPtr lsn, int pageno);
@@ -680,13 +680,13 @@ TruncateCLOG(TransactionId oldestXact, Oid oldestxid_datoid)
 
    /* vac_truncate_clog already advanced oldestXid */
    Assert(TransactionIdPrecedesOrEquals(oldestXact,
-          ShmemVariableCache->oldestXid));
+                                        ShmemVariableCache->oldestXid));
 
    /*
-    * Write XLOG record and flush XLOG to disk. We record the oldest xid we're
-    * keeping information about here so we can ensure that it's always ahead
-    * of clog truncation in case we crash, and so a standby finds out the new
-    * valid xid before the next checkpoint.
+    * Write XLOG record and flush XLOG to disk. We record the oldest xid
+    * we're keeping information about here so we can ensure that it's always
+    * ahead of clog truncation in case we crash, and so a standby finds out
+    * the new valid xid before the next checkpoint.
     */
    WriteTruncateXlogRec(cutoffPage, oldestXact, oldestxid_datoid);
 
index 03ffa20908404481f4b19d55cd909d58a01d0c9d..7646c23c4e7fa6e24a75cddb8263b95bf46bdd20 100644 (file)
@@ -748,8 +748,8 @@ ShutdownCommitTs(void)
    SimpleLruFlush(CommitTsCtl, false);
 
    /*
-    * fsync pg_commit_ts to ensure that any files flushed previously are durably
-    * on disk.
+    * fsync pg_commit_ts to ensure that any files flushed previously are
+    * durably on disk.
     */
    fsync_fname("pg_commit_ts", true);
 }
@@ -764,8 +764,8 @@ CheckPointCommitTs(void)
    SimpleLruFlush(CommitTsCtl, true);
 
    /*
-    * fsync pg_commit_ts to ensure that any files flushed previously are durably
-    * on disk.
+    * fsync pg_commit_ts to ensure that any files flushed previously are
+    * durably on disk.
     */
    fsync_fname("pg_commit_ts", true);
 }
index cc68484a5d633db18db3e4b8f30380a7662af673..cef03f83e03f3be28d02945c152971b1527ab4b1 100644 (file)
@@ -87,9 +87,9 @@ SubTransSetParent(TransactionId xid, TransactionId parent)
    ptr += entryno;
 
    /*
-    * It's possible we'll try to set the parent xid multiple times
-    * but we shouldn't ever be changing the xid from one valid xid
-    * to another valid xid, which would corrupt the data structure.
+    * It's possible we'll try to set the parent xid multiple times but we
+    * shouldn't ever be changing the xid from one valid xid to another valid
+    * xid, which would corrupt the data structure.
     */
    if (*ptr != parent)
    {
@@ -162,13 +162,13 @@ SubTransGetTopmostTransaction(TransactionId xid)
        parentXid = SubTransGetParent(parentXid);
 
        /*
-        * By convention the parent xid gets allocated first, so should
-        * always precede the child xid. Anything else points to a corrupted
-        * data structure that could lead to an infinite loop, so exit.
+        * By convention the parent xid gets allocated first, so should always
+        * precede the child xid. Anything else points to a corrupted data
+        * structure that could lead to an infinite loop, so exit.
         */
        if (!TransactionIdPrecedes(parentXid, previousXid))
            elog(ERROR, "pg_subtrans contains invalid entry: xid %u points to parent xid %u",
-                           previousXid, parentXid);
+                previousXid, parentXid);
    }
 
    Assert(TransactionIdIsValid(previousXid));
index 7bf2555af2266bcc216fe848eb94af3c97bd763e..c50f9c4bf6537d882cfe8ea877770eaa0200cde2 100644 (file)
@@ -166,7 +166,7 @@ typedef struct GlobalTransactionData
     */
    XLogRecPtr  prepare_start_lsn;      /* XLOG offset of prepare record start */
    XLogRecPtr  prepare_end_lsn;    /* XLOG offset of prepare record end */
-   TransactionId   xid;            /* The GXACT id */
+   TransactionId xid;          /* The GXACT id */
 
    Oid         owner;          /* ID of user that executed the xact */
    BackendId   locking_backend;    /* backend currently working on the xact */
@@ -220,11 +220,11 @@ static void RemoveGXact(GlobalTransaction gxact);
 
 static void XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len);
 static char *ProcessTwoPhaseBuffer(TransactionId xid,
-                           XLogRecPtr  prepare_start_lsn,
-                           bool fromdisk, bool setParent, bool setNextXid);
+                     XLogRecPtr prepare_start_lsn,
+                     bool fromdisk, bool setParent, bool setNextXid);
 static void MarkAsPreparingGuts(GlobalTransaction gxact, TransactionId xid,
-               const char *gid, TimestampTz prepared_at, Oid owner,
-               Oid databaseid);
+                   const char *gid, TimestampTz prepared_at, Oid owner,
+                   Oid databaseid);
 static void RemoveTwoPhaseFile(TransactionId xid, bool giveWarning);
 static void RecreateTwoPhaseFile(TransactionId xid, void *content, int len);
 
@@ -1304,7 +1304,7 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of memory"),
-          errdetail("Failed while allocating a WAL reading processor.")));
+            errdetail("Failed while allocating a WAL reading processor.")));
 
    record = XLogReadRecord(xlogreader, lsn, &errormsg);
    if (record == NULL)
@@ -1318,9 +1318,9 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
        (XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE)
        ereport(ERROR,
                (errcode_for_file_access(),
-                errmsg("expected two-phase state data is not present in WAL at %X/%X",
-                       (uint32) (lsn >> 32),
-                       (uint32) lsn)));
+       errmsg("expected two-phase state data is not present in WAL at %X/%X",
+              (uint32) (lsn >> 32),
+              (uint32) lsn)));
 
    if (len != NULL)
        *len = XLogRecGetDataLen(xlogreader);
@@ -1675,7 +1675,10 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
    LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
    for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
    {
-       /* Note that we are using gxact not pgxact so this works in recovery also */
+       /*
+        * Note that we are using gxact not pgxact so this works in recovery
+        * also
+        */
        GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
 
        if ((gxact->valid || gxact->inredo) &&
@@ -1727,8 +1730,8 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
 void
 restoreTwoPhaseData(void)
 {
-   DIR            *cldir;
-   struct dirent  *clde;
+   DIR        *cldir;
+   struct dirent *clde;
 
    cldir = AllocateDir(TWOPHASE_DIR);
    while ((clde = ReadDir(cldir, TWOPHASE_DIR)) != NULL)
@@ -1801,8 +1804,8 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
        xid = gxact->xid;
 
        buf = ProcessTwoPhaseBuffer(xid,
-               gxact->prepare_start_lsn,
-               gxact->ondisk, false, true);
+                                   gxact->prepare_start_lsn,
+                                   gxact->ondisk, false, true);
 
        if (buf == NULL)
            continue;
@@ -1876,8 +1879,8 @@ StandbyRecoverPreparedTransactions(void)
        xid = gxact->xid;
 
        buf = ProcessTwoPhaseBuffer(xid,
-               gxact->prepare_start_lsn,
-               gxact->ondisk, false, false);
+                                   gxact->prepare_start_lsn,
+                                   gxact->ondisk, false, false);
        if (buf != NULL)
            pfree(buf);
    }
@@ -1920,17 +1923,17 @@ RecoverPreparedTransactions(void)
        xid = gxact->xid;
 
        /*
-        * Reconstruct subtrans state for the transaction --- needed
-        * because pg_subtrans is not preserved over a restart.  Note that
-        * we are linking all the subtransactions directly to the
-        * top-level XID; there may originally have been a more complex
-        * hierarchy, but there's no need to restore that exactly.
-        * It's possible that SubTransSetParent has been set before, if
-        * the prepared transaction generated xid assignment records.
+        * Reconstruct subtrans state for the transaction --- needed because
+        * pg_subtrans is not preserved over a restart.  Note that we are
+        * linking all the subtransactions directly to the top-level XID;
+        * there may originally have been a more complex hierarchy, but
+        * there's no need to restore that exactly. It's possible that
+        * SubTransSetParent has been set before, if the prepared transaction
+        * generated xid assignment records.
         */
        buf = ProcessTwoPhaseBuffer(xid,
-               gxact->prepare_start_lsn,
-               gxact->ondisk, true, false);
+                                   gxact->prepare_start_lsn,
+                                   gxact->ondisk, true, false);
        if (buf == NULL)
            continue;
 
@@ -1949,9 +1952,8 @@ RecoverPreparedTransactions(void)
        bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
 
        /*
-        * Recreate its GXACT and dummy PGPROC. But, check whether
-        * it was added in redo and already has a shmem entry for
-        * it.
+        * Recreate its GXACT and dummy PGPROC. But, check whether it was
+        * added in redo and already has a shmem entry for it.
         */
        LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
        MarkAsPreparingGuts(gxact, xid, gid,
@@ -1980,9 +1982,8 @@ RecoverPreparedTransactions(void)
            StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
 
        /*
-        * We're done with recovering this transaction. Clear
-        * MyLockedGxact, like we do in PrepareTransaction() during normal
-        * operation.
+        * We're done with recovering this transaction. Clear MyLockedGxact,
+        * like we do in PrepareTransaction() during normal operation.
         */
        PostPrepare_Twophase();
 
@@ -2049,8 +2050,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
        else
        {
            ereport(WARNING,
-                   (errmsg("removing future two-phase state from memory for \"%u\"",
-                           xid)));
+           (errmsg("removing future two-phase state from memory for \"%u\"",
+                   xid)));
            PrepareRedoRemove(xid, true);
        }
        return NULL;
@@ -2063,8 +2064,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
        if (buf == NULL)
        {
            ereport(WARNING,
-                   (errmsg("removing corrupt two-phase state file for \"%u\"",
-                           xid)));
+                 (errmsg("removing corrupt two-phase state file for \"%u\"",
+                         xid)));
            RemoveTwoPhaseFile(xid, true);
            return NULL;
        }
@@ -2082,15 +2083,15 @@ ProcessTwoPhaseBuffer(TransactionId xid,
        if (fromdisk)
        {
            ereport(WARNING,
-                   (errmsg("removing corrupt two-phase state file for \"%u\"",
-                           xid)));
+                 (errmsg("removing corrupt two-phase state file for \"%u\"",
+                         xid)));
            RemoveTwoPhaseFile(xid, true);
        }
        else
        {
            ereport(WARNING,
-                   (errmsg("removing corrupt two-phase state from memory for \"%u\"",
-                           xid)));
+           (errmsg("removing corrupt two-phase state from memory for \"%u\"",
+                   xid)));
            PrepareRedoRemove(xid, true);
        }
        pfree(buf);
@@ -2098,8 +2099,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
    }
 
    /*
-    * Examine subtransaction XIDs ... they should all follow main
-    * XID, and they may force us to advance nextXid.
+    * Examine subtransaction XIDs ... they should all follow main XID, and
+    * they may force us to advance nextXid.
     */
    subxids = (TransactionId *) (buf +
                                 MAXALIGN(sizeof(TwoPhaseFileHeader)) +
@@ -2122,7 +2123,7 @@ ProcessTwoPhaseBuffer(TransactionId xid,
             */
            LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
            if (TransactionIdFollowsOrEquals(subxid,
-                                        ShmemVariableCache->nextXid))
+                                            ShmemVariableCache->nextXid))
            {
                ShmemVariableCache->nextXid = subxid;
                TransactionIdAdvance(ShmemVariableCache->nextXid);
@@ -2175,14 +2176,15 @@ RecordTransactionCommitPrepared(TransactionId xid,
    MyPgXact->delayChkpt = true;
 
    /*
-    * Emit the XLOG commit record. Note that we mark 2PC commits as potentially
-    * having AccessExclusiveLocks since we don't know whether or not they do.
+    * Emit the XLOG commit record. Note that we mark 2PC commits as
+    * potentially having AccessExclusiveLocks since we don't know whether or
+    * not they do.
     */
    recptr = XactLogCommitRecord(committs,
                                 nchildren, children, nrels, rels,
                                 ninvalmsgs, invalmsgs,
                                 initfileinval, false,
-                        MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
+                       MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
                                 xid);
 
 
@@ -2260,13 +2262,14 @@ RecordTransactionAbortPrepared(TransactionId xid,
    START_CRIT_SECTION();
 
    /*
-    * Emit the XLOG commit record. Note that we mark 2PC aborts as potentially
-    * having AccessExclusiveLocks since we don't know whether or not they do.
+    * Emit the XLOG commit record. Note that we mark 2PC aborts as
+    * potentially having AccessExclusiveLocks since we don't know whether or
+    * not they do.
     */
    recptr = XactLogAbortRecord(GetCurrentTimestamp(),
                                nchildren, children,
                                nrels, rels,
-                        MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
+                       MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
                                xid);
 
    /* Always flush, since we're about to remove the 2PC state file */
@@ -2301,8 +2304,8 @@ void
 PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
 {
    TwoPhaseFileHeader *hdr = (TwoPhaseFileHeader *) buf;
-   char              *bufptr;
-   const char        *gid;
+   char       *bufptr;
+   const char *gid;
    GlobalTransaction gxact;
 
    Assert(RecoveryInProgress());
@@ -2315,8 +2318,8 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
     *
     * This creates a gxact struct and puts it into the active array.
     *
-    * In redo, this struct is mainly used to track PREPARE/COMMIT entries
-    * in shared memory. Hence, we only fill up the bare minimum contents here.
+    * In redo, this struct is mainly used to track PREPARE/COMMIT entries in
+    * shared memory. Hence, we only fill up the bare minimum contents here.
     * The gxact also gets marked with gxact->inredo set to true to indicate
     * that it got added in the redo phase
     */
@@ -2340,7 +2343,7 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
    gxact->locking_backend = InvalidBackendId;
    gxact->valid = false;
    gxact->ondisk = XLogRecPtrIsInvalid(start_lsn);
-   gxact->inredo = true; /* yes, added in redo */
+   gxact->inredo = true;       /* yes, added in redo */
    strcpy(gxact->gid, gid);
 
    /* And insert it into the active array */
index 5efbfbd3d61b856e14301831345fe29be4541e20..b02dd6fbd25a8dfdd30b2c6578ec24617ef7958b 100644 (file)
@@ -272,7 +272,7 @@ AdvanceOldestClogXid(TransactionId oldest_datfrozenxid)
 {
    LWLockAcquire(CLogTruncationLock, LW_EXCLUSIVE);
    if (TransactionIdPrecedes(ShmemVariableCache->oldestClogXid,
-       oldest_datfrozenxid))
+                             oldest_datfrozenxid))
    {
        ShmemVariableCache->oldestClogXid = oldest_datfrozenxid;
    }
index a3ff1b22f07ef47ce6bee3d199153ccb027c02d8..7e8c598f2adc191a34f2bb5424a3a480cc342888 100644 (file)
@@ -115,7 +115,7 @@ TransactionId *ParallelCurrentXids;
  * globally accessible, so can be set from anywhere in the code that requires
  * recording flags.
  */
-int  MyXactFlags;
+int            MyXactFlags;
 
 /*
  * transaction states - transaction state from server perspective
@@ -2641,7 +2641,8 @@ CleanupTransaction(void)
     * do abort cleanup processing
     */
    AtCleanup_Portals();        /* now safe to release portal memory */
-   AtEOXact_Snapshot(false, true); /* and release the transaction's snapshots */
+   AtEOXact_Snapshot(false, true);     /* and release the transaction's
+                                        * snapshots */
 
    CurrentResourceOwner = NULL;    /* and resource owner */
    if (TopTransactionResourceOwner)
@@ -5646,8 +5647,8 @@ xact_redo(XLogReaderState *record)
    else if (info == XLOG_XACT_PREPARE)
    {
        /*
-        * Store xid and start/end pointers of the WAL record in
-        * TwoPhaseState gxact entry.
+        * Store xid and start/end pointers of the WAL record in TwoPhaseState
+        * gxact entry.
         */
        PrepareRedoAdd(XLogRecGetData(record),
                       record->ReadRecPtr,
index b98e37e1d38aed68037736cf9285331bd6548da0..399822d3fead60e0302169ac007ff8bc042a8fd6 100644 (file)
@@ -550,13 +550,12 @@ typedef struct XLogCtlInsert
    bool        fullPageWrites;
 
    /*
-    * exclusiveBackupState indicates the state of an exclusive backup
-    * (see comments of ExclusiveBackupState for more details).
-    * nonExclusiveBackups is a counter indicating the number of streaming
-    * base backups currently in progress. forcePageWrites is set to true
-    * when either of these is non-zero. lastBackupStart is the latest
-    * checkpoint redo location used as a starting point for an online
-    * backup.
+    * exclusiveBackupState indicates the state of an exclusive backup (see
+    * comments of ExclusiveBackupState for more details). nonExclusiveBackups
+    * is a counter indicating the number of streaming base backups currently
+    * in progress. forcePageWrites is set to true when either of these is
+    * non-zero. lastBackupStart is the latest checkpoint redo location used
+    * as a starting point for an online backup.
     */
    ExclusiveBackupState exclusiveBackupState;
    int         nonExclusiveBackups;
@@ -1082,7 +1081,7 @@ XLogInsertRecord(XLogRecData *rdata,
         */
        if ((flags & XLOG_MARK_UNIMPORTANT) == 0)
        {
-           int lockno = holdingAllLocks ? 0 : MyLockNo;
+           int         lockno = holdingAllLocks ? 0 : MyLockNo;
 
            WALInsertLocks[lockno].l.lastImportantAt = StartPos;
        }
@@ -1405,7 +1404,8 @@ checkXLogConsistency(XLogReaderState *record)
 
        /*
         * If the block LSN is already ahead of this WAL record, we can't
-        * expect contents to match.  This can happen if recovery is restarted.
+        * expect contents to match.  This can happen if recovery is
+        * restarted.
         */
        if (PageGetLSN(replay_image_masked) > record->EndRecPtr)
            continue;
@@ -4975,15 +4975,15 @@ BootStrapXLOG(void)
    sysidentifier |= getpid() & 0xFFF;
 
    /*
-    * Generate a random nonce. This is used for authentication requests
-    * that will fail because the user does not exist. The nonce is used to
-    * create a genuine-looking password challenge for the non-existent user,
-    * in lieu of an actual stored password.
+    * Generate a random nonce. This is used for authentication requests that
+    * will fail because the user does not exist. The nonce is used to create
+    * a genuine-looking password challenge for the non-existent user, in lieu
+    * of an actual stored password.
     */
    if (!pg_backend_random(mock_auth_nonce, MOCK_AUTH_NONCE_LEN))
        ereport(PANIC,
-           (errcode(ERRCODE_INTERNAL_ERROR),
-            errmsg("could not generate secret authorization token")));
+               (errcode(ERRCODE_INTERNAL_ERROR),
+                errmsg("could not generate secret authorization token")));
 
    /* First timeline ID is always 1 */
    ThisTimeLineID = 1;
@@ -5298,7 +5298,7 @@ readRecoveryCommandFile(void)
                DatumGetLSN(DirectFunctionCall3(pg_lsn_in,
                                                CStringGetDatum(item->value),
                                                ObjectIdGetDatum(InvalidOid),
-                                                       Int32GetDatum(-1)));
+                                               Int32GetDatum(-1)));
            ereport(DEBUG2,
                    (errmsg_internal("recovery_target_lsn = '%X/%X'",
                                     (uint32) (recoveryTargetLSN >> 32),
@@ -5643,9 +5643,9 @@ recoveryStopsBefore(XLogReaderState *record)
        recoveryStopTime = 0;
        recoveryStopName[0] = '\0';
        ereport(LOG,
-               (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"",
-                       (uint32) (recoveryStopLSN >> 32),
-                       (uint32) recoveryStopLSN)));
+            (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"",
+                    (uint32) (recoveryStopLSN >> 32),
+                    (uint32) recoveryStopLSN)));
        return true;
    }
 
@@ -5800,9 +5800,9 @@ recoveryStopsAfter(XLogReaderState *record)
        recoveryStopTime = 0;
        recoveryStopName[0] = '\0';
        ereport(LOG,
-               (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"",
-                       (uint32) (recoveryStopLSN >> 32),
-                       (uint32) recoveryStopLSN)));
+             (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"",
+                     (uint32) (recoveryStopLSN >> 32),
+                     (uint32) recoveryStopLSN)));
        return true;
    }
 
@@ -6348,12 +6348,12 @@ StartupXLOG(void)
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of memory"),
-          errdetail("Failed while allocating a WAL reading processor.")));
+            errdetail("Failed while allocating a WAL reading processor.")));
    xlogreader->system_identifier = ControlFile->system_identifier;
 
    /*
-    * Allocate pages dedicated to WAL consistency checks, those had better
-    * be aligned.
+    * Allocate pages dedicated to WAL consistency checks, those had better be
+    * aligned.
     */
    replay_image_masked = (char *) palloc(BLCKSZ);
    master_image_masked = (char *) palloc(BLCKSZ);
@@ -6687,21 +6687,21 @@ StartupXLOG(void)
 
    /*
     * Copy any missing timeline history files between 'now' and the recovery
-    * target timeline from archive to pg_wal. While we don't need those
-    * files ourselves - the history file of the recovery target timeline
-    * covers all the previous timelines in the history too - a cascading
-    * standby server might be interested in them. Or, if you archive the WAL
-    * from this server to a different archive than the master, it'd be good
-    * for all the history files to get archived there after failover, so that
-    * you can use one of the old timelines as a PITR target. Timeline history
-    * files are small, so it's better to copy them unnecessarily than not
-    * copy them and regret later.
+    * target timeline from archive to pg_wal. While we don't need those files
+    * ourselves - the history file of the recovery target timeline covers all
+    * the previous timelines in the history too - a cascading standby server
+    * might be interested in them. Or, if you archive the WAL from this
+    * server to a different archive than the master, it'd be good for all the
+    * history files to get archived there after failover, so that you can use
+    * one of the old timelines as a PITR target. Timeline history files are
+    * small, so it's better to copy them unnecessarily than not copy them and
+    * regret later.
     */
    restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
 
    /*
-    * Before running in recovery, scan pg_twophase and fill in its status
-    * to be able to work on entries generated by redo.  Doing a scan before
+    * Before running in recovery, scan pg_twophase and fill in its status to
+    * be able to work on entries generated by redo.  Doing a scan before
     * taking any recovery action has the merit to discard any 2PC files that
     * are newer than the first record to replay, saving from any conflicts at
     * replay.  This avoids as well any subsequent scans when doing recovery
@@ -7426,7 +7426,7 @@ StartupXLOG(void)
            snprintf(reason, sizeof(reason),
                     "%s LSN %X/%X\n",
                     recoveryStopAfter ? "after" : "before",
-                    (uint32 ) (recoveryStopLSN >> 32),
+                    (uint32) (recoveryStopLSN >> 32),
                     (uint32) recoveryStopLSN);
        else if (recoveryTarget == RECOVERY_TARGET_NAME)
            snprintf(reason, sizeof(reason),
@@ -9645,6 +9645,7 @@ xlog_redo(XLogReaderState *record)
 
        MultiXactAdvanceOldest(checkPoint.oldestMulti,
                               checkPoint.oldestMultiDB);
+
        /*
         * No need to set oldestClogXid here as well; it'll be set when we
         * redo an xl_clog_truncate if it changed since initialization.
@@ -10238,8 +10239,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
    if (exclusive)
    {
        /*
-        * At first, mark that we're now starting an exclusive backup,
-        * to ensure that there are no other sessions currently running
+        * At first, mark that we're now starting an exclusive backup, to
+        * ensure that there are no other sessions currently running
         * pg_start_backup() or pg_stop_backup().
         */
        if (XLogCtl->Insert.exclusiveBackupState != EXCLUSIVE_BACKUP_NONE)
@@ -10505,8 +10506,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
        {
            /*
             * Check for existing backup label --- implies a backup is already
-            * running.  (XXX given that we checked exclusiveBackupState above,
-            * maybe it would be OK to just unlink any such label file?)
+            * running.  (XXX given that we checked exclusiveBackupState
+            * above, maybe it would be OK to just unlink any such label
+            * file?)
             */
            if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
            {
@@ -10727,8 +10729,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
    if (exclusive)
    {
        /*
-        * At first, mark that we're now stopping an exclusive backup,
-        * to ensure that there are no other sessions currently running
+        * At first, mark that we're now stopping an exclusive backup, to
+        * ensure that there are no other sessions currently running
         * pg_start_backup() or pg_stop_backup().
         */
        WALInsertLockAcquireExclusive();
@@ -10790,8 +10792,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
            durable_unlink(BACKUP_LABEL_FILE, ERROR);
 
            /*
-            * Remove tablespace_map file if present, it is created only if there
-            * are tablespaces.
+            * Remove tablespace_map file if present, it is created only if
+            * there are tablespaces.
             */
            durable_unlink(TABLESPACE_MAP, DEBUG1);
        }
@@ -10978,9 +10980,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
     * archived before returning. If archiving isn't enabled, the required WAL
     * needs to be transported via streaming replication (hopefully with
     * wal_keep_segments set high enough), or some more exotic mechanism like
-    * polling and copying files from pg_wal with script. We have no
-    * knowledge of those mechanisms, so it's up to the user to ensure that he
-    * gets all the required WAL.
+    * polling and copying files from pg_wal with script. We have no knowledge
+    * of those mechanisms, so it's up to the user to ensure that he gets all
+    * the required WAL.
     *
     * We wait until both the last WAL file filled during backup and the
     * history file have been archived, and assume that the alphabetic sorting
@@ -10990,8 +10992,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
     * We wait forever, since archive_command is supposed to work and we
     * assume the admin wanted his backup to work completely. If you don't
     * wish to wait, then either waitforarchive should be passed in as false,
-    * or you can set statement_timeout.  Also, some notices are
-    * issued to clue in anyone who might be doing this interactively.
+    * or you can set statement_timeout.  Also, some notices are issued to
+    * clue in anyone who might be doing this interactively.
     */
    if (waitforarchive && XLogArchivingActive())
    {
@@ -11717,8 +11719,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                     * little chance that the problem will just go away, but
                     * PANIC is not good for availability either, especially
                     * in hot standby mode. So, we treat that the same as
-                    * disconnection, and retry from archive/pg_wal again.
-                    * The WAL in the archive should be identical to what was
+                    * disconnection, and retry from archive/pg_wal again. The
+                    * WAL in the archive should be identical to what was
                     * streamed, so it's unlikely that it helps, but one can
                     * hope...
                     */
@@ -11881,9 +11883,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                         * not open already.  Also read the timeline history
                         * file if we haven't initialized timeline history
                         * yet; it should be streamed over and present in
-                        * pg_wal by now.  Use XLOG_FROM_STREAM so that
-                        * source info is set correctly and XLogReceiptTime
-                        * isn't changed.
+                        * pg_wal by now.  Use XLOG_FROM_STREAM so that source
+                        * info is set correctly and XLogReceiptTime isn't
+                        * changed.
                         */
                        if (readFile < 0)
                        {
index 8568c8abd64e70ee103ed77ef81a94404849a1f0..b3223d691da398659c922884062e92ac0e240758 100644 (file)
@@ -156,7 +156,8 @@ pg_stop_backup(PG_FUNCTION_ARGS)
     * Exclusive backups were typically started in a different connection, so
     * don't try to verify that status of backup is set to
     * SESSION_BACKUP_EXCLUSIVE in this function. Actual verification that an
-    * exclusive backup is in fact running is handled inside do_pg_stop_backup.
+    * exclusive backup is in fact running is handled inside
+    * do_pg_stop_backup.
     */
    stoppoint = do_pg_stop_backup(NULL, true, NULL);
 
@@ -527,7 +528,7 @@ pg_walfile_name(PG_FUNCTION_ARGS)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("recovery is in progress"),
-        errhint("pg_walfile_name() cannot be executed during recovery.")));
+         errhint("pg_walfile_name() cannot be executed during recovery.")));
 
    XLByteToPrevSeg(locationpoint, xlogsegno);
    XLogFileName(xlogfilename, ThisTimeLineID, xlogsegno);
index a3bd0b7f51a064557230f12338b2c73fc49cacb0..6a02738479c886d466b4d6ee0522ec81b0ded101 100644 (file)
@@ -388,10 +388,10 @@ XLogRegisterBufData(uint8 block_id, char *data, int len)
  *
  * The flags that can be used here are:
  * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
- *   included in the record.
+ *  included in the record.
  * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
- *   durability, which allows to avoid triggering WAL archiving and other
- *   background activity.
+ *  durability, which allows to avoid triggering WAL archiving and other
+ *  background activity.
  */
 void
 XLogSetRecordFlags(uint8 flags)
@@ -507,10 +507,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
    hdr_rdt.data = hdr_scratch;
 
    /*
-    * Enforce consistency checks for this record if user is looking for
-    * it. Do this before at the beginning of this routine to give the
-    * possibility for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY
-    * directly for a record.
+    * Enforce consistency checks for this record if user is looking for it.
+    * Do this before at the beginning of this routine to give the possibility
+    * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
+    * a record.
     */
    if (wal_consistency_checking[rmid])
        info |= XLR_CHECK_CONSISTENCY;
@@ -576,9 +576,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
            bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
 
        /*
-        * If needs_backup is true or WAL checking is enabled for
-        * current resource manager, log a full-page write for the current
-        * block.
+        * If needs_backup is true or WAL checking is enabled for current
+        * resource manager, log a full-page write for the current block.
         */
        include_image = needs_backup || (info & XLR_CHECK_CONSISTENCY) != 0;
 
@@ -645,8 +644,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
            bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
 
            /*
-            * If WAL consistency checking is enabled for the resource manager of
-            * this WAL record, a full-page image is included in the record
+            * If WAL consistency checking is enabled for the resource manager
+            * of this WAL record, a full-page image is included in the record
             * for the block modified. During redo, the full-page is replayed
             * only if BKPIMAGE_APPLY is set.
             */
index f077662946f4f7a0e6ebf44dfc9a747d65c95706..c3b1371764b634445c9b231035ea783edac6f453 100644 (file)
@@ -892,8 +892,8 @@ XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr)
         * that, except when caller has explicitly specified the offset that
         * falls somewhere there or when we are skipping multi-page
         * continuation record. It doesn't matter though because
-        * ReadPageInternal() is prepared to handle that and will read at least
-        * short page-header worth of data
+        * ReadPageInternal() is prepared to handle that and will read at
+        * least short page-header worth of data
         */
        targetRecOff = tmpRecPtr % XLOG_BLCKSZ;
 
index d7f2e55b0909887bbb30ebbc606ad261b2237488..7430a1f77b456f58f8319e459d8580764aa901a2 100644 (file)
@@ -805,22 +805,23 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
    Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
 
    /*
-    * If the desired page is currently read in and valid, we have nothing to do.
+    * If the desired page is currently read in and valid, we have nothing to
+    * do.
     *
     * The caller should've ensured that it didn't previously advance readOff
-    * past the valid limit of this timeline, so it doesn't matter if the current
-    * TLI has since become historical.
+    * past the valid limit of this timeline, so it doesn't matter if the
+    * current TLI has since become historical.
     */
    if (lastReadPage == wantPage &&
        state->readLen != 0 &&
-       lastReadPage + state->readLen >= wantPage + Min(wantLength,XLOG_BLCKSZ-1))
+       lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
        return;
 
    /*
     * If we're reading from the current timeline, it hasn't become historical
     * and the page we're reading is after the last page read, we can again
-    * just carry on. (Seeking backwards requires a check to make sure the older
-    * page isn't on a prior timeline).
+    * just carry on. (Seeking backwards requires a check to make sure the
+    * older page isn't on a prior timeline).
     *
     * ThisTimeLineID might've become historical since we last looked, but the
     * caller is required not to read past the flush limit it saw at the time
@@ -835,8 +836,8 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
 
    /*
     * If we're just reading pages from a previously validated historical
-    * timeline and the timeline we're reading from is valid until the
-    * end of the current segment we can just keep reading.
+    * timeline and the timeline we're reading from is valid until the end of
+    * the current segment we can just keep reading.
     */
    if (state->currTLIValidUntil != InvalidXLogRecPtr &&
        state->currTLI != ThisTimeLineID &&
@@ -845,10 +846,10 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
        return;
 
    /*
-    * If we reach this point we're either looking up a page for random access,
-    * the current timeline just became historical, or we're reading from a new
-    * segment containing a timeline switch. In all cases we need to determine
-    * the newest timeline on the segment.
+    * If we reach this point we're either looking up a page for random
+    * access, the current timeline just became historical, or we're reading
+    * from a new segment containing a timeline switch. In all cases we need
+    * to determine the newest timeline on the segment.
     *
     * If it's the current timeline we can just keep reading from here unless
     * we detect a timeline switch that makes the current timeline historical.
@@ -861,26 +862,29 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
         * We need to re-read the timeline history in case it's been changed
         * by a promotion or replay from a cascaded replica.
         */
-       List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
+       List       *timelineHistory = readTimeLineHistory(ThisTimeLineID);
 
-       XLogRecPtr endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1;
+       XLogRecPtr  endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1;
 
        Assert(wantPage / XLogSegSize == endOfSegment / XLogSegSize);
 
-       /* Find the timeline of the last LSN on the segment containing wantPage. */
+       /*
+        * Find the timeline of the last LSN on the segment containing
+        * wantPage.
+        */
        state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
        state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
-           &state->nextTLI);
+                                                 &state->nextTLI);
 
        Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
-               wantPage + wantLength < state->currTLIValidUntil);
+              wantPage + wantLength < state->currTLIValidUntil);
 
        list_free_deep(timelineHistory);
 
        elog(DEBUG3, "switched to timeline %u valid until %X/%X",
-               state->currTLI,
-               (uint32)(state->currTLIValidUntil >> 32),
-               (uint32)(state->currTLIValidUntil));
+            state->currTLI,
+            (uint32) (state->currTLIValidUntil >> 32),
+            (uint32) (state->currTLIValidUntil));
    }
 }
 
@@ -929,21 +933,22 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
         *
         * We have to do it each time through the loop because if we're in
         * recovery as a cascading standby, the current timeline might've
-        * become historical. We can't rely on RecoveryInProgress() because
-        * in a standby configuration like
+        * become historical. We can't rely on RecoveryInProgress() because in
+        * a standby configuration like
         *
-        *    A => B => C
+        * A => B => C
         *
         * if we're a logical decoding session on C, and B gets promoted, our
         * timeline will change while we remain in recovery.
         *
         * We can't just keep reading from the old timeline as the last WAL
-        * archive in the timeline will get renamed to .partial by StartupXLOG().
+        * archive in the timeline will get renamed to .partial by
+        * StartupXLOG().
         *
         * If that happens after our caller updated ThisTimeLineID but before
         * we actually read the xlog page, we might still try to read from the
-        * old (now renamed) segment and fail. There's not much we can do about
-        * this, but it can only happen when we're a leaf of a cascading
+        * old (now renamed) segment and fail. There's not much we can do
+        * about this, but it can only happen when we're a leaf of a cascading
         * standby whose master gets promoted while we're decoding, so a
         * one-off ERROR isn't too bad.
         */
index 806db7f35ea336e9397364ab6e16e9bbbd8af883..cd82cb9f29a8e99e8d57620e154e686d6134579a 100644 (file)
@@ -1125,8 +1125,10 @@ doDeletion(const ObjectAddress *object, int flags)
                        heap_drop_with_catalog(object->objectId);
                }
 
-               /* for a sequence, in addition to dropping the heap, also
-                * delete pg_sequence tuple */
+               /*
+                * for a sequence, in addition to dropping the heap, also
+                * delete pg_sequence tuple
+                */
                if (relKind == RELKIND_SEQUENCE)
                    DeleteSequenceTuple(object->objectId);
                break;
@@ -1942,7 +1944,7 @@ find_expr_references_walker(Node *node,
    }
    else if (IsA(node, NextValueExpr))
    {
-       NextValueExpr  *nve = (NextValueExpr *) node;
+       NextValueExpr *nve = (NextValueExpr *) node;
 
        add_object_address(OCLASS_CLASS, nve->seqid, 0,
                           context->addrs);
index 0f1547b5671869511a68e50a296919b73e223c32..fa926048e1102d3c658885090daf7834c4abce72 100644 (file)
@@ -1762,10 +1762,10 @@ heap_drop_with_catalog(Oid relid)
    /*
     * To drop a partition safely, we must grab exclusive lock on its parent,
     * because another backend might be about to execute a query on the parent
-    * table.  If it relies on previously cached partition descriptor, then
-    * it could attempt to access the just-dropped relation as its partition.
-    * We must therefore take a table lock strong enough to prevent all
-    * queries on the table from proceeding until we commit and send out a
+    * table.  If it relies on previously cached partition descriptor, then it
+    * could attempt to access the just-dropped relation as its partition. We
+    * must therefore take a table lock strong enough to prevent all queries
+    * on the table from proceeding until we commit and send out a
     * shared-cache-inval notice that will make them update their index lists.
     */
    tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
index 3dfb8fa4f9d1d2653e48384c167c6d4b10dda3c9..6bc05cab3a28a990f4f46b93cd06478539f23bc1 100644 (file)
@@ -854,7 +854,7 @@ get_object_address(ObjectType objtype, Node *object,
 
                    objlist = castNode(List, object);
                    domaddr = get_object_address_type(OBJECT_DOMAIN,
-                                                     linitial_node(TypeName, objlist),
+                                           linitial_node(TypeName, objlist),
                                                      missing_ok);
                    constrname = strVal(lsecond(objlist));
 
@@ -878,7 +878,7 @@ get_object_address(ObjectType objtype, Node *object,
            case OBJECT_PUBLICATION:
            case OBJECT_SUBSCRIPTION:
                address = get_object_address_unqualified(objtype,
-                                                        (Value *) object, missing_ok);
+                                              (Value *) object, missing_ok);
                break;
            case OBJECT_TYPE:
            case OBJECT_DOMAIN:
@@ -1345,7 +1345,7 @@ get_object_address_relobject(ObjectType objtype, List *object,
        if (relation != NULL)
            heap_close(relation, AccessShareLock);
 
-       relation = NULL;    /* department of accident prevention */
+       relation = NULL;        /* department of accident prevention */
        return address;
    }
 
@@ -1762,7 +1762,7 @@ get_object_address_publication_rel(List *object,
 
    relname = linitial(object);
    relation = relation_openrv_extended(makeRangeVarFromNameList(relname),
-                                        AccessShareLock, missing_ok);
+                                       AccessShareLock, missing_ok);
    if (!relation)
        return address;
 
@@ -2138,7 +2138,7 @@ pg_get_object_address(PG_FUNCTION_ARGS)
            if (list_length(name) != 1)
                ereport(ERROR,
                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                     errmsg("name list length must be exactly %d", 1)));
+                        errmsg("name list length must be exactly %d", 1)));
            objnode = linitial(name);
            break;
        case OBJECT_TYPE:
@@ -2166,18 +2166,18 @@ pg_get_object_address(PG_FUNCTION_ARGS)
        case OBJECT_FUNCTION:
        case OBJECT_AGGREGATE:
        case OBJECT_OPERATOR:
-       {
-           ObjectWithArgs *owa = makeNode(ObjectWithArgs);
+           {
+               ObjectWithArgs *owa = makeNode(ObjectWithArgs);
 
-           owa->objname = name;
-           owa->objargs = args;
-           objnode = (Node *) owa;
-           break;
-       }
+               owa->objname = name;
+               owa->objargs = args;
+               objnode = (Node *) owa;
+               break;
+           }
        case OBJECT_LARGEOBJECT:
            /* already handled above */
            break;
-       /* no default, to let compiler warn about missing case */
+           /* no default, to let compiler warn about missing case */
    }
 
    if (objnode == NULL)
@@ -3370,7 +3370,7 @@ getObjectDescription(const ObjectAddress *object)
            {
                HeapTuple   tup;
                char       *pubname;
-               Form_pg_publication_rel prform;
+               Form_pg_publication_rel prform;
 
                tup = SearchSysCache1(PUBLICATIONREL,
                                      ObjectIdGetDatum(object->objectId));
@@ -4896,7 +4896,7 @@ getObjectIdentityParts(const ObjectAddress *object,
            {
                HeapTuple   tup;
                char       *pubname;
-               Form_pg_publication_rel prform;
+               Form_pg_publication_rel prform;
 
                tup = SearchSysCache1(PUBLICATIONREL,
                                      ObjectIdGetDatum(object->objectId));
@@ -5012,8 +5012,8 @@ getOpFamilyIdentity(StringInfo buffer, Oid opfid, List **object)
 
    if (object)
        *object = list_make3(pstrdup(NameStr(amForm->amname)),
-                             pstrdup(schema),
-                             pstrdup(NameStr(opfForm->opfname)));
+                            pstrdup(schema),
+                            pstrdup(NameStr(opfForm->opfname)));
 
    ReleaseSysCache(amTup);
    ReleaseSysCache(opfTup);
index ede920955d75f1176a8bc3dff3e2d5ada1a14d68..30cd0cba19e19efe8040dc24dab3ad99ea474550 100644 (file)
@@ -80,12 +80,12 @@ CollationCreate(const char *collname, Oid collnamespace,
        if (if_not_exists)
        {
            ereport(NOTICE,
-               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                collencoding == -1
-                ? errmsg("collation \"%s\" already exists, skipping",
-                         collname)
-                : errmsg("collation \"%s\" for encoding \"%s\" already exists, skipping",
-                         collname, pg_encoding_to_char(collencoding))));
+                   (errcode(ERRCODE_DUPLICATE_OBJECT),
+                    collencoding == -1
+                    ? errmsg("collation \"%s\" already exists, skipping",
+                             collname)
+                    : errmsg("collation \"%s\" for encoding \"%s\" already exists, skipping",
+                             collname, pg_encoding_to_char(collencoding))));
            return InvalidOid;
        }
        else
@@ -94,8 +94,8 @@ CollationCreate(const char *collname, Oid collnamespace,
                     collencoding == -1
                     ? errmsg("collation \"%s\" already exists",
                              collname)
-                    : errmsg("collation \"%s\" for encoding \"%s\" already exists",
-                             collname, pg_encoding_to_char(collencoding))));
+             : errmsg("collation \"%s\" for encoding \"%s\" already exists",
+                      collname, pg_encoding_to_char(collencoding))));
    }
 
    /* open pg_collation; see below about the lock level */
@@ -123,16 +123,16 @@ CollationCreate(const char *collname, Oid collnamespace,
        {
            heap_close(rel, NoLock);
            ereport(NOTICE,
-               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                errmsg("collation \"%s\" already exists, skipping",
-                       collname)));
+                   (errcode(ERRCODE_DUPLICATE_OBJECT),
+                    errmsg("collation \"%s\" already exists, skipping",
+                           collname)));
            return InvalidOid;
        }
        else
            ereport(ERROR,
-               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                errmsg("collation \"%s\" already exists",
-                       collname)));
+                   (errcode(ERRCODE_DUPLICATE_OBJECT),
+                    errmsg("collation \"%s\" already exists",
+                           collname)));
    }
 
    tupDesc = RelationGetDescr(rel);
index 3e0db69998942e5c2c3c6930ed7a9edac37d0b63..d616df62c155be099961f02eb6f2e376993f7484 100644 (file)
@@ -577,9 +577,9 @@ getOwnedSequences(Oid relid, AttrNumber attnum)
        Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
 
        /*
-        * We assume any auto or internal dependency of a sequence on a column must be
-        * what we are looking for.  (We need the relkind test because indexes
-        * can also have auto dependencies on columns.)
+        * We assume any auto or internal dependency of a sequence on a column
+        * must be what we are looking for.  (We need the relkind test because
+        * indexes can also have auto dependencies on columns.)
         */
        if (deprec->classid == RelationRelationId &&
            deprec->objsubid == 0 &&
index 04214fc20313e6d84dd03ed8bf4bccb251cd78c2..e5fb52cfbf81aa6bb7b4d4b44dc03dfba9b97d9a 100644 (file)
@@ -38,8 +38,8 @@
  */
 typedef struct SeenRelsEntry
 {
-   Oid          rel_id;            /* relation oid */
-   ListCell    *numparents_cell;   /* corresponding list cell */
+   Oid         rel_id;         /* relation oid */
+   ListCell   *numparents_cell;    /* corresponding list cell */
 } SeenRelsEntry;
 
 /*
@@ -167,8 +167,8 @@ List *
 find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 {
    /* hash table for O(1) rel_oid -> rel_numparents cell lookup */
-   HTAB           *seen_rels;
-   HASHCTL         ctl;
+   HTAB       *seen_rels;
+   HASHCTL     ctl;
    List       *rels_list,
               *rel_numparents;
    ListCell   *l;
@@ -212,8 +212,8 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
        foreach(lc, currentchildren)
        {
            Oid         child_oid = lfirst_oid(lc);
-           bool            found;
-           SeenRelsEntry   *hash_entry;
+           bool        found;
+           SeenRelsEntry *hash_entry;
 
            hash_entry = hash_search(seen_rels, &child_oid, HASH_ENTER, &found);
            if (found)
index 613b963683d01e7682a421082a955e87f6258cf6..3e20d051c254a2915550157933c726b1a63c0287 100644 (file)
@@ -50,7 +50,7 @@ NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp)
    TupleDesc   tupDesc;
    ObjectAddress myself;
    int         i;
-   Acl         *nspacl;
+   Acl        *nspacl;
 
    /* sanity checks */
    if (!nspName)
index 92f9902173f88a7ac74d7e7649da655499e2e691..17105f4f2cbc32bec0ce092d31e9bf2dfc9ed69b 100644 (file)
@@ -73,7 +73,7 @@ check_publication_add_relation(Relation targetrel)
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("\"%s\" is a system table",
                        RelationGetRelationName(targetrel)),
-                errdetail("System tables cannot be added to publications.")));
+              errdetail("System tables cannot be added to publications.")));
 
    /* UNLOGGED and TEMP relations cannot be part of publication. */
    if (!RelationNeedsWAL(targetrel))
@@ -81,7 +81,7 @@ check_publication_add_relation(Relation targetrel)
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("table \"%s\" cannot be replicated",
                        RelationGetRelationName(targetrel)),
-                errdetail("Temporary and unlogged relations cannot be replicated.")));
+       errdetail("Temporary and unlogged relations cannot be replicated.")));
 }
 
 /*
@@ -119,8 +119,8 @@ publication_add_relation(Oid pubid, Relation targetrel,
    Oid         relid = RelationGetRelid(targetrel);
    Oid         prrelid;
    Publication *pub = GetPublication(pubid);
-   ObjectAddress   myself,
-                   referenced;
+   ObjectAddress myself,
+               referenced;
 
    rel = heap_open(PublicationRelRelationId, RowExclusiveLock);
 
@@ -139,8 +139,8 @@ publication_add_relation(Oid pubid, Relation targetrel,
 
        ereport(ERROR,
                (errcode(ERRCODE_DUPLICATE_OBJECT),
-                errmsg("relation \"%s\" is already member of publication \"%s\"",
-                       RelationGetRelationName(targetrel), pub->name)));
+           errmsg("relation \"%s\" is already member of publication \"%s\"",
+                  RelationGetRelationName(targetrel), pub->name)));
    }
 
    check_publication_add_relation(targetrel);
@@ -186,9 +186,9 @@ publication_add_relation(Oid pubid, Relation targetrel,
 List *
 GetRelationPublications(Oid relid)
 {
-   List           *result = NIL;
-   CatCList       *pubrellist;
-   int             i;
+   List       *result = NIL;
+   CatCList   *pubrellist;
+   int         i;
 
    /* Find all publications associated with the relation. */
    pubrellist = SearchSysCacheList1(PUBLICATIONRELMAP,
@@ -215,11 +215,11 @@ GetRelationPublications(Oid relid)
 List *
 GetPublicationRelations(Oid pubid)
 {
-   List           *result;
-   Relation        pubrelsrel;
-   ScanKeyData     scankey;
-   SysScanDesc     scan;
-   HeapTuple       tup;
+   List       *result;
+   Relation    pubrelsrel;
+   ScanKeyData scankey;
+   SysScanDesc scan;
+   HeapTuple   tup;
 
    /* Find all publications associated with the relation. */
    pubrelsrel = heap_open(PublicationRelRelationId, AccessShareLock);
@@ -235,7 +235,7 @@ GetPublicationRelations(Oid pubid)
    result = NIL;
    while (HeapTupleIsValid(tup = systable_getnext(scan)))
    {
-       Form_pg_publication_rel     pubrel;
+       Form_pg_publication_rel pubrel;
 
        pubrel = (Form_pg_publication_rel) GETSTRUCT(tup);
 
@@ -254,11 +254,11 @@ GetPublicationRelations(Oid pubid)
 List *
 GetAllTablesPublications(void)
 {
-   List           *result;
-   Relation        rel;
-   ScanKeyData     scankey;
-   SysScanDesc     scan;
-   HeapTuple       tup;
+   List       *result;
+   Relation    rel;
+   ScanKeyData scankey;
+   SysScanDesc scan;
+   HeapTuple   tup;
 
    /* Find all publications that are marked as for all tables. */
    rel = heap_open(PublicationRelationId, AccessShareLock);
@@ -304,8 +304,8 @@ GetAllTablesPublicationRelations(void)
 
    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
-       Oid             relid = HeapTupleGetOid(tuple);
-       Form_pg_class   relForm = (Form_pg_class) GETSTRUCT(tuple);
+       Oid         relid = HeapTupleGetOid(tuple);
+       Form_pg_class relForm = (Form_pg_class) GETSTRUCT(tuple);
 
        if (is_publishable_class(relid, relForm))
            result = lappend_oid(result, relid);
@@ -325,9 +325,9 @@ GetAllTablesPublicationRelations(void)
 Publication *
 GetPublication(Oid pubid)
 {
-   HeapTuple       tup;
-   Publication    *pub;
-   Form_pg_publication pubform;
+   HeapTuple   tup;
+   Publication *pub;
+   Form_pg_publication pubform;
 
    tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
 
@@ -397,9 +397,9 @@ get_publication_oid(const char *pubname, bool missing_ok)
 char *
 get_publication_name(Oid pubid)
 {
-   HeapTuple       tup;
-   char           *pubname;
-   Form_pg_publication pubform;
+   HeapTuple   tup;
+   char       *pubname;
+   Form_pg_publication pubform;
 
    tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
 
@@ -421,10 +421,10 @@ Datum
 pg_get_publication_tables(PG_FUNCTION_ARGS)
 {
    FuncCallContext *funcctx;
-   char           *pubname = text_to_cstring(PG_GETARG_TEXT_PP(0));
-   Publication    *publication;
-   List           *tables;
-   ListCell      **lcp;
+   char       *pubname = text_to_cstring(PG_GETARG_TEXT_PP(0));
+   Publication *publication;
+   List       *tables;
+   ListCell  **lcp;
 
    /* stuff done only on the first call of the function */
    if (SRF_IS_FIRSTCALL())
@@ -455,7 +455,7 @@ pg_get_publication_tables(PG_FUNCTION_ARGS)
 
    while (*lcp != NULL)
    {
-       Oid     relid = lfirst_oid(*lcp);
+       Oid         relid = lfirst_oid(*lcp);
 
        *lcp = lnext(*lcp);
        SRF_RETURN_NEXT(funcctx, ObjectIdGetDatum(relid));
index 7dc21f10522aa98adbd926bb97b1b8bdcbbd5e20..ab5f3719fc397fdaba5f4f7fefc7dc7fd54b2a35 100644 (file)
@@ -44,11 +44,11 @@ static List *textarray_to_stringlist(ArrayType *textarray);
 Subscription *
 GetSubscription(Oid subid, bool missing_ok)
 {
-   HeapTuple       tup;
-   Subscription   *sub;
-   Form_pg_subscription    subform;
-   Datum           datum;
-   bool            isnull;
+   HeapTuple   tup;
+   Subscription *sub;
+   Form_pg_subscription subform;
+   Datum       datum;
+   bool        isnull;
 
    tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
 
@@ -115,11 +115,11 @@ GetSubscription(Oid subid, bool missing_ok)
 int
 CountDBSubscriptions(Oid dbid)
 {
-   int             nsubs = 0;
-   Relation        rel;
-   ScanKeyData     scankey;
-   SysScanDesc     scan;
-   HeapTuple       tup;
+   int         nsubs = 0;
+   Relation    rel;
+   ScanKeyData scankey;
+   SysScanDesc scan;
+   HeapTuple   tup;
 
    rel = heap_open(SubscriptionRelationId, RowExclusiveLock);
 
@@ -181,8 +181,8 @@ get_subscription_oid(const char *subname, bool missing_ok)
 char *
 get_subscription_name(Oid subid)
 {
-   HeapTuple       tup;
-   char           *subname;
+   HeapTuple   tup;
+   char       *subname;
    Form_pg_subscription subform;
 
    tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
@@ -206,9 +206,10 @@ get_subscription_name(Oid subid)
 static List *
 textarray_to_stringlist(ArrayType *textarray)
 {
-   Datum          *elems;
-   int             nelems, i;
-   List           *res = NIL;
+   Datum      *elems;
+   int         nelems,
+               i;
+   List       *res = NIL;
 
    deconstruct_array(textarray,
                      TEXTOID, -1, false, 'i',
@@ -232,7 +233,7 @@ textarray_to_stringlist(ArrayType *textarray)
  */
 Oid
 SetSubscriptionRelState(Oid subid, Oid relid, char state,
-                          XLogRecPtr sublsn)
+                       XLogRecPtr sublsn)
 {
    Relation    rel;
    HeapTuple   tup;
@@ -248,8 +249,8 @@ SetSubscriptionRelState(Oid subid, Oid relid, char state,
                              ObjectIdGetDatum(subid));
 
    /*
-    * If the record for given table does not exist yet create new
-    * record, otherwise update the existing one.
+    * If the record for given table does not exist yet create new record,
+    * otherwise update the existing one.
     */
    if (!HeapTupleIsValid(tup))
    {
@@ -415,8 +416,8 @@ GetSubscriptionRelations(Oid subid)
    Relation    rel;
    HeapTuple   tup;
    int         nkeys = 0;
-   ScanKeyData skey[2];
-   SysScanDesc scan;
+   ScanKeyData skey[2];
+   SysScanDesc scan;
 
    rel = heap_open(SubscriptionRelRelationId, AccessShareLock);
 
@@ -430,12 +431,12 @@ GetSubscriptionRelations(Oid subid)
 
    while (HeapTupleIsValid(tup = systable_getnext(scan)))
    {
-       Form_pg_subscription_rel    subrel;
-       SubscriptionRelState       *relstate;
+       Form_pg_subscription_rel subrel;
+       SubscriptionRelState *relstate;
 
        subrel = (Form_pg_subscription_rel) GETSTRUCT(tup);
 
-       relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState));
+       relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState));
        relstate->relid = subrel->srrelid;
        relstate->state = subrel->srsubstate;
        relstate->lsn = subrel->srsublsn;
@@ -462,8 +463,8 @@ GetSubscriptionNotReadyRelations(Oid subid)
    Relation    rel;
    HeapTuple   tup;
    int         nkeys = 0;
-   ScanKeyData skey[2];
-   SysScanDesc scan;
+   ScanKeyData skey[2];
+   SysScanDesc scan;
 
    rel = heap_open(SubscriptionRelRelationId, AccessShareLock);
 
@@ -482,12 +483,12 @@ GetSubscriptionNotReadyRelations(Oid subid)
 
    while (HeapTupleIsValid(tup = systable_getnext(scan)))
    {
-       Form_pg_subscription_rel    subrel;
-       SubscriptionRelState       *relstate;
+       Form_pg_subscription_rel subrel;
+       SubscriptionRelState *relstate;
 
        subrel = (Form_pg_subscription_rel) GETSTRUCT(tup);
 
-       relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState));
+       relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState));
        relstate->relid = subrel->srrelid;
        relstate->state = subrel->srsubstate;
        relstate->lsn = subrel->srsublsn;
index a4b949d8c712cce63201ebc169e9e98265682c4a..4d3fe8c745ab36ff7236c43714c3e63e73e1ffd4 100644 (file)
@@ -428,7 +428,7 @@ ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddre
 
    address =
        get_object_address_rv(stmt->objectType, stmt->relation, (List *) stmt->object,
-                           &rel, AccessExclusiveLock, false);
+                             &rel, AccessExclusiveLock, false);
 
    /*
     * If a relation was involved, it would have been opened and locked. We
index 404acb2debb83e1411a9c085bb28ce0be0c2746b..ecdd8950ee02a547bf02acc2a74b85e8c685f707 100644 (file)
@@ -1275,7 +1275,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel,
                nrels,
                i;
    ListCell   *lc;
-   bool        has_child;
+   bool        has_child;
 
    /*
     * Find all members of inheritance set.  We only need AccessShareLock on
index 9264d7fc5109e8d7b787a832a3dd12e2c0d5259a..110fb7ef6530d475dee9d811dd5851fe0458f9d1 100644 (file)
@@ -268,9 +268,9 @@ AlterCollation(AlterCollationStmt *stmt)
        elog(ERROR, "invalid collation version change");
    else if (oldversion && newversion && strcmp(newversion, oldversion) != 0)
    {
-       bool        nulls[Natts_pg_collation];
-       bool        replaces[Natts_pg_collation];
-       Datum       values[Natts_pg_collation];
+       bool        nulls[Natts_pg_collation];
+       bool        replaces[Natts_pg_collation];
+       Datum       values[Natts_pg_collation];
 
        ereport(NOTICE,
                (errmsg("changing version from %s to %s",
@@ -379,8 +379,8 @@ get_icu_language_tag(const char *localename)
    uloc_toLanguageTag(localename, buf, sizeof(buf), TRUE, &status);
    if (U_FAILURE(status))
        ereport(ERROR,
-               (errmsg("could not convert locale name \"%s\" to language tag: %s",
-                       localename, u_errorName(status))));
+         (errmsg("could not convert locale name \"%s\" to language tag: %s",
+                 localename, u_errorName(status))));
 
    return pstrdup(buf);
 }
@@ -405,7 +405,7 @@ get_icu_locale_comment(const char *localename)
 
    return result;
 }
-#endif /* USE_ICU */
+#endif   /* USE_ICU */
 
 
 Datum
@@ -493,7 +493,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
 
        CollationCreate(localebuf, nspid, GetUserId(), COLLPROVIDER_LIBC, enc,
                        localebuf, localebuf,
-                       get_collation_actual_version(COLLPROVIDER_LIBC, localebuf),
+                 get_collation_actual_version(COLLPROVIDER_LIBC, localebuf),
                        if_not_exists);
 
        CommandCounterIncrement();
@@ -526,7 +526,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
 
        CollationCreate(alias, nspid, GetUserId(), COLLPROVIDER_LIBC, enc,
                        locale, locale,
-                       get_collation_actual_version(COLLPROVIDER_LIBC, locale),
+                    get_collation_actual_version(COLLPROVIDER_LIBC, locale),
                        true);
        CommandCounterIncrement();
    }
@@ -546,7 +546,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
    }
    else
    {
-       int i;
+       int         i;
 
        /*
         * Start the loop at -1 to sneak in the root locale without too much
@@ -563,7 +563,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
            Oid         collid;
 
            if (i == -1)
-               name = "";  /* ICU root locale */
+               name = "";      /* ICU root locale */
            else
                name = ucol_getAvailable(i);
 
@@ -572,7 +572,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
            collid = CollationCreate(psprintf("%s-x-icu", langtag),
                                     nspid, GetUserId(), COLLPROVIDER_ICU, -1,
                                     collcollate, collcollate,
-                                    get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
+                get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
                                     if_not_exists);
 
            CreateComments(collid, CollationRelationId, 0,
@@ -585,29 +585,29 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
            en = ucol_getKeywordValuesForLocale("collation", name, TRUE, &status);
            if (U_FAILURE(status))
                ereport(ERROR,
-                       (errmsg("could not get keyword values for locale \"%s\": %s",
-                               name, u_errorName(status))));
+               (errmsg("could not get keyword values for locale \"%s\": %s",
+                       name, u_errorName(status))));
 
            status = U_ZERO_ERROR;
            uenum_reset(en, &status);
            while ((val = uenum_next(en, NULL, &status)))
            {
-               char *localeid = psprintf("%s@collation=%s", name, val);
+               char       *localeid = psprintf("%s@collation=%s", name, val);
 
-               langtag =  get_icu_language_tag(localeid);
+               langtag = get_icu_language_tag(localeid);
                collcollate = U_ICU_VERSION_MAJOR_NUM >= 54 ? langtag : localeid;
                collid = CollationCreate(psprintf("%s-x-icu", langtag),
-                                        nspid, GetUserId(), COLLPROVIDER_ICU, -1,
+                                   nspid, GetUserId(), COLLPROVIDER_ICU, -1,
                                         collcollate, collcollate,
-                                        get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
+                get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
                                         if_not_exists);
                CreateComments(collid, CollationRelationId, 0,
                               get_icu_locale_comment(localeid));
            }
            if (U_FAILURE(status))
                ereport(ERROR,
-                       (errmsg("could not get keyword values for locale \"%s\": %s",
-                               name, u_errorName(status))));
+               (errmsg("could not get keyword values for locale \"%s\": %s",
+                       name, u_errorName(status))));
            uenum_close(en);
        }
    }
index 137b1ef42d9662bd65007156020c2e0711629bb5..84b1a54cb9b4ed81015ef96a30f7c01179750d99 100644 (file)
@@ -111,7 +111,7 @@ typedef struct CopyStateData
    List       *attnumlist;     /* integer list of attnums to copy */
    char       *filename;       /* filename, or NULL for STDIN/STDOUT */
    bool        is_program;     /* is 'filename' a program to popen? */
-   copy_data_source_cb data_source_cb;     /* function for reading data*/
+   copy_data_source_cb data_source_cb; /* function for reading data */
    bool        binary;         /* binary format? */
    bool        oids;           /* include OIDs? */
    bool        freeze;         /* freeze rows on loading? */
@@ -532,7 +532,7 @@ CopySendEndOfRow(CopyState cstate)
            (void) pq_putmessage('d', fe_msgbuf->data, fe_msgbuf->len);
            break;
        case COPY_CALLBACK:
-           Assert(false); /* Not yet supported. */
+           Assert(false);      /* Not yet supported. */
            break;
    }
 
index c0ba2b451a7fa6f2d03ffa12682328e6d0102c1f..11038f6764c02656d193f6e14fc6a5232dd0c60e 100644 (file)
@@ -855,8 +855,8 @@ dropdb(const char *dbname, bool missing_ok)
    {
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_IN_USE),
-             errmsg("database \"%s\" is used by an active logical replication slot",
-                    dbname),
+                errmsg("database \"%s\" is used by an active logical replication slot",
+                       dbname),
                 errdetail_plural("There is %d active slot",
                                  "There are %d active slots",
                                  nslots_active, nslots_active)));
@@ -2134,7 +2134,8 @@ dbase_redo(XLogReaderState *record)
             * which can happen in some cases.
             *
             * This will lock out walsenders trying to connect to db-specific
-            * slots for logical decoding too, so it's safe for us to drop slots.
+            * slots for logical decoding too, so it's safe for us to drop
+            * slots.
             */
            LockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock);
            ResolveRecoveryConflictWithDatabase(xlrec->db_id);
index 8da924517b9a69fd43ec06297e6d1829e1ea04c3..3ad4eea59eccc73a3fa47650b917716318c9afb5 100644 (file)
@@ -336,7 +336,7 @@ defGetStringList(DefElem *def)
    if (nodeTag(def->arg) != T_List)
        elog(ERROR, "unrecognized node type: %d", (int) nodeTag(def->arg));
 
-   foreach(cell, (List *)def->arg)
+   foreach(cell, (List *) def->arg)
    {
        Node       *str = (Node *) lfirst(cell);
 
index a1a64fa8c9e168503947cbb9ff7d594b7b5f6162..9e307eb8af8262d2392578c733f4d7fc83835c4a 100644 (file)
@@ -102,7 +102,7 @@ RemoveObjects(DropStmt *stmt)
                ereport(ERROR,
                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                         errmsg("\"%s\" is an aggregate function",
-                               NameListToString(castNode(ObjectWithArgs, object)->objname)),
+               NameListToString(castNode(ObjectWithArgs, object)->objname)),
                errhint("Use DROP AGGREGATE to drop aggregate functions.")));
 
            ReleaseSysCache(tup);
@@ -145,7 +145,7 @@ owningrel_does_not_exist_skipping(List *object, const char **msg, char **name)
    RangeVar   *parent_rel;
 
    parent_object = list_truncate(list_copy(object),
-                                  list_length(object) - 1);
+                                 list_length(object) - 1);
 
    if (schema_does_not_exist_skipping(parent_object, msg, name))
        return true;
@@ -328,6 +328,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
        case OBJECT_FUNCTION:
            {
                ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
                if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
                    !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
                {
@@ -340,6 +341,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
        case OBJECT_AGGREGATE:
            {
                ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
                if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
                    !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
                {
@@ -352,6 +354,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
        case OBJECT_OPERATOR:
            {
                ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
                if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
                    !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
                {
@@ -390,7 +393,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                msg = gettext_noop("trigger \"%s\" for relation \"%s\" does not exist, skipping");
                name = strVal(llast(castNode(List, object)));
                args = NameListToString(list_truncate(list_copy(castNode(List, object)),
-                                                     list_length(castNode(List, object)) - 1));
+                                  list_length(castNode(List, object)) - 1));
            }
            break;
        case OBJECT_POLICY:
@@ -399,7 +402,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                msg = gettext_noop("policy \"%s\" for relation \"%s\" does not exist, skipping");
                name = strVal(llast(castNode(List, object)));
                args = NameListToString(list_truncate(list_copy(castNode(List, object)),
-                                                     list_length(castNode(List, object)) - 1));
+                                  list_length(castNode(List, object)) - 1));
            }
            break;
        case OBJECT_EVENT_TRIGGER:
@@ -412,7 +415,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                msg = gettext_noop("rule \"%s\" for relation \"%s\" does not exist, skipping");
                name = strVal(llast(castNode(List, object)));
                args = NameListToString(list_truncate(list_copy(castNode(List, object)),
-                                                     list_length(castNode(List, object)) - 1));
+                                  list_length(castNode(List, object)) - 1));
            }
            break;
        case OBJECT_FDW:
index d1983257c2fc723aefa585f5d947cc2b6e866b6d..4cfab418a6f8cf91f55c2a9cfc6c92d5f02da285 100644 (file)
@@ -2250,7 +2250,7 @@ stringify_grantobjtype(GrantObjectType objtype)
    }
 
    elog(ERROR, "unrecognized grant object type: %d", (int) objtype);
-   return "???";       /* keep compiler quiet */
+   return "???";               /* keep compiler quiet */
 }
 
 /*
@@ -2292,5 +2292,5 @@ stringify_adefprivs_objtype(GrantObjectType objtype)
    }
 
    elog(ERROR, "unrecognized grant object type: %d", (int) objtype);
-   return "???";       /* keep compiler quiet */
+   return "???";               /* keep compiler quiet */
 }
index 96cf296210f924e067449f424bb5d23691ea230f..ba85952baaef52b07463038c1d462337fb9ea983 100644 (file)
@@ -878,8 +878,8 @@ CreateForeignServer(CreateForeignServerStmt *stmt)
    ownerId = GetUserId();
 
    /*
-    * Check that there is no other foreign server by this name.
-    * Do nothing if IF NOT EXISTS was enforced.
+    * Check that there is no other foreign server by this name. Do nothing if
+    * IF NOT EXISTS was enforced.
     */
    if (GetForeignServerByName(stmt->servername, true) != NULL)
    {
@@ -1171,20 +1171,20 @@ CreateUserMapping(CreateUserMappingStmt *stmt)
        if (stmt->if_not_exists)
        {
            ereport(NOTICE,
-               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                errmsg("user mapping for \"%s\" already exists for server %s, skipping",
-                       MappingUserName(useId),
-                       stmt->servername)));
+                   (errcode(ERRCODE_DUPLICATE_OBJECT),
+                    errmsg("user mapping for \"%s\" already exists for server %s, skipping",
+                           MappingUserName(useId),
+                           stmt->servername)));
 
            heap_close(rel, RowExclusiveLock);
            return InvalidObjectAddress;
        }
        else
            ereport(ERROR,
-               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                errmsg("user mapping for \"%s\" already exists for server %s",
-                       MappingUserName(useId),
-                       stmt->servername)));
+                   (errcode(ERRCODE_DUPLICATE_OBJECT),
+              errmsg("user mapping for \"%s\" already exists for server %s",
+                     MappingUserName(useId),
+                     stmt->servername)));
    }
 
    fdw = GetForeignDataWrapper(srv->fdwid);
@@ -1275,8 +1275,8 @@ AlterUserMapping(AlterUserMappingStmt *stmt)
    if (!OidIsValid(umId))
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_OBJECT),
-                errmsg("user mapping for \"%s\" does not exist for the server",
-                       MappingUserName(useId))));
+             errmsg("user mapping for \"%s\" does not exist for the server",
+                    MappingUserName(useId))));
 
    user_mapping_ddl_aclcheck(useId, srv->serverid, stmt->servername);
 
@@ -1390,13 +1390,13 @@ RemoveUserMapping(DropUserMappingStmt *stmt)
        if (!stmt->missing_ok)
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
-                 errmsg("user mapping for \"%s\" does not exist for the server",
-                        MappingUserName(useId))));
+             errmsg("user mapping for \"%s\" does not exist for the server",
+                    MappingUserName(useId))));
 
        /* IF EXISTS specified, just note it */
        ereport(NOTICE,
-       (errmsg("user mapping for \"%s\" does not exist for the server, skipping",
-               MappingUserName(useId))));
+               (errmsg("user mapping for \"%s\" does not exist for the server, skipping",
+                       MappingUserName(useId))));
        return InvalidOid;
    }
 
index 1c8d88d336e50f64a1996d055381a3e280a544d0..8f06c23df95b94b4f66e471c56a31c94f3dbf968 100644 (file)
@@ -74,7 +74,7 @@ parse_publication_options(List *options,
    *publish_delete = true;
 
    /* Parse options */
-   foreach (lc, options)
+   foreach(lc, options)
    {
        DefElem    *defel = (DefElem *) lfirst(lc);
 
@@ -106,9 +106,9 @@ parse_publication_options(List *options,
                         errmsg("invalid publish list")));
 
            /* Process the option list. */
-           foreach (lc, publish_list)
+           foreach(lc, publish_list)
            {
-               char *publish_opt = (char *)lfirst(lc);
+               char       *publish_opt = (char *) lfirst(lc);
 
                if (strcmp(publish_opt, "insert") == 0)
                    *publish_insert = true;
@@ -157,7 +157,7 @@ CreatePublication(CreatePublicationStmt *stmt)
    if (stmt->for_all_tables && !superuser())
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                (errmsg("must be superuser to create FOR ALL TABLES publication"))));
+       (errmsg("must be superuser to create FOR ALL TABLES publication"))));
 
    rel = heap_open(PublicationRelationId, RowExclusiveLock);
 
@@ -228,7 +228,7 @@ CreatePublication(CreatePublicationStmt *stmt)
  */
 static void
 AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
-                      HeapTuple tup)
+                       HeapTuple tup)
 {
    bool        nulls[Natts_pg_publication];
    bool        replaces[Natts_pg_publication];
@@ -237,7 +237,7 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
    bool        publish_insert;
    bool        publish_update;
    bool        publish_delete;
-   ObjectAddress       obj;
+   ObjectAddress obj;
 
    parse_publication_options(stmt->options,
                              &publish_given, &publish_insert,
@@ -275,7 +275,7 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
    }
    else
    {
-       List    *relids = GetPublicationRelations(HeapTupleGetOid(tup));
+       List       *relids = GetPublicationRelations(HeapTupleGetOid(tup));
 
        /*
         * We don't want to send too many individual messages, at some point
@@ -283,11 +283,11 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
         */
        if (list_length(relids) < MAX_RELCACHE_INVAL_MSGS)
        {
-           ListCell *lc;
+           ListCell   *lc;
 
-           foreach (lc, relids)
+           foreach(lc, relids)
            {
-               Oid relid = lfirst_oid(lc);
+               Oid         relid = lfirst_oid(lc);
 
                CacheInvalidateRelcacheByRelid(relid);
            }
@@ -330,7 +330,7 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
        PublicationAddTables(pubid, rels, false, stmt);
    else if (stmt->tableAction == DEFELEM_DROP)
        PublicationDropTables(pubid, rels, false);
-   else /* DEFELEM_SET */
+   else    /* DEFELEM_SET */
    {
        List       *oldrelids = GetPublicationRelations(pubid);
        List       *delrels = NIL;
@@ -358,6 +358,7 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
            {
                Relation    oldrel = heap_open(oldrelid,
                                               ShareUpdateExclusiveLock);
+
                delrels = lappend(delrels, oldrel);
            }
        }
@@ -366,8 +367,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
        PublicationDropTables(pubid, delrels, true);
 
        /*
-        * Don't bother calculating the difference for adding, we'll catch
-        * and skip existing ones when doing catalog update.
+        * Don't bother calculating the difference for adding, we'll catch and
+        * skip existing ones when doing catalog update.
         */
        PublicationAddTables(pubid, rels, true, stmt);
 
@@ -386,8 +387,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
 void
 AlterPublication(AlterPublicationStmt *stmt)
 {
-   Relation        rel;
-   HeapTuple       tup;
+   Relation    rel;
+   HeapTuple   tup;
 
    rel = heap_open(PublicationRelationId, RowExclusiveLock);
 
@@ -444,9 +445,9 @@ RemovePublicationById(Oid pubid)
 void
 RemovePublicationRelById(Oid proid)
 {
-   Relation        rel;
-   HeapTuple       tup;
-   Form_pg_publication_rel     pubrel;
+   Relation    rel;
+   HeapTuple   tup;
+   Form_pg_publication_rel pubrel;
 
    rel = heap_open(PublicationRelRelationId, RowExclusiveLock);
 
@@ -570,14 +571,14 @@ static void
 PublicationAddTables(Oid pubid, List *rels, bool if_not_exists,
                     AlterPublicationStmt *stmt)
 {
-   ListCell       *lc;
+   ListCell   *lc;
 
    Assert(!stmt || !stmt->for_all_tables);
 
    foreach(lc, rels)
    {
        Relation    rel = (Relation) lfirst(lc);
-       ObjectAddress   obj;
+       ObjectAddress obj;
 
        /* Must be owner of the table or superuser. */
        if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId()))
@@ -602,9 +603,9 @@ PublicationAddTables(Oid pubid, List *rels, bool if_not_exists,
 static void
 PublicationDropTables(Oid pubid, List *rels, bool missing_ok)
 {
-   ObjectAddress   obj;
-   ListCell       *lc;
-   Oid             prid;
+   ObjectAddress obj;
+   ListCell   *lc;
+   Oid         prid;
 
    foreach(lc, rels)
    {
@@ -632,7 +633,7 @@ PublicationDropTables(Oid pubid, List *rels, bool missing_ok)
 /*
  * Internal workhorse for changing a publication owner
  */
-   static void
+static void
 AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
 {
    Form_pg_publication form;
@@ -663,8 +664,8 @@ AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
        if (form->puballtables && !superuser_arg(newOwnerId))
            ereport(ERROR,
                    (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                    errmsg("permission denied to change owner of publication \"%s\"",
-                           NameStr(form->pubname)),
+           errmsg("permission denied to change owner of publication \"%s\"",
+                  NameStr(form->pubname)),
                     errhint("The owner of a FOR ALL TABLES publication must be a superuser.")));
    }
 
@@ -686,9 +687,9 @@ AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
 ObjectAddress
 AlterPublicationOwner(const char *name, Oid newOwnerId)
 {
-   Oid                     subid;
-   HeapTuple       tup;
-   Relation        rel;
+   Oid         subid;
+   HeapTuple   tup;
+   Relation    rel;
    ObjectAddress address;
 
    rel = heap_open(PublicationRelationId, RowExclusiveLock);
@@ -719,8 +720,8 @@ AlterPublicationOwner(const char *name, Oid newOwnerId)
 void
 AlterPublicationOwner_oid(Oid subid, Oid newOwnerId)
 {
-   HeapTuple       tup;
-   Relation        rel;
+   HeapTuple   tup;
+   Relation    rel;
 
    rel = heap_open(PublicationRelationId, RowExclusiveLock);
 
index 0f7cf1dce8af2d47e5530c8b895fcc12a704e4e0..568b3022f2dfc7dcd5fcd2fa3691e8070705494f 100644 (file)
@@ -100,10 +100,10 @@ static Form_pg_sequence_data read_seq_tuple(Relation rel,
               Buffer *buf, HeapTuple seqdatatuple);
 static LOCKMODE alter_sequence_get_lock_level(List *options);
 static void init_params(ParseState *pstate, List *options, bool for_identity,
-                       bool isInit,
-                       Form_pg_sequence seqform,
-                       bool *changed_seqform,
-                       Form_pg_sequence_data seqdataform, List **owned_by);
+           bool isInit,
+           Form_pg_sequence seqform,
+           bool *changed_seqform,
+           Form_pg_sequence_data seqdataform, List **owned_by);
 static void do_setval(Oid relid, int64 next, bool iscalled);
 static void process_owned_by(Relation seqrel, List *owned_by, bool for_identity);
 
@@ -117,7 +117,7 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq)
 {
    FormData_pg_sequence seqform;
    FormData_pg_sequence_data seqdataform;
-   bool        changed_seqform = false; /* not used here */
+   bool        changed_seqform = false;        /* not used here */
    List       *owned_by;
    CreateStmt *stmt = makeNode(CreateStmt);
    Oid         seqoid;
@@ -703,9 +703,9 @@ nextval_internal(Oid relid, bool check_permissions)
 
                    snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
                    ereport(ERROR,
-                         (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
-                          errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
-                                 RelationGetRelationName(seqrel), buf)));
+                        (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
+                         errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
+                                RelationGetRelationName(seqrel), buf)));
                }
                next = minv;
            }
@@ -726,9 +726,9 @@ nextval_internal(Oid relid, bool check_permissions)
 
                    snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
                    ereport(ERROR,
-                         (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
-                          errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
-                                 RelationGetRelationName(seqrel), buf)));
+                        (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
+                         errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
+                                RelationGetRelationName(seqrel), buf)));
                }
                next = maxv;
            }
@@ -1390,7 +1390,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
    /* AS type */
    if (as_type != NULL)
    {
-       Oid     newtypid = typenameTypeId(pstate, defGetTypeName(as_type));
+       Oid         newtypid = typenameTypeId(pstate, defGetTypeName(as_type));
 
        if (newtypid != INT2OID &&
            newtypid != INT4OID &&
@@ -1399,7 +1399,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     for_identity
                     ? errmsg("identity column type must be smallint, integer, or bigint")
-                    : errmsg("sequence type must be smallint, integer, or bigint")));
+           : errmsg("sequence type must be smallint, integer, or bigint")));
 
        if (!isInit)
        {
@@ -1411,11 +1411,11 @@ init_params(ParseState *pstate, List *options, bool for_identity,
             */
            if ((seqform->seqtypid == INT2OID && seqform->seqmax == PG_INT16_MAX) ||
                (seqform->seqtypid == INT4OID && seqform->seqmax == PG_INT32_MAX) ||
-               (seqform->seqtypid == INT8OID && seqform->seqmax == PG_INT64_MAX))
+           (seqform->seqtypid == INT8OID && seqform->seqmax == PG_INT64_MAX))
                reset_max_value = true;
            if ((seqform->seqtypid == INT2OID && seqform->seqmin == PG_INT16_MIN) ||
                (seqform->seqtypid == INT4OID && seqform->seqmin == PG_INT32_MIN) ||
-               (seqform->seqtypid == INT8OID && seqform->seqmin == PG_INT64_MIN))
+           (seqform->seqtypid == INT8OID && seqform->seqmin == PG_INT64_MIN))
                reset_min_value = true;
        }
 
@@ -1479,7 +1479,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
                seqform->seqmax = PG_INT64_MAX;
        }
        else
-           seqform->seqmax = -1;   /* descending seq */
+           seqform->seqmax = -1;       /* descending seq */
        *changed_seqform = true;
        seqdataform->log_cnt = 0;
    }
@@ -1494,8 +1494,8 @@ init_params(ParseState *pstate, List *options, bool for_identity,
 
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                errmsg("MAXVALUE (%s) is out of range for sequence data type %s",
-                       bufx, format_type_be(seqform->seqtypid))));
+           errmsg("MAXVALUE (%s) is out of range for sequence data type %s",
+                  bufx, format_type_be(seqform->seqtypid))));
    }
 
    /* MINVALUE (null arg means NO MINVALUE) */
@@ -1518,7 +1518,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
                seqform->seqmin = PG_INT64_MIN;
        }
        else
-           seqform->seqmin = 1; /* ascending seq */
+           seqform->seqmin = 1;    /* ascending seq */
        *changed_seqform = true;
        seqdataform->log_cnt = 0;
    }
@@ -1533,8 +1533,8 @@ init_params(ParseState *pstate, List *options, bool for_identity,
 
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                errmsg("MINVALUE (%s) is out of range for sequence data type %s",
-                       bufm, format_type_be(seqform->seqtypid))));
+           errmsg("MINVALUE (%s) is out of range for sequence data type %s",
+                  bufm, format_type_be(seqform->seqtypid))));
    }
 
    /* crosscheck min/max */
@@ -1560,9 +1560,9 @@ init_params(ParseState *pstate, List *options, bool for_identity,
    else if (isInit)
    {
        if (seqform->seqincrement > 0)
-           seqform->seqstart = seqform->seqmin;    /* ascending seq */
+           seqform->seqstart = seqform->seqmin;        /* ascending seq */
        else
-           seqform->seqstart = seqform->seqmax;    /* descending seq */
+           seqform->seqstart = seqform->seqmax;        /* descending seq */
        *changed_seqform = true;
    }
 
index 94865b395b7db06ed18a871c7aa429950e9baa96..2b3785f394514f7bc938c61d8e17fd7bd8442038 100644 (file)
@@ -90,8 +90,8 @@ CreateStatistics(CreateStatsStmt *stmt)
        {
            ereport(NOTICE,
                    (errcode(ERRCODE_DUPLICATE_OBJECT),
-                    errmsg("statistics object \"%s\" already exists, skipping",
-                           namestr)));
+                 errmsg("statistics object \"%s\" already exists, skipping",
+                        namestr)));
            return InvalidObjectAddress;
        }
 
index 1f7274bc572fdcbdb6d36d59eb51ea4c8eab32da..89358a4ec3c06f9a3da7287ab26b34bf48c27795 100644 (file)
@@ -94,7 +94,7 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given,
        *synchronous_commit = NULL;
 
    /* Parse options */
-   foreach (lc, options)
+   foreach(lc, options)
    {
        DefElem    *defel = (DefElem *) lfirst(lc);
 
@@ -200,8 +200,8 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given,
    }
 
    /*
-    * Do additional checking for disallowed combination when
-    * slot_name = NONE was used.
+    * Do additional checking for disallowed combination when slot_name = NONE
+    * was used.
     */
    if (slot_name && *slot_name_given && !*slot_name)
    {
@@ -367,7 +367,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
    values[Anum_pg_subscription_subsynccommit - 1] =
        CStringGetTextDatum(synchronous_commit);
    values[Anum_pg_subscription_subpublications - 1] =
-        publicationListToArray(publications);
+       publicationListToArray(publications);
 
    tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
 
@@ -386,12 +386,12 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
     */
    if (connect)
    {
-       XLogRecPtr          lsn;
-       char               *err;
-       WalReceiverConn    *wrconn;
-       List               *tables;
-       ListCell           *lc;
-       char                table_state;
+       XLogRecPtr  lsn;
+       char       *err;
+       WalReceiverConn *wrconn;
+       List       *tables;
+       ListCell   *lc;
+       char        table_state;
 
        /* Try to connect to the publisher. */
        wrconn = walrcv_connect(conninfo, true, stmt->subname, &err);
@@ -412,7 +412,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
             * info.
             */
            tables = fetch_table_list(wrconn, publications);
-           foreach (lc, tables)
+           foreach(lc, tables)
            {
                RangeVar   *rv = (RangeVar *) lfirst(lc);
                Oid         relid;
@@ -431,9 +431,9 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
                    (errmsg("synchronized table states")));
 
            /*
-            * If requested, create permanent slot for the subscription.
-            * We won't use the initial snapshot for anything, so no need
-            * to export it.
+            * If requested, create permanent slot for the subscription. We
+            * won't use the initial snapshot for anything, so no need to
+            * export it.
             */
            if (create_slot)
            {
@@ -442,8 +442,8 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
                walrcv_create_slot(wrconn, slotname, false,
                                   CRS_NOEXPORT_SNAPSHOT, &lsn);
                ereport(NOTICE,
-                       (errmsg("created replication slot \"%s\" on publisher",
-                               slotname)));
+                     (errmsg("created replication slot \"%s\" on publisher",
+                             slotname)));
            }
        }
        PG_CATCH();
@@ -478,7 +478,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
 static void
 AlterSubscription_refresh(Subscription *sub, bool copy_data)
 {
-   char           *err;
+   char       *err;
    List       *pubrel_names;
    List       *subrel_states;
    Oid        *subrel_local_oids;
@@ -505,31 +505,31 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
    subrel_states = GetSubscriptionRelations(sub->oid);
 
    /*
-    * Build qsorted array of local table oids for faster lookup.
-    * This can potentially contain all tables in the database so
-    * speed of lookup is important.
+    * Build qsorted array of local table oids for faster lookup. This can
+    * potentially contain all tables in the database so speed of lookup is
+    * important.
     */
    subrel_local_oids = palloc(list_length(subrel_states) * sizeof(Oid));
    off = 0;
    foreach(lc, subrel_states)
    {
        SubscriptionRelState *relstate = (SubscriptionRelState *) lfirst(lc);
+
        subrel_local_oids[off++] = relstate->relid;
    }
    qsort(subrel_local_oids, list_length(subrel_states),
          sizeof(Oid), oid_cmp);
 
    /*
-    * Walk over the remote tables and try to match them to locally
-    * known tables. If the table is not known locally create a new state
-    * for it.
+    * Walk over the remote tables and try to match them to locally known
+    * tables. If the table is not known locally create a new state for it.
     *
     * Also builds array of local oids of remote tables for the next step.
     */
    off = 0;
    pubrel_local_oids = palloc(list_length(pubrel_names) * sizeof(Oid));
 
-   foreach (lc, pubrel_names)
+   foreach(lc, pubrel_names)
    {
        RangeVar   *rv = (RangeVar *) lfirst(lc);
        Oid         relid;
@@ -546,7 +546,7 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
                     list_length(subrel_states), sizeof(Oid), oid_cmp))
        {
            SetSubscriptionRelState(sub->oid, relid,
-                                   copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY,
+                         copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY,
                                    InvalidXLogRecPtr);
            ereport(NOTICE,
                    (errmsg("added subscription for table %s.%s",
@@ -556,20 +556,20 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
    }
 
    /*
-    * Next remove state for tables we should not care about anymore using
-    * the data we collected above
+    * Next remove state for tables we should not care about anymore using the
+    * data we collected above
     */
    qsort(pubrel_local_oids, list_length(pubrel_names),
          sizeof(Oid), oid_cmp);
 
    for (off = 0; off < list_length(subrel_states); off++)
    {
-       Oid relid = subrel_local_oids[off];
+       Oid         relid = subrel_local_oids[off];
 
        if (!bsearch(&relid, pubrel_local_oids,
                     list_length(pubrel_names), sizeof(Oid), oid_cmp))
        {
-           char   *namespace;
+           char       *namespace;
 
            RemoveSubscriptionRel(sub->oid, relid);
 
@@ -596,7 +596,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
    HeapTuple   tup;
    Oid         subid;
    bool        update_tuple = false;
-   Subscription   *sub;
+   Subscription *sub;
 
    rel = heap_open(SubscriptionRelationId, RowExclusiveLock);
 
@@ -644,7 +644,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
 
                    if (slotname)
                        values[Anum_pg_subscription_subslotname - 1] =
-                       DirectFunctionCall1(namein, CStringGetDatum(slotname));
+                           DirectFunctionCall1(namein, CStringGetDatum(slotname));
                    else
                        nulls[Anum_pg_subscription_subslotname - 1] = true;
                    replaces[Anum_pg_subscription_subslotname - 1] = true;
@@ -663,8 +663,8 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
 
        case ALTER_SUBSCRIPTION_ENABLED:
            {
-               bool enabled,
-                    enabled_given;
+               bool        enabled,
+                           enabled_given;
 
                parse_subscription_options(stmt->options, NULL,
                                           &enabled_given, &enabled, NULL,
@@ -702,14 +702,14 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
        case ALTER_SUBSCRIPTION_PUBLICATION:
        case ALTER_SUBSCRIPTION_PUBLICATION_REFRESH:
            {
-               bool            copy_data;
+               bool        copy_data;
 
                parse_subscription_options(stmt->options, NULL, NULL, NULL,
                                           NULL, NULL, NULL, ©_data,
                                           NULL);
 
                values[Anum_pg_subscription_subpublications - 1] =
-                    publicationListToArray(stmt->publication);
+                   publicationListToArray(stmt->publication);
                replaces[Anum_pg_subscription_subpublications - 1] = true;
 
                update_tuple = true;
@@ -733,7 +733,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
 
        case ALTER_SUBSCRIPTION_REFRESH:
            {
-               bool            copy_data;
+               bool        copy_data;
 
                if (!sub->enabled)
                    ereport(ERROR,
@@ -791,14 +791,13 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
    char       *slotname;
    char        originname[NAMEDATALEN];
    char       *err = NULL;
-   RepOriginId originid;
-   WalReceiverConn    *wrconn = NULL;
-   StringInfoData      cmd;
+   RepOriginId originid;
+   WalReceiverConn *wrconn = NULL;
+   StringInfoData cmd;
 
    /*
-    * Lock pg_subscription with AccessExclusiveLock to ensure
-    * that the launcher doesn't restart new worker during dropping
-    * the subscription
+    * Lock pg_subscription with AccessExclusiveLock to ensure that the
+    * launcher doesn't restart new worker during dropping the subscription
     */
    rel = heap_open(SubscriptionRelationId, AccessExclusiveLock);
 
@@ -833,8 +832,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
    InvokeObjectDropHook(SubscriptionRelationId, subid, 0);
 
    /*
-    * Lock the subscription so nobody else can do anything with it
-    * (including the replication workers).
+    * Lock the subscription so nobody else can do anything with it (including
+    * the replication workers).
     */
    LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
 
@@ -895,7 +894,10 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
    if (originid != InvalidRepOriginId)
        replorigin_drop(originid);
 
-   /* If there is no slot associated with the subscription, we can finish here. */
+   /*
+    * If there is no slot associated with the subscription, we can finish
+    * here.
+    */
    if (!slotname)
    {
        heap_close(rel, NoLock);
@@ -903,8 +905,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
    }
 
    /*
-    * Otherwise drop the replication slot at the publisher node using
-    * the replication connection.
+    * Otherwise drop the replication slot at the publisher node using the
+    * replication connection.
     */
    load_file("libpqwalreceiver", false);
 
@@ -922,14 +924,15 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
 
    PG_TRY();
    {
-       WalRcvExecResult   *res;
+       WalRcvExecResult *res;
+
        res = walrcv_exec(wrconn, cmd.data, 0, NULL);
 
        if (res->status != WALRCV_OK_COMMAND)
            ereport(ERROR,
-                   (errmsg("could not drop the replication slot \"%s\" on publisher",
-                           slotname),
-                    errdetail("The error was: %s", res->err)));
+           (errmsg("could not drop the replication slot \"%s\" on publisher",
+                   slotname),
+            errdetail("The error was: %s", res->err)));
        else
            ereport(NOTICE,
                    (errmsg("dropped replication slot \"%s\" on publisher",
@@ -973,9 +976,9 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
    if (!superuser_arg(newOwnerId))
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-         errmsg("permission denied to change owner of subscription \"%s\"",
-                NameStr(form->subname)),
-            errhint("The owner of a subscription must be a superuser.")));
+          errmsg("permission denied to change owner of subscription \"%s\"",
+                 NameStr(form->subname)),
+              errhint("The owner of a subscription must be a superuser.")));
 
    form->subowner = newOwnerId;
    CatalogTupleUpdate(rel, &tup->t_self, tup);
@@ -1055,24 +1058,24 @@ AlterSubscriptionOwner_oid(Oid subid, Oid newOwnerId)
 static List *
 fetch_table_list(WalReceiverConn *wrconn, List *publications)
 {
-   WalRcvExecResult   *res;
-   StringInfoData      cmd;
-   TupleTableSlot     *slot;
-   Oid                 tableRow[2] = {TEXTOID, TEXTOID};
-   ListCell           *lc;
-   bool                first;
-   List               *tablelist = NIL;
+   WalRcvExecResult *res;
+   StringInfoData cmd;
+   TupleTableSlot *slot;
+   Oid         tableRow[2] = {TEXTOID, TEXTOID};
+   ListCell   *lc;
+   bool        first;
+   List       *tablelist = NIL;
 
    Assert(list_length(publications) > 0);
 
    initStringInfo(&cmd);
    appendStringInfo(&cmd, "SELECT DISTINCT t.schemaname, t.tablename\n"
-                          "  FROM pg_catalog.pg_publication_tables t\n"
-                          " WHERE t.pubname IN (");
+                    "  FROM pg_catalog.pg_publication_tables t\n"
+                    " WHERE t.pubname IN (");
    first = true;
-   foreach (lc, publications)
+   foreach(lc, publications)
    {
-       char *pubname = strVal(lfirst(lc));
+       char       *pubname = strVal(lfirst(lc));
 
        if (first)
            first = false;
index e259378051150ad8f0c8509812ded292a8fabd87..7319aa597e71322a7f803d647578730271101475 100644 (file)
@@ -363,9 +363,9 @@ static ObjectAddress ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
 static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
                    Node *newDefault, LOCKMODE lockmode);
 static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
-                   Node *def, LOCKMODE lockmode);
+                 Node *def, LOCKMODE lockmode);
 static ObjectAddress ATExecSetIdentity(Relation rel, const char *colName,
-                   Node *def, LOCKMODE lockmode);
+                 Node *def, LOCKMODE lockmode);
 static ObjectAddress ATExecDropIdentity(Relation rel, const char *colName, bool missing_ok, LOCKMODE lockmode);
 static void ATPrepSetStatistics(Relation rel, const char *colName,
                    Node *newValue, LOCKMODE lockmode);
@@ -643,8 +643,8 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
    descriptor->tdhasoid = (localHasOids || parentOidCount > 0);
 
    /*
-    * If a partitioned table doesn't have the system OID column, then none
-    * of its partitions should have it.
+    * If a partitioned table doesn't have the system OID column, then none of
+    * its partitions should have it.
     */
    if (stmt->partbound && parentOidCount == 0 && localHasOids)
        ereport(ERROR,
@@ -1112,9 +1112,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
    }
 
    /*
-    * Similarly, if we previously locked some other partition's heap, and
-    * the name we're looking up no longer refers to that relation, release
-    * the now-useless lock.
+    * Similarly, if we previously locked some other partition's heap, and the
+    * name we're looking up no longer refers to that relation, release the
+    * now-useless lock.
     */
    if (relOid != oldRelOid && OidIsValid(state->partParentOid))
    {
@@ -2219,8 +2219,8 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
                    else
                        ereport(ERROR,
                                (errcode(ERRCODE_DUPLICATE_COLUMN),
-                                errmsg("column \"%s\" specified more than once",
-                                       coldef->colname)));
+                            errmsg("column \"%s\" specified more than once",
+                                   coldef->colname)));
                }
                prev = rest;
                rest = next;
@@ -4541,7 +4541,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
 
                    values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate,
                                                          econtext,
-                                                    &isnull[ex->attnum - 1]);
+                                                   &isnull[ex->attnum - 1]);
                }
 
                /*
@@ -5589,12 +5589,12 @@ static void
 ATPrepDropNotNull(Relation rel, bool recurse, bool recursing)
 {
    /*
-    * If the parent is a partitioned table, like check constraints, we do
-    * not support removing the NOT NULL while partitions exist.
+    * If the parent is a partitioned table, like check constraints, we do not
+    * support removing the NOT NULL while partitions exist.
     */
    if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
    {
-       PartitionDesc   partdesc = RelationGetPartitionDesc(rel);
+       PartitionDesc partdesc = RelationGetPartitionDesc(rel);
 
        Assert(partdesc != NULL);
        if (partdesc->nparts > 0 && !recurse && !recursing)
@@ -5639,8 +5639,8 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode)
    if (get_attidentity(RelationGetRelid(rel), attnum))
        ereport(ERROR,
                (errcode(ERRCODE_SYNTAX_ERROR),
-                errmsg("column \"%s\" of relation \"%s\" is an identity column",
-                       colName, RelationGetRelationName(rel))));
+            errmsg("column \"%s\" of relation \"%s\" is an identity column",
+                   colName, RelationGetRelationName(rel))));
 
    /*
     * Check that the attribute is not in a primary key
@@ -5768,7 +5768,7 @@ ATPrepSetNotNull(Relation rel, bool recurse, bool recursing)
     */
    if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
    {
-       PartitionDesc   partdesc = RelationGetPartitionDesc(rel);
+       PartitionDesc partdesc = RelationGetPartitionDesc(rel);
 
        if (partdesc && partdesc->nparts > 0 && !recurse && !recursing)
            ereport(ERROR,
@@ -5867,8 +5867,8 @@ ATExecColumnDefault(Relation rel, const char *colName,
    if (get_attidentity(RelationGetRelid(rel), attnum))
        ereport(ERROR,
                (errcode(ERRCODE_SYNTAX_ERROR),
-                errmsg("column \"%s\" of relation \"%s\" is an identity column",
-                       colName, RelationGetRelationName(rel)),
+            errmsg("column \"%s\" of relation \"%s\" is an identity column",
+                   colName, RelationGetRelationName(rel)),
                 newDefault ? 0 : errhint("Use ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY instead.")));
 
    /*
@@ -5959,8 +5959,8 @@ ATExecAddIdentity(Relation rel, const char *colName,
    if (attTup->atthasdef)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("column \"%s\" of relation \"%s\" already has a default value",
-                       colName, RelationGetRelationName(rel))));
+       errmsg("column \"%s\" of relation \"%s\" already has a default value",
+              colName, RelationGetRelationName(rel))));
 
    attTup->attidentity = cdef->identity;
    CatalogTupleUpdate(attrelation, &tuple->t_self, tuple);
@@ -5986,7 +5986,7 @@ static ObjectAddress
 ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmode)
 {
    ListCell   *option;
-   DefElem    *generatedEl = NULL;
+   DefElem    *generatedEl = NULL;
    HeapTuple   tuple;
    Form_pg_attribute attTup;
    AttrNumber  attnum;
@@ -5995,7 +5995,7 @@ ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmod
 
    foreach(option, castNode(List, def))
    {
-       DefElem    *defel = lfirst_node(DefElem, option);
+       DefElem    *defel = lfirst_node(DefElem, option);
 
        if (strcmp(defel->defname, "generated") == 0)
        {
@@ -6036,8 +6036,8 @@ ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmod
    if (!attTup->attidentity)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("column \"%s\" of relation \"%s\" is not an identity column",
-                       colName, RelationGetRelationName(rel))));
+        errmsg("column \"%s\" of relation \"%s\" is not an identity column",
+               colName, RelationGetRelationName(rel))));
 
    if (generatedEl)
    {
@@ -11137,7 +11137,7 @@ CreateInheritance(Relation child_rel, Relation parent_rel)
                             inhseqno + 1,
                             catalogRelation,
                             parent_rel->rd_rel->relkind ==
-                                           RELKIND_PARTITIONED_TABLE);
+                            RELKIND_PARTITIONED_TABLE);
 
    /* Now we're done with pg_inherits */
    heap_close(catalogRelation, RowExclusiveLock);
index 1566fb46074a16509859eaeb7a8c0481306cca19..0271788bf9908c66041daf97a14a0e3baf5660a5 100644 (file)
@@ -340,7 +340,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 
        foreach(lc, varList)
        {
-           TriggerTransition   *tt = lfirst_node(TriggerTransition, lc);
+           TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
 
            if (!(tt->isTable))
                ereport(ERROR,
@@ -359,21 +359,21 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                         errmsg("\"%s\" is a partitioned table",
                                RelationGetRelationName(rel)),
-                    errdetail("Triggers on partitioned tables cannot have transition tables.")));
+                        errdetail("Triggers on partitioned tables cannot have transition tables.")));
 
            if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
                ereport(ERROR,
                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                         errmsg("\"%s\" is a foreign table",
                                RelationGetRelationName(rel)),
-                    errdetail("Triggers on foreign tables cannot have transition tables.")));
+                        errdetail("Triggers on foreign tables cannot have transition tables.")));
 
            if (rel->rd_rel->relkind == RELKIND_VIEW)
                ereport(ERROR,
                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                         errmsg("\"%s\" is a view",
                                RelationGetRelationName(rel)),
-                    errdetail("Triggers on views cannot have transition tables.")));
+                        errdetail("Triggers on views cannot have transition tables.")));
 
            if (stmt->timing != TRIGGER_TYPE_AFTER)
                ereport(ERROR,
@@ -396,7 +396,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                if (newtablename != NULL)
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                            errmsg("NEW TABLE cannot be specified multiple times")));
+                   errmsg("NEW TABLE cannot be specified multiple times")));
 
                newtablename = tt->name;
            }
@@ -411,7 +411,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                if (oldtablename != NULL)
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                            errmsg("OLD TABLE cannot be specified multiple times")));
+                   errmsg("OLD TABLE cannot be specified multiple times")));
 
                oldtablename = tt->name;
            }
@@ -421,7 +421,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
            strcmp(newtablename, oldtablename) == 0)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                    errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
+           errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
    }
 
    /*
@@ -782,12 +782,12 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 
    if (oldtablename)
        values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
-                                                 CStringGetDatum(oldtablename));
+                                             CStringGetDatum(oldtablename));
    else
        nulls[Anum_pg_trigger_tgoldtable - 1] = true;
    if (newtablename)
        values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
-                                                 CStringGetDatum(newtablename));
+                                             CStringGetDatum(newtablename));
    else
        nulls[Anum_pg_trigger_tgnewtable - 1] = true;
 
@@ -3412,7 +3412,8 @@ typedef struct AfterTriggersData
    AfterTriggerEventList events;       /* deferred-event list */
    int         query_depth;    /* current query list index */
    AfterTriggerEventList *query_stack; /* events pending from each query */
-   Tuplestorestate **fdw_tuplestores;  /* foreign tuples for one row from each query */
+   Tuplestorestate **fdw_tuplestores;  /* foreign tuples for one row from
+                                        * each query */
    Tuplestorestate **old_tuplestores;  /* all old tuples from each query */
    Tuplestorestate **new_tuplestores;  /* all new tuples from each query */
    int         maxquerydepth;  /* allocated len of above array */
@@ -3778,8 +3779,8 @@ AfterTriggerExecute(AfterTriggerEvent event,
        case AFTER_TRIGGER_FDW_FETCH:
            {
                Tuplestorestate *fdw_tuplestore =
-                   GetTriggerTransitionTuplestore
-                       (afterTriggers.fdw_tuplestores);
+               GetTriggerTransitionTuplestore
+               (afterTriggers.fdw_tuplestores);
 
                if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
                                             trig_tuple_slot1))
@@ -5130,7 +5131,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
            Assert(oldtup != NULL);
            old_tuplestore =
                GetTriggerTransitionTuplestore
-                   (afterTriggers.old_tuplestores);
+               (afterTriggers.old_tuplestores);
            tuplestore_puttuple(old_tuplestore, oldtup);
        }
        if ((event == TRIGGER_EVENT_INSERT &&
@@ -5143,14 +5144,14 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
            Assert(newtup != NULL);
            new_tuplestore =
                GetTriggerTransitionTuplestore
-                   (afterTriggers.new_tuplestores);
+               (afterTriggers.new_tuplestores);
            tuplestore_puttuple(new_tuplestore, newtup);
        }
 
        /* If transition tables are the only reason we're here, return. */
        if ((event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
-           (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
-           (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
+       (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
+        (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
            return;
    }
 
@@ -5253,7 +5254,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
            {
                fdw_tuplestore =
                    GetTriggerTransitionTuplestore
-                       (afterTriggers.fdw_tuplestores);
+                   (afterTriggers.fdw_tuplestores);
                new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
            }
            else
index b58d60c855d2a933ab9b231940ff4e3312b10bcc..dfb95a1ed35b5c55bec0d898bfc8125e675544a6 100644 (file)
@@ -1685,7 +1685,7 @@ deserialize_deflist(Datum txt)
                        *wsptr++ = '\0';
                        result = lappend(result,
                                         makeDefElem(pstrdup(workspace),
-                                                    (Node *) makeString(pstrdup(startvalue)), -1));
+                             (Node *) makeString(pstrdup(startvalue)), -1));
                        state = CS_WAITKEY;
                    }
                }
@@ -1717,7 +1717,7 @@ deserialize_deflist(Datum txt)
                        *wsptr++ = '\0';
                        result = lappend(result,
                                         makeDefElem(pstrdup(workspace),
-                                                    (Node *) makeString(pstrdup(startvalue)), -1));
+                             (Node *) makeString(pstrdup(startvalue)), -1));
                        state = CS_WAITKEY;
                    }
                }
@@ -1732,7 +1732,7 @@ deserialize_deflist(Datum txt)
                    *wsptr++ = '\0';
                    result = lappend(result,
                                     makeDefElem(pstrdup(workspace),
-                                                (Node *) makeString(pstrdup(startvalue)), -1));
+                             (Node *) makeString(pstrdup(startvalue)), -1));
                    state = CS_WAITKEY;
                }
                else
@@ -1751,7 +1751,7 @@ deserialize_deflist(Datum txt)
        *wsptr++ = '\0';
        result = lappend(result,
                         makeDefElem(pstrdup(workspace),
-                                    (Node *) makeString(pstrdup(startvalue)), -1));
+                             (Node *) makeString(pstrdup(startvalue)), -1));
    }
    else if (state != CS_WAITKEY)
        ereport(ERROR,
index 36d5f40f0626d572f81c39712dee49ea85af88bb..10d6ba9e04cb111ec784218885a37a691a88f96b 100644 (file)
@@ -1463,7 +1463,7 @@ AddRoleMems(const char *rolename, Oid roleid,
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_GRANT_OPERATION),
                     (errmsg("role \"%s\" is a member of role \"%s\"",
-                       rolename, get_rolespec_name(memberRole)))));
+                            rolename, get_rolespec_name(memberRole)))));
 
        /*
         * Check if entry for this role/member already exists; if so, give
@@ -1478,7 +1478,7 @@ AddRoleMems(const char *rolename, Oid roleid,
        {
            ereport(NOTICE,
                    (errmsg("role \"%s\" is already a member of role \"%s\"",
-                        get_rolespec_name(memberRole), rolename)));
+                           get_rolespec_name(memberRole), rolename)));
            ReleaseSysCache(authmem_tuple);
            continue;
        }
@@ -1587,7 +1587,7 @@ DelRoleMems(const char *rolename, Oid roleid,
        {
            ereport(WARNING,
                    (errmsg("role \"%s\" is not a member of role \"%s\"",
-                        get_rolespec_name(memberRole), rolename)));
+                           get_rolespec_name(memberRole), rolename)));
            continue;
        }
 
index 5b43a66bdc91a6bfe5def1eb7fffbf780060a340..56356de670d83d9cda2bf741a64dee66a74ada5c 100644 (file)
@@ -1337,7 +1337,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
     */
    initStringInfo(&buf);
    appendStringInfo(&buf,
-                    _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
+       _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
                     nkeep, OldestXmin);
    appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
                     nunused);
@@ -1912,8 +1912,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
        /* If we haven't prefetched this lot yet, do so now. */
        if (prefetchedUntil > blkno)
        {
-           BlockNumber prefetchStart;
-           BlockNumber pblkno;
+           BlockNumber prefetchStart;
+           BlockNumber pblkno;
 
            prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
            for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
index 996acae6e0c8e2528e5ffbce1071e2030b60b90f..a5d6574eaf37a5c370209821e95998c662c5a868 100644 (file)
@@ -469,11 +469,11 @@ DefineView(ViewStmt *stmt, const char *queryString,
    if (stmt->withCheckOption == LOCAL_CHECK_OPTION)
        stmt->options = lappend(stmt->options,
                                makeDefElem("check_option",
-                                           (Node *) makeString("local"), -1));
+                                         (Node *) makeString("local"), -1));
    else if (stmt->withCheckOption == CASCADED_CHECK_OPTION)
        stmt->options = lappend(stmt->options,
                                makeDefElem("check_option",
-                                           (Node *) makeString("cascaded"), -1));
+                                      (Node *) makeString("cascaded"), -1));
 
    /*
     * Check that the view is auto-updatable if WITH CHECK OPTION was
index 7e85c66da38b1f20bd5845ea502256621837de17..7337d21d7d2f5b5980d269e69d9c3123356c87f4 100644 (file)
@@ -413,12 +413,13 @@ ExecSupportsMarkRestore(Path *pathnode)
            return true;
 
        case T_CustomScan:
-       {
-           CustomPath *customPath = castNode(CustomPath, pathnode);
-           if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
-               return true;
-           return false;
-       }
+           {
+               CustomPath *customPath = castNode(CustomPath, pathnode);
+
+               if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
+                   return true;
+               return false;
+           }
        case T_Result:
 
            /*
index 4b1f634e2114ae03172b10e0f51aa78598e1ed13..07c8852fca8228c4e65e8ab4a3799fc7e90bd33f 100644 (file)
@@ -380,7 +380,7 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
    hashtable->in_hash_funcs = hashtable->tab_hash_funcs;
    hashtable->cur_eq_funcs = hashtable->tab_eq_funcs;
 
-   key = NULL; /* flag to reference inputslot */
+   key = NULL;                 /* flag to reference inputslot */
 
    if (isnew)
    {
index fb2ba3302c0a5c5a38ae3ad6f5ad7274a34d68ac..4a899f1eb567c74d2e8b73b2912b9b31b3c154d7 100644 (file)
@@ -868,7 +868,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
        estate->es_num_root_result_relations = 0;
        if (plannedstmt->nonleafResultRelations)
        {
-           int     num_roots = list_length(plannedstmt->rootResultRelations);
+           int         num_roots = list_length(plannedstmt->rootResultRelations);
 
            /*
             * Firstly, build ResultRelInfos for all the partitioned table
@@ -876,7 +876,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
             * triggers, if any.
             */
            resultRelInfos = (ResultRelInfo *)
-                                   palloc(num_roots * sizeof(ResultRelInfo));
+               palloc(num_roots * sizeof(ResultRelInfo));
            resultRelInfo = resultRelInfos;
            foreach(l, plannedstmt->rootResultRelations)
            {
@@ -900,7 +900,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
            /* Simply lock the rest of them. */
            foreach(l, plannedstmt->nonleafResultRelations)
            {
-               Index   resultRelIndex = lfirst_int(l);
+               Index       resultRelIndex = lfirst_int(l);
 
                /* We locked the roots above. */
                if (!list_member_int(plannedstmt->rootResultRelations,
@@ -1919,13 +1919,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
                if (resultRelInfo->ri_PartitionRoot)
                {
                    HeapTuple   tuple = ExecFetchSlotTuple(slot);
-                   TupleConversionMap  *map;
+                   TupleConversionMap *map;
 
                    rel = resultRelInfo->ri_PartitionRoot;
                    tupdesc = RelationGetDescr(rel);
                    /* a reverse map */
                    map = convert_tuples_by_name(orig_tupdesc, tupdesc,
-                               gettext_noop("could not convert row type"));
+                                gettext_noop("could not convert row type"));
                    if (map != NULL)
                    {
                        tuple = do_convert_tuple(tuple, map);
@@ -1966,13 +1966,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
            {
                HeapTuple   tuple = ExecFetchSlotTuple(slot);
                TupleDesc   old_tupdesc = RelationGetDescr(rel);
-               TupleConversionMap  *map;
+               TupleConversionMap *map;
 
                rel = resultRelInfo->ri_PartitionRoot;
                tupdesc = RelationGetDescr(rel);
                /* a reverse map */
                map = convert_tuples_by_name(old_tupdesc, tupdesc,
-                           gettext_noop("could not convert row type"));
+                                gettext_noop("could not convert row type"));
                if (map != NULL)
                {
                    tuple = do_convert_tuple(tuple, map);
@@ -2008,13 +2008,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
        {
            HeapTuple   tuple = ExecFetchSlotTuple(slot);
            TupleDesc   old_tupdesc = RelationGetDescr(rel);
-           TupleConversionMap  *map;
+           TupleConversionMap *map;
 
            rel = resultRelInfo->ri_PartitionRoot;
            tupdesc = RelationGetDescr(rel);
            /* a reverse map */
            map = convert_tuples_by_name(old_tupdesc, tupdesc,
-                       gettext_noop("could not convert row type"));
+                                gettext_noop("could not convert row type"));
            if (map != NULL)
            {
                tuple = do_convert_tuple(tuple, map);
@@ -3340,7 +3340,7 @@ ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd,
                (errcode(ERRCODE_CHECK_VIOLATION),
                 errmsg("no partition of relation \"%s\" found for row",
                        RelationGetRelationName(failed_rel)),
-           val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
+                val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
    }
 
    return result;
@@ -3359,8 +3359,8 @@ ExecBuildSlotPartitionKeyDescription(Relation rel,
                                     bool *isnull,
                                     int maxfieldlen)
 {
-   StringInfoData  buf;
-   PartitionKey    key = RelationGetPartitionKey(rel);
+   StringInfoData buf;
+   PartitionKey key = RelationGetPartitionKey(rel);
    int         partnatts = get_partition_natts(key);
    int         i;
    Oid         relid = RelationGetRelid(rel);
index 9c98f5492e81b9687b812a3021faf1c860850ee1..061018001602413b74e8267a3e5c6cf3ac971701 100644 (file)
@@ -608,9 +608,9 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
    /*
     * Also store the per-worker detail.
     *
-    * Worker instrumentation should be allocated in the same context as
-    * the regular instrumentation information, which is the per-query
-    * context. Switch into per-query memory context.
+    * Worker instrumentation should be allocated in the same context as the
+    * regular instrumentation information, which is the per-query context.
+    * Switch into per-query memory context.
     */
    oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
    ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
index 486ddf1762856a1f83a53927087e8a05d7920682..5469cde1e00c25f7e90c1322d602009bda6b3780 100644 (file)
@@ -259,7 +259,7 @@ ExecInitNode(Plan *node, EState *estate, int eflags)
 
        case T_NamedTuplestoreScan:
            result = (PlanState *) ExecInitNamedTuplestoreScan((NamedTuplestoreScan *) node,
-                                                  estate, eflags);
+                                                            estate, eflags);
            break;
 
        case T_WorkTableScan:
index 6af8018b71198d020785c89020c7c9d2a05c626f..c6a66b6195f2cc0e0ba87e49008c338e92d8ec59 100644 (file)
@@ -116,15 +116,15 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid,
                             TupleTableSlot *searchslot,
                             TupleTableSlot *outslot)
 {
-   HeapTuple       scantuple;
-   ScanKeyData     skey[INDEX_MAX_KEYS];
-   IndexScanDesc   scan;
-   SnapshotData    snap;
-   TransactionId   xwait;
-   Relation        idxrel;
-   bool            found;
-
-   /* Open the index.*/
+   HeapTuple   scantuple;
+   ScanKeyData skey[INDEX_MAX_KEYS];
+   IndexScanDesc scan;
+   SnapshotData snap;
+   TransactionId xwait;
+   Relation    idxrel;
+   bool        found;
+
+   /* Open the index. */
    idxrel = index_open(idxoid, RowExclusiveLock);
 
    /* Start an index scan. */
@@ -152,8 +152,8 @@ retry:
            snap.xmin : snap.xmax;
 
        /*
-        * If the tuple is locked, wait for locking transaction to finish
-        * and retry.
+        * If the tuple is locked, wait for locking transaction to finish and
+        * retry.
         */
        if (TransactionIdIsValid(xwait))
        {
@@ -165,7 +165,7 @@ retry:
    /* Found tuple, try to lock it in the lockmode. */
    if (found)
    {
-       Buffer buf;
+       Buffer      buf;
        HeapUpdateFailureData hufd;
        HTSU_Result res;
        HeapTupleData locktup;
@@ -177,7 +177,7 @@ retry:
        res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
                              lockmode,
                              LockWaitBlock,
-                             false /* don't follow updates */,
+                             false /* don't follow updates */ ,
                              &buf, &hufd);
        /* the tuple slot already has the buffer pinned */
        ReleaseBuffer(buf);
@@ -219,7 +219,7 @@ retry:
  * to use.
  */
 static bool
-tuple_equals_slot(TupleDesc    desc, HeapTuple tup, TupleTableSlot *slot)
+tuple_equals_slot(TupleDesc desc, HeapTuple tup, TupleTableSlot *slot)
 {
    Datum       values[MaxTupleAttributeNumber];
    bool        isnull[MaxTupleAttributeNumber];
@@ -267,12 +267,12 @@ bool
 RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode,
                         TupleTableSlot *searchslot, TupleTableSlot *outslot)
 {
-   HeapTuple       scantuple;
-   HeapScanDesc    scan;
-   SnapshotData    snap;
-   TransactionId   xwait;
-   bool            found;
-   TupleDesc       desc = RelationGetDescr(rel);
+   HeapTuple   scantuple;
+   HeapScanDesc scan;
+   SnapshotData snap;
+   TransactionId xwait;
+   bool        found;
+   TupleDesc   desc = RelationGetDescr(rel);
 
    Assert(equalTupleDescs(desc, outslot->tts_tupleDescriptor));
 
@@ -299,8 +299,8 @@ retry:
            snap.xmin : snap.xmax;
 
        /*
-        * If the tuple is locked, wait for locking transaction to finish
-        * and retry.
+        * If the tuple is locked, wait for locking transaction to finish and
+        * retry.
         */
        if (TransactionIdIsValid(xwait))
        {
@@ -312,7 +312,7 @@ retry:
    /* Found tuple, try to lock it in the lockmode. */
    if (found)
    {
-       Buffer buf;
+       Buffer      buf;
        HeapUpdateFailureData hufd;
        HTSU_Result res;
        HeapTupleData locktup;
@@ -324,7 +324,7 @@ retry:
        res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
                              lockmode,
                              LockWaitBlock,
-                             false /* don't follow updates */,
+                             false /* don't follow updates */ ,
                              &buf, &hufd);
        /* the tuple slot already has the buffer pinned */
        ReleaseBuffer(buf);
@@ -363,10 +363,10 @@ retry:
 void
 ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
 {
-   bool            skip_tuple = false;
-   HeapTuple       tuple;
-   ResultRelInfo  *resultRelInfo = estate->es_result_relation_info;
-   Relation        rel = resultRelInfo->ri_RelationDesc;
+   bool        skip_tuple = false;
+   HeapTuple   tuple;
+   ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+   Relation    rel = resultRelInfo->ri_RelationDesc;
 
    /* For now we support only tables. */
    Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -379,7 +379,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
    {
        slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
 
-       if (slot == NULL)   /* "do nothing" */
+       if (slot == NULL)       /* "do nothing" */
            skip_tuple = true;
    }
 
@@ -420,10 +420,10 @@ void
 ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
                         TupleTableSlot *searchslot, TupleTableSlot *slot)
 {
-   bool            skip_tuple = false;
-   HeapTuple       tuple;
-   ResultRelInfo  *resultRelInfo = estate->es_result_relation_info;
-   Relation        rel = resultRelInfo->ri_RelationDesc;
+   bool        skip_tuple = false;
+   HeapTuple   tuple;
+   ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+   Relation    rel = resultRelInfo->ri_RelationDesc;
 
    /* For now we support only tables. */
    Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -438,7 +438,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
                                    &searchslot->tts_tuple->t_self,
                                    NULL, slot);
 
-       if (slot == NULL)   /* "do nothing" */
+       if (slot == NULL)       /* "do nothing" */
            skip_tuple = true;
    }
 
@@ -482,9 +482,9 @@ void
 ExecSimpleRelationDelete(EState *estate, EPQState *epqstate,
                         TupleTableSlot *searchslot)
 {
-   bool            skip_tuple = false;
-   ResultRelInfo  *resultRelInfo = estate->es_result_relation_info;
-   Relation        rel = resultRelInfo->ri_RelationDesc;
+   bool        skip_tuple = false;
+   ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+   Relation    rel = resultRelInfo->ri_RelationDesc;
 
    /* For now we support only tables. */
    Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -568,6 +568,6 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
    if (relkind != RELKIND_RELATION)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                errmsg("logical replication target relation \"%s.%s\" is not a table",
-                       nspname, relname)));
+       errmsg("logical replication target relation \"%s.%s\" is not a table",
+              nspname, relname)));
 }
index 08229bd6a727a4fd19519e793fc76b2794934086..cb2596cb317e293cb525c7a4f0d5fb7c0f87b2e1 100644 (file)
@@ -826,14 +826,14 @@ void
 ExecLockNonLeafAppendTables(List *partitioned_rels, EState *estate)
 {
    PlannedStmt *stmt = estate->es_plannedstmt;
-   ListCell    *lc;
+   ListCell   *lc;
 
    foreach(lc, partitioned_rels)
    {
        ListCell   *l;
-       Index   rti = lfirst_int(lc);
-       bool    is_result_rel = false;
-       Oid     relid = getrelid(rti, estate->es_range_table);
+       Index       rti = lfirst_int(lc);
+       bool        is_result_rel = false;
+       Oid         relid = getrelid(rti, estate->es_range_table);
 
        /* If this is a result relation, already locked in InitPlan */
        foreach(l, stmt->nonleafResultRelations)
index c2b861828d320aa54ffc4ce080fe749a193cbe7e..7eeda95af752b992de48733b80faff09590cece4 100644 (file)
@@ -471,7 +471,7 @@ typedef struct AggStatePerGroupData
     * NULL and not auto-replace it with a later input value. Only the first
     * non-NULL input will be auto-substituted.
     */
-} AggStatePerGroupData;
+}  AggStatePerGroupData;
 
 /*
  * AggStatePerPhaseData - per-grouping-set-phase state
@@ -515,7 +515,7 @@ typedef struct AggStatePerHashData
    AttrNumber *hashGrpColIdxInput;     /* hash col indices in input slot */
    AttrNumber *hashGrpColIdxHash;      /* indices in hashtbl tuples */
    Agg        *aggnode;        /* original Agg node, for numGroups etc. */
-} AggStatePerHashData;
+}  AggStatePerHashData;
 
 
 static void select_current_set(AggState *aggstate, int setno, bool is_hash);
index a107545b831a137feedbd6e3f4558043bbdf7f10..aae5e3fa63c9f6e574121bf76739b5764351b9c7 100644 (file)
@@ -129,8 +129,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags)
    Assert(!(eflags & EXEC_FLAG_MARK));
 
    /*
-    * Lock the non-leaf tables in the partition tree controlled by this
-    * node. It's a no-op for non-partitioned parent tables.
+    * Lock the non-leaf tables in the partition tree controlled by this node.
+    * It's a no-op for non-partitioned parent tables.
     */
    ExecLockNonLeafAppendTables(node->partitioned_rels, estate);
 
index d240f9c03e61f200c85fd6f5c869f23781425734..c453362230856a6aa2a547557d141edd7eb9f770 100644 (file)
@@ -506,8 +506,9 @@ BitmapAdjustPrefetchIterator(BitmapHeapScanState *node,
             * In case of shared mode, we can not ensure that the current
             * blockno of the main iterator and that of the prefetch iterator
             * are same.  It's possible that whatever blockno we are
-            * prefetching will be processed by another process.  Therefore, we
-            * don't validate the blockno here as we do in non-parallel case.
+            * prefetching will be processed by another process.  Therefore,
+            * we don't validate the blockno here as we do in non-parallel
+            * case.
             */
            if (prefetch_iterator)
                tbm_shared_iterate(prefetch_iterator);
index 1e5b1b7675c5a10d0f9fdb79f245be80be4ef1c2..c1db2e263bf4ddc61d7c8ada7241ff78efe7bc94 100644 (file)
@@ -225,7 +225,7 @@ ExecGather(GatherState *node)
 void
 ExecEndGather(GatherState *node)
 {
-   ExecEndNode(outerPlanState(node));      /* let children clean up first */
+   ExecEndNode(outerPlanState(node));  /* let children clean up first */
    ExecShutdownGather(node);
    ExecFreeExprContext(&node->ps);
    ExecClearTuple(node->ps.ps_ResultTupleSlot);
index 62c399e0b18620a397ac2f5c2ce0d68a8a9400a1..e066574836b687e8fdc2ae7a4d94a462e06c9286 100644 (file)
@@ -35,7 +35,7 @@ typedef struct GMReaderTupleBuffer
    int         readCounter;
    int         nTuples;
    bool        done;
-}  GMReaderTupleBuffer;
+} GMReaderTupleBuffer;
 
 /*
  * When we read tuples from workers, it's a good idea to read several at once
@@ -230,17 +230,17 @@ ExecGatherMerge(GatherMergeState *node)
    ResetExprContext(econtext);
 
    /*
-    * Get next tuple, either from one of our workers, or by running the
-    * plan ourselves.
+    * Get next tuple, either from one of our workers, or by running the plan
+    * ourselves.
     */
    slot = gather_merge_getnext(node);
    if (TupIsNull(slot))
        return NULL;
 
    /*
-    * form the result tuple using ExecProject(), and return it --- unless
-    * the projection produces an empty set, in which case we must loop
-    * back around for another tuple
+    * form the result tuple using ExecProject(), and return it --- unless the
+    * projection produces an empty set, in which case we must loop back
+    * around for another tuple
     */
    econtext->ecxt_outertuple = slot;
    return ExecProject(node->ps.ps_ProjInfo);
@@ -255,7 +255,7 @@ ExecGatherMerge(GatherMergeState *node)
 void
 ExecEndGatherMerge(GatherMergeState *node)
 {
-   ExecEndNode(outerPlanState(node));      /* let children clean up first */
+   ExecEndNode(outerPlanState(node));  /* let children clean up first */
    ExecShutdownGatherMerge(node);
    ExecFreeExprContext(&node->ps);
    ExecClearTuple(node->ps.ps_ResultTupleSlot);
@@ -534,8 +534,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
    HeapTuple   tup = NULL;
 
    /*
-    * If we're being asked to generate a tuple from the leader, then we
-    * just call ExecProcNode as normal to produce one.
+    * If we're being asked to generate a tuple from the leader, then we just
+    * call ExecProcNode as normal to produce one.
     */
    if (gm_state->nreaders == reader)
    {
@@ -582,8 +582,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
                                               &tuple_buffer->done));
 
        /*
-        * Attempt to read more tuples in nowait mode and store them in
-        * the tuple array.
+        * Attempt to read more tuples in nowait mode and store them in the
+        * tuple array.
         */
        if (HeapTupleIsValid(tup))
            form_tuple_array(gm_state, reader);
index 8a2e78266b10dcdda79b0ccb1ea4bee3e2cae114..fef83dbdbd314f276ef6b428127b82d96f212fc4 100644 (file)
@@ -72,8 +72,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
    Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
 
    /*
-    * Lock the non-leaf tables in the partition tree controlled by this
-    * node.  It's a no-op for non-partitioned parent tables.
+    * Lock the non-leaf tables in the partition tree controlled by this node.
+    * It's a no-op for non-partitioned parent tables.
     */
    ExecLockNonLeafAppendTables(node->partitioned_rels, estate);
 
index 652cd9759961dcb5d37a74f8594562392339f266..cf555fe78d91b38f39f8f38594074bf55c4b7d00 100644 (file)
@@ -1328,7 +1328,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
 static void
 fireBSTriggers(ModifyTableState *node)
 {
-   ResultRelInfo   *resultRelInfo = node->resultRelInfo;
+   ResultRelInfo *resultRelInfo = node->resultRelInfo;
 
    /*
     * If the node modifies a partitioned table, we must fire its triggers.
@@ -1364,7 +1364,7 @@ fireBSTriggers(ModifyTableState *node)
 static void
 fireASTriggers(ModifyTableState *node)
 {
-   ResultRelInfo   *resultRelInfo = node->resultRelInfo;
+   ResultRelInfo *resultRelInfo = node->resultRelInfo;
 
    /*
     * If the node modifies a partitioned table, we must fire its triggers.
@@ -1676,7 +1676,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
    /* If modifying a partitioned table, initialize the root table info */
    if (node->rootResultRelIndex >= 0)
        mtstate->rootResultRelInfo = estate->es_root_result_relations +
-                                               node->rootResultRelIndex;
+           node->rootResultRelIndex;
 
    mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
    mtstate->mt_nplans = nplans;
@@ -1753,12 +1753,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
    /* The root table RT index is at the head of the partitioned_rels list */
    if (node->partitioned_rels)
    {
-       Index   root_rti;
-       Oid     root_oid;
+       Index       root_rti;
+       Oid         root_oid;
 
        root_rti = linitial_int(node->partitioned_rels);
        root_oid = getrelid(root_rti, estate->es_range_table);
-       rel = heap_open(root_oid, NoLock);  /* locked by InitPlan */
+       rel = heap_open(root_oid, NoLock);      /* locked by InitPlan */
    }
    else
        rel = mtstate->resultRelInfo->ri_RelationDesc;
@@ -1815,15 +1815,15 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
    }
 
    /*
-    * Build WITH CHECK OPTION constraints for each leaf partition rel.
-    * Note that we didn't build the withCheckOptionList for each partition
-    * within the planner, but simple translation of the varattnos for each
-    * partition will suffice.  This only occurs for the INSERT case;
-    * UPDATE/DELETE cases are handled above.
+    * Build WITH CHECK OPTION constraints for each leaf partition rel. Note
+    * that we didn't build the withCheckOptionList for each partition within
+    * the planner, but simple translation of the varattnos for each partition
+    * will suffice.  This only occurs for the INSERT case; UPDATE/DELETE
+    * cases are handled above.
     */
    if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
    {
-       List        *wcoList;
+       List       *wcoList;
 
        Assert(operation == CMD_INSERT);
        resultRelInfo = mtstate->mt_partitions;
index 01048cc8268e0b59239e2a60163f90d7f8ba8327..2f0a4e647b9b33facb8bda09513c666e09e5cf0e 100644 (file)
@@ -120,7 +120,7 @@ ExecProjectSRF(ProjectSetState *node, bool continuing)
 {
    TupleTableSlot *resultSlot = node->ps.ps_ResultTupleSlot;
    ExprContext *econtext = node->ps.ps_ExprContext;
-   bool        hassrf PG_USED_FOR_ASSERTS_ONLY;
+   bool hassrf PG_USED_FOR_ASSERTS_ONLY;
    bool        hasresult;
    int         argno;
 
index 85b3f67b3333abb3e4a38b2f64d60e4c024b60f2..9ae53bb8a71305bba677cd6f93344425f4b853f2 100644 (file)
@@ -64,7 +64,7 @@ typedef struct SetOpStatePerGroupData
 {
    long        numLeft;        /* number of left-input dups in group */
    long        numRight;       /* number of right-input dups in group */
-} SetOpStatePerGroupData;
+}  SetOpStatePerGroupData;
 
 
 static TupleTableSlot *setop_retrieve_direct(SetOpState *setopstate);
index e9df48044e36c6ed61f10ee85c885fce5d6622c2..da557ceb6f15421e621e837eb2af90f1c5ad202a 100644 (file)
@@ -288,7 +288,7 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext)
    PG_TRY();
    {
        routine->InitOpaque(tstate,
-                           tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
+                   tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
 
        /*
         * If evaluating the document expression returns NULL, the table
@@ -343,7 +343,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc)
    int         colno;
    Datum       value;
    int         ordinalitycol =
-       ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
+   ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
 
    /*
     * Install the document as a possibly-toasted Datum into the tablefunc
@@ -443,8 +443,8 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
        ExecClearTuple(tstate->ss.ss_ScanTupleSlot);
 
        /*
-        * Obtain the value of each column for this row, installing them into the
-        * slot; then add the tuple to the tuplestore.
+        * Obtain the value of each column for this row, installing them into
+        * the slot; then add the tuple to the tuplestore.
         */
        for (colno = 0; colno < natts; colno++)
        {
@@ -456,12 +456,12 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
            }
            else
            {
-               bool    isnull;
+               bool        isnull;
 
                values[colno] = routine->GetValue(tstate,
                                                  colno,
-                                                 tupdesc->attrs[colno]->atttypid,
-                                                 tupdesc->attrs[colno]->atttypmod,
+                                            tupdesc->attrs[colno]->atttypid,
+                                           tupdesc->attrs[colno]->atttypmod,
                                                  &isnull);
 
                /* No value?  Evaluate and apply the default, if any */
@@ -479,7 +479,7 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
                    ereport(ERROR,
                            (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
                             errmsg("null is not allowed in column \"%s\"",
-                                   NameStr(tupdesc->attrs[colno]->attname))));
+                                 NameStr(tupdesc->attrs[colno]->attname))));
 
                nulls[colno] = isnull;
            }
index 35021e1839b4cbba04003ba0eb79901049cd6539..97c39258741f65cea88891f7b239070bcde6b854 100644 (file)
@@ -1230,7 +1230,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
    if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
    {
        if (list_length(stmt_list) == 1 &&
-           linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+        linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
            linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL &&
            ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree))
            portal->cursorOptions |= CURSOR_OPT_SCROLL;
@@ -1246,7 +1246,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
    if (portal->cursorOptions & CURSOR_OPT_SCROLL)
    {
        if (list_length(stmt_list) == 1 &&
-           linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+        linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
            linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -1990,8 +1990,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
                stmt_list = pg_analyze_and_rewrite_params(parsetree,
                                                          src,
                                                          plan->parserSetup,
-                                                      plan->parserSetupArg,
-                                                     _SPI_current->queryEnv);
+                                                       plan->parserSetupArg,
+                                                    _SPI_current->queryEnv);
            }
            else
            {
@@ -2668,7 +2668,7 @@ SPI_register_relation(EphemeralNamedRelation enr)
    if (enr == NULL || enr->md.name == NULL)
        return SPI_ERROR_ARGUMENT;
 
-   res = _SPI_begin_call(false);   /* keep current memory context */
+   res = _SPI_begin_call(false);       /* keep current memory context */
    if (res < 0)
        return res;
 
@@ -2702,7 +2702,7 @@ SPI_unregister_relation(const char *name)
    if (name == NULL)
        return SPI_ERROR_ARGUMENT;
 
-   res = _SPI_begin_call(false);   /* keep current memory context */
+   res = _SPI_begin_call(false);       /* keep current memory context */
    if (res < 0)
        return res;
 
@@ -2735,8 +2735,8 @@ SPI_register_trigger_data(TriggerData *tdata)
    if (tdata->tg_newtable)
    {
        EphemeralNamedRelation enr =
-           palloc(sizeof(EphemeralNamedRelationData));
-       int     rc;
+       palloc(sizeof(EphemeralNamedRelationData));
+       int         rc;
 
        enr->md.name = tdata->tg_trigger->tgnewtable;
        enr->md.reliddesc = tdata->tg_relation->rd_id;
@@ -2752,8 +2752,8 @@ SPI_register_trigger_data(TriggerData *tdata)
    if (tdata->tg_oldtable)
    {
        EphemeralNamedRelation enr =
-           palloc(sizeof(EphemeralNamedRelationData));
-       int     rc;
+       palloc(sizeof(EphemeralNamedRelationData));
+       int         rc;
 
        enr->md.name = tdata->tg_trigger->tgoldtable;
        enr->md.reliddesc = tdata->tg_relation->rd_id;
index b08e48b344ecb743aa07c55dce8f18999a7940f7..cdf8a73aa522d45ba151b1604a3b5d988382d8c1 100644 (file)
@@ -818,7 +818,7 @@ loop:
            if (current == NULL)
            {
                iter->is_over = true;
-               break;      /* end of iteration */
+               break;          /* end of iteration */
            }
            else if (came_from == current->right)
            {
index 6d3ff68607dcf8e06a4fbb0127917c1d97be4f3d..f36d7b9b6dae9b23c6cbf0e6ed2979a2cb9954e4 100644 (file)
@@ -757,10 +757,10 @@ CheckPWChallengeAuth(Port *port, char **logdetail)
     * If the user does not exist, or has no password or it's expired, we
     * still go through the motions of authentication, to avoid revealing to
     * the client that the user didn't exist.  If 'md5' is allowed, we choose
-    * whether to use 'md5' or 'scram-sha-256' authentication based on
-    * current password_encryption setting.  The idea is that most genuine
-    * users probably have a password of that type, and if we pretend that
-    * this user had a password of that type, too, it "blends in" best.
+    * whether to use 'md5' or 'scram-sha-256' authentication based on current
+    * password_encryption setting.  The idea is that most genuine users
+    * probably have a password of that type, and if we pretend that this user
+    * had a password of that type, too, it "blends in" best.
     */
    if (!shadow_pass)
        pwtype = Password_encryption;
@@ -770,8 +770,8 @@ CheckPWChallengeAuth(Port *port, char **logdetail)
    /*
     * If 'md5' authentication is allowed, decide whether to perform 'md5' or
     * 'scram-sha-256' authentication based on the type of password the user
-    * has.  If it's an MD5 hash, we must do MD5 authentication, and if it's
-    * SCRAM verifier, we must do SCRAM authentication.
+    * has.  If it's an MD5 hash, we must do MD5 authentication, and if it's a
+    * SCRAM verifier, we must do SCRAM authentication.
     *
     * If MD5 authentication is not allowed, always use SCRAM.  If the user
     * had an MD5 password, CheckSCRAMAuth() will fail.
index e7a6b04fb5a549355f9011c81047288c185d459f..0013ee38786d8928715bcdca9b64bbfe2f7db96b 100644 (file)
@@ -50,7 +50,7 @@ get_role_password(const char *role, char **logdetail)
    {
        *logdetail = psprintf(_("Role \"%s\" does not exist."),
                              role);
-       return NULL;    /* no such user */
+       return NULL;            /* no such user */
    }
 
    datum = SysCacheGetAttr(AUTHNAME, roleTup,
@@ -60,7 +60,7 @@ get_role_password(const char *role, char **logdetail)
        ReleaseSysCache(roleTup);
        *logdetail = psprintf(_("User \"%s\" has no password assigned."),
                              role);
-       return NULL;    /* user has no password */
+       return NULL;            /* user has no password */
    }
    shadow_pass = TextDatumGetCString(datum);
 
@@ -76,7 +76,7 @@ get_role_password(const char *role, char **logdetail)
        *logdetail = psprintf(_("User \"%s\" has an empty password."),
                              role);
        pfree(shadow_pass);
-       return NULL;    /* empty password */
+       return NULL;            /* empty password */
    }
 
    /*
@@ -122,8 +122,8 @@ encrypt_password(PasswordType target_type, const char *role,
    if (guessed_type != PASSWORD_TYPE_PLAINTEXT)
    {
        /*
-        * Cannot convert an already-encrypted password from one
-        * format to another, so return it as it is.
+        * Cannot convert an already-encrypted password from one format to
+        * another, so return it as it is.
         */
        return pstrdup(password);
    }
@@ -274,6 +274,7 @@ plain_crypt_verify(const char *role, const char *shadow_pass,
            break;
 
        case PASSWORD_TYPE_PLAINTEXT:
+
            /*
             * We never store passwords in plaintext, so this shouldn't
             * happen.
index 5561c399da4643dc3ac7e5e21b1f366f882d602c..823880ebff404c3b14853f671e22b185df73ff72 100644 (file)
@@ -617,7 +617,10 @@ check_db(const char *dbname, const char *role, Oid roleid, List *tokens)
        tok = lfirst(cell);
        if (am_walsender && !am_db_walsender)
        {
-           /* physical replication walsender connections can only match replication keyword */
+           /*
+            * physical replication walsender connections can only match
+            * replication keyword
+            */
            if (token_is_keyword(tok, "replication"))
                return true;
        }
@@ -1842,7 +1845,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
        int         ret;
        List       *parsed_servers;
        ListCell   *l;
-       char       *dupval = pstrdup(val);
+       char       *dupval = pstrdup(val);
 
        REQUIRE_AUTH_OPTION(uaRADIUS, "radiusservers", "radius");
 
@@ -1891,7 +1894,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
    {
        List       *parsed_ports;
        ListCell   *l;
-       char       *dupval = pstrdup(val);
+       char       *dupval = pstrdup(val);
 
        REQUIRE_AUTH_OPTION(uaRADIUS, "radiusports", "radius");
 
@@ -1926,7 +1929,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
    else if (strcmp(name, "radiussecrets") == 0)
    {
        List       *parsed_secrets;
-       char       *dupval = pstrdup(val);
+       char       *dupval = pstrdup(val);
 
        REQUIRE_AUTH_OPTION(uaRADIUS, "radiussecrets", "radius");
 
@@ -1948,7 +1951,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
    else if (strcmp(name, "radiusidentifiers") == 0)
    {
        List       *parsed_identifiers;
-       char       *dupval = pstrdup(val);
+       char       *dupval = pstrdup(val);
 
        REQUIRE_AUTH_OPTION(uaRADIUS, "radiusidentifiers", "radius");
 
index f7b205f195e6a5ee4423e7f65de8fdf9340fcc3f..d1cc38beb2b25d6e38417a30e0651db7673c2c34 100644 (file)
@@ -85,7 +85,7 @@
 #ifdef HAVE_UTIME_H
 #include 
 #endif
-#ifdef _MSC_VER        /* mstcpip.h is missing on mingw */
+#ifdef _MSC_VER                    /* mstcpip.h is missing on mingw */
 #include 
 #endif
 
index 6ad38443a03b9621a8d010b93e929e99a4f579cb..7811ad5d526b698cda749036c729adf672ede366 100644 (file)
@@ -373,7 +373,7 @@ _copyGather(const Gather *from)
 static GatherMerge *
 _copyGatherMerge(const GatherMerge *from)
 {
-   GatherMerge    *newnode = makeNode(GatherMerge);
+   GatherMerge *newnode = makeNode(GatherMerge);
 
    /*
     * copy node superclass fields
@@ -691,7 +691,7 @@ _copyCteScan(const CteScan *from)
 static NamedTuplestoreScan *
 _copyNamedTuplestoreScan(const NamedTuplestoreScan *from)
 {
-   NamedTuplestoreScan    *newnode = makeNode(NamedTuplestoreScan);
+   NamedTuplestoreScan *newnode = makeNode(NamedTuplestoreScan);
 
    /*
     * copy node superclass fields
index 3e8189ced36f6f785bedb6eaa2b81e2ecaf93364..95c1d3efbb57619a02bcc93e01dce1ce6fb18fd8 100644 (file)
@@ -1129,7 +1129,8 @@ exprSetCollation(Node *expr, Oid collation)
            Assert(!OidIsValid(collation));     /* result is always boolean */
            break;
        case T_NextValueExpr:
-           Assert(!OidIsValid(collation));     /* result is always an integer type */
+           Assert(!OidIsValid(collation));     /* result is always an integer
+                                                * type */
            break;
        default:
            elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr));
index 8d9ff63931c0a47916bd77f5bfcbf3d720218e17..4949d58864d5b5f7fa1f600dc3078b313ed94cb6 100644 (file)
@@ -468,7 +468,7 @@ _outGather(StringInfo str, const Gather *node)
 static void
 _outGatherMerge(StringInfo str, const GatherMerge *node)
 {
-   int     i;
+   int         i;
 
    WRITE_NODE_TYPE("GATHERMERGE");
 
index c66019e3ba11ee947296df17c1528ef3b2c35a81..bbd39a2ed933e54c2d14d809aae2805a7409cbf3 100644 (file)
@@ -109,7 +109,7 @@ typedef struct PagetableEntry
  */
 typedef struct PTEntryArray
 {
-   pg_atomic_uint32    refcount;       /* no. of iterator attached */
+   pg_atomic_uint32 refcount;  /* no. of iterator attached */
    PagetableEntry ptentry[FLEXIBLE_ARRAY_MEMBER];
 } PTEntryArray;
 
@@ -206,7 +206,7 @@ typedef struct TBMSharedIteratorState
  */
 typedef struct PTIterationArray
 {
-   pg_atomic_uint32            refcount;   /* no. of iterator attached */
+   pg_atomic_uint32 refcount;  /* no. of iterator attached */
    int         index[FLEXIBLE_ARRAY_MEMBER];   /* index array */
 } PTIterationArray;
 
@@ -905,8 +905,8 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 
    /*
     * For every shared iterator, referring to pagetable and iterator array,
-    * increase the refcount by 1 so that while freeing the shared iterator
-    * we don't free pagetable and iterator array until its refcount becomes 0.
+    * increase the refcount by 1 so that while freeing the shared iterator we
+    * don't free pagetable and iterator array until its refcount becomes 0.
     */
    if (ptbase != NULL)
        pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
index b93b4fc77369a551636d418b9789bdbb553b0411..78ca55bbd6dc1049f624bb0c401fee823dab1168 100644 (file)
@@ -112,7 +112,7 @@ static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
 static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
                 RangeTblEntry *rte);
 static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
-                RangeTblEntry *rte);
+                            RangeTblEntry *rte);
 static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
                       RangeTblEntry *rte);
 static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
@@ -648,6 +648,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
            return;
 
        case RTE_NAMEDTUPLESTORE:
+
            /*
             * tuplestore cannot be shared, at least without more
             * infrastructure to support that.
@@ -1579,7 +1580,7 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
                                                            total_subpaths,
                                                            pathkeys,
                                                            NULL,
-                                                           partitioned_rels));
+                                                         partitioned_rels));
    }
 }
 
@@ -2220,10 +2221,10 @@ generate_gather_paths(PlannerInfo *root, RelOptInfo *rel)
     * For each useful ordering, we can consider an order-preserving Gather
     * Merge.
     */
-   foreach (lc, rel->partial_pathlist)
+   foreach(lc, rel->partial_pathlist)
    {
-       Path   *subpath = (Path *) lfirst(lc);
-       GatherMergePath   *path;
+       Path       *subpath = (Path *) lfirst(lc);
+       GatherMergePath *path;
 
        if (subpath->pathkeys == NIL)
            continue;
index 52643d0ad6117665bd2cbd6e8b145420690d9c0d..cdb18d978db4e96a90f5330dca723000a5426243 100644 (file)
@@ -664,8 +664,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
    {
        /*
         * For index only scans compute workers based on number of index pages
-        * fetched; the number of heap pages we fetch might be so small as
-        * to effectively rule out parallelism, which we don't want to do.
+        * fetched; the number of heap pages we fetch might be so small as to
+        * effectively rule out parallelism, which we don't want to do.
         */
        if (indexonly)
            rand_heap_pages = -1;
@@ -2188,7 +2188,7 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
    /* For partial paths, scale row estimate. */
    if (path->path.parallel_workers > 0)
    {
-       double  parallel_divisor = get_parallel_divisor(&path->path);
+       double      parallel_divisor = get_parallel_divisor(&path->path);
 
        path->path.rows =
            clamp_row_est(path->path.rows / parallel_divisor);
@@ -2624,7 +2624,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
    /* For partial paths, scale row estimate. */
    if (path->jpath.path.parallel_workers > 0)
    {
-       double  parallel_divisor = get_parallel_divisor(&path->jpath.path);
+       double      parallel_divisor = get_parallel_divisor(&path->jpath.path);
 
        path->jpath.path.rows =
            clamp_row_est(path->jpath.path.rows / parallel_divisor);
@@ -3029,7 +3029,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
    /* For partial paths, scale row estimate. */
    if (path->jpath.path.parallel_workers > 0)
    {
-       double  parallel_divisor = get_parallel_divisor(&path->jpath.path);
+       double      parallel_divisor = get_parallel_divisor(&path->jpath.path);
 
        path->jpath.path.rows =
            clamp_row_est(path->jpath.path.rows / parallel_divisor);
index 6e4bae854a3df5867b569f1f9470d67cc199b560..607a8f97bff5f61bdb8d89b2fe1cd9d93a6878d7 100644 (file)
@@ -1073,8 +1073,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
                                      true);
 
            /*
-            * if, after costing the path, we find that it's not worth
-            * using parallel workers, just free it.
+            * if, after costing the path, we find that it's not worth using
+            * parallel workers, just free it.
             */
            if (ipath->path.parallel_workers > 0)
                add_partial_path(rel, (Path *) ipath);
index 1c252c0ef55166f044f3bbea1c0db08ddf5b0595..94beeb858d8cddcd582ab7f61525bed5dcd3da07 100644 (file)
@@ -140,7 +140,7 @@ static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_pa
 static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
                    List *tlist, List *scan_clauses);
 static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root,
-                   Path *best_path, List *tlist, List *scan_clauses);
+                          Path *best_path, List *tlist, List *scan_clauses);
 static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
                          List *tlist, List *scan_clauses);
 static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
@@ -200,7 +200,7 @@ static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual,
 static CteScan *make_ctescan(List *qptlist, List *qpqual,
             Index scanrelid, int ctePlanId, int cteParam);
 static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
-            Index scanrelid, char *enrname);
+                        Index scanrelid, char *enrname);
 static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
                   Index scanrelid, int wtParam);
 static Append *make_append(List *appendplans, List *tlist, List *partitioned_rels);
@@ -4910,7 +4910,7 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
 
 /*
  * bitmap_subplan_mark_shared
- *   Set isshared flag in bitmap subplan so that it will be created in
+ *  Set isshared flag in bitmap subplan so that it will be created in
  *  shared memory.
  */
 static void
@@ -6425,7 +6425,7 @@ make_modifytable(PlannerInfo *root,
    node->partitioned_rels = partitioned_rels;
    node->resultRelations = resultRelations;
    node->resultRelIndex = -1;  /* will be set correctly in setrefs.c */
-   node->rootResultRelIndex = -1;  /* will be set correctly in setrefs.c */
+   node->rootResultRelIndex = -1;      /* will be set correctly in setrefs.c */
    node->plans = subplans;
    if (!onconflict)
    {
index c4a5651abd2afd1f91745cc389e925e4a6b8a38f..40cb79d4cd23ef90d0aa31dabf4ab52899932efb 100644 (file)
@@ -73,9 +73,9 @@ create_upper_paths_hook_type create_upper_paths_hook = NULL;
 #define EXPRKIND_QUAL              0
 #define EXPRKIND_TARGET                1
 #define EXPRKIND_RTFUNC                2
-#define EXPRKIND_RTFUNC_LATERAL    3
+#define EXPRKIND_RTFUNC_LATERAL        3
 #define EXPRKIND_VALUES                4
-#define EXPRKIND_VALUES_LATERAL    5
+#define EXPRKIND_VALUES_LATERAL        5
 #define EXPRKIND_LIMIT             6
 #define EXPRKIND_APPINFO           7
 #define EXPRKIND_PHV               8
@@ -1041,7 +1041,7 @@ inheritance_planner(PlannerInfo *root)
    ListCell   *lc;
    Index       rti;
    RangeTblEntry *parent_rte;
-   List          *partitioned_rels = NIL;
+   List       *partitioned_rels = NIL;
 
    Assert(parse->commandType != CMD_INSERT);
 
@@ -1102,10 +1102,10 @@ inheritance_planner(PlannerInfo *root)
    /*
     * If the parent RTE is a partitioned table, we should use that as the
     * nominal relation, because the RTEs added for partitioned tables
-    * (including the root parent) as child members of the inheritance set
-    * do not appear anywhere else in the plan.  The situation is exactly
-    * the opposite in the case of non-partitioned inheritance parent as
-    * described below.
+    * (including the root parent) as child members of the inheritance set do
+    * not appear anywhere else in the plan.  The situation is exactly the
+    * opposite in the case of non-partitioned inheritance parent as described
+    * below.
     */
    parent_rte = rt_fetch(parentRTindex, root->parse->rtable);
    if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE)
@@ -1278,9 +1278,9 @@ inheritance_planner(PlannerInfo *root)
         * is used elsewhere in the plan, so using the original parent RTE
         * would give rise to confusing use of multiple aliases in EXPLAIN
         * output for what the user will think is the "same" table.  OTOH,
-        * it's not a problem in the partitioned inheritance case, because
-        * the duplicate child RTE added for the parent does not appear
-        * anywhere else in the plan tree.
+        * it's not a problem in the partitioned inheritance case, because the
+        * duplicate child RTE added for the parent does not appear anywhere
+        * else in the plan tree.
         */
        if (nominalRelation < 0)
            nominalRelation = appinfo->child_relid;
@@ -3364,7 +3364,7 @@ get_number_of_groups(PlannerInfo *root,
            ListCell   *lc;
            ListCell   *lc2;
 
-           Assert(gd);  /* keep Coverity happy */
+           Assert(gd);         /* keep Coverity happy */
 
            dNumGroups = 0;
 
@@ -4336,8 +4336,8 @@ consider_groupingsets_paths(PlannerInfo *root,
            /*
             * We treat this as a knapsack problem: the knapsack capacity
             * represents work_mem, the item weights are the estimated memory
-            * usage of the hashtables needed to implement a single rollup, and
-            * we really ought to use the cost saving as the item value;
+            * usage of the hashtables needed to implement a single rollup,
+            * and we really ought to use the cost saving as the item value;
             * however, currently the costs assigned to sort nodes don't
             * reflect the comparison costs well, and so we treat all items as
             * of equal value (each rollup we hash instead saves us one sort).
@@ -6072,7 +6072,7 @@ get_partitioned_child_rels(PlannerInfo *root, Index rti)
 
    foreach(l, root->pcinfo_list)
    {
-       PartitionedChildRelInfo *pc = lfirst(l);
+       PartitionedChildRelInfo *pc = lfirst(l);
 
        if (pc->parent_relid == rti)
        {
index c192dc4f7009b0b3e2474431e4d809e0896eb45c..5cac171cb6e411cb5ab4deac14bcfefcb4ad29f7 100644 (file)
@@ -883,8 +883,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
                 * If the main target relation is a partitioned table, the
                 * following list contains the RT indexes of partitioned child
                 * relations including the root, which are not included in the
-                * above list.  We also keep RT indexes of the roots separately
-                * to be identitied as such during the executor initialization.
+                * above list.  We also keep RT indexes of the roots
+                * separately to be identitied as such during the executor
+                * initialization.
                 */
                if (splan->partitioned_rels != NIL)
                {
@@ -893,9 +894,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
                                    list_copy(splan->partitioned_rels));
                    /* Remember where this root will be in the global list. */
                    splan->rootResultRelIndex =
-                               list_length(root->glob->rootResultRelations);
+                       list_length(root->glob->rootResultRelations);
                    root->glob->rootResultRelations =
-                               lappend_int(root->glob->rootResultRelations,
+                       lappend_int(root->glob->rootResultRelations,
                                    linitial_int(splan->partitioned_rels));
                }
            }
index a1be8589015a5220bcd7d41a68ad2dee51c9819a..8b44fb96b08c6ca0c4d6ffaf002b7af6e8544465 100644 (file)
@@ -1555,9 +1555,10 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
            newrc->waitPolicy = oldrc->waitPolicy;
 
            /*
-            * We mark RowMarks for partitioned child tables as parent RowMarks
-            * so that the executor ignores them (except their existence means
-            * that the child tables be locked using appropriate mode).
+            * We mark RowMarks for partitioned child tables as parent
+            * RowMarks so that the executor ignores them (except their
+            * existence means that the child tables be locked using
+            * appropriate mode).
             */
            newrc->isParent = (childrte->relkind == RELKIND_PARTITIONED_TABLE);
 
@@ -1593,8 +1594,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
     * parent RT index to the list of RT indexes of its partitioned child
     * tables.  When creating an Append or a ModifyTable path for the parent,
     * we copy the child RT index list verbatim to the path so that it could
-    * be carried over to the executor so that the latter could identify
-    * the partitioned child tables.
+    * be carried over to the executor so that the latter could identify the
+    * partitioned child tables.
     */
    if (partitioned_child_rels != NIL)
    {
index 2d5caae9a96979cbfbe99bcaf2d62c9e7edccb57..46778aaefd3c983b944290d2d630db10b329accf 100644 (file)
@@ -1642,8 +1642,8 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
                         Relids required_outer, double *rows)
 {
    GatherMergePath *pathnode = makeNode(GatherMergePath);
-   Cost             input_startup_cost = 0;
-   Cost             input_total_cost = 0;
+   Cost        input_startup_cost = 0;
+   Cost        input_total_cost = 0;
 
    Assert(subpath->parallel_safe);
    Assert(pathkeys);
@@ -1669,7 +1669,7 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
    else
    {
        /* We'll need to insert a Sort node, so include cost for that */
-       Path        sort_path;      /* dummy for result of cost_sort */
+       Path        sort_path;  /* dummy for result of cost_sort */
 
        cost_sort(&sort_path,
                  root,
index 2a5ec181deac5a61382541430c97a79c60cb09e0..8f9dd9099b0c4ea16a46a7c4ba44eb1cf7cebb5c 100644 (file)
@@ -1149,7 +1149,7 @@ get_relation_constraints(PlannerInfo *root,
    Index       varno = rel->relid;
    Relation    relation;
    TupleConstr *constr;
-   List        *pcqual;
+   List       *pcqual;
 
    /*
     * We assume the relation has already been safely locked.
index 342d88400314956d74dac182d92393af3c841e5b..76a3868fa079305a4ec7407536d762895449c889 100644 (file)
@@ -149,9 +149,9 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
 
    /*
     * Pass top parent's relids down the inheritance hierarchy. If the parent
-    * has top_parent_relids set, it's a direct or an indirect child of the top
-    * parent indicated by top_parent_relids. By extension this child is also
-    * an indirect child of that parent.
+    * has top_parent_relids set, it's a direct or an indirect child of the
+    * top parent indicated by top_parent_relids. By extension this child is
+    * also an indirect child of that parent.
     */
    if (parent)
    {
index 567dd54c6c64394c9b165a579af11f0471dbe643..86482eba26ee894cda3edb665190470b240456e8 100644 (file)
@@ -1637,7 +1637,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
     * Recursively transform the components of the tree.
     */
    sostmt = castNode(SetOperationStmt,
-                     transformSetOperationTree(pstate, stmt, true, NULL));
+                     transformSetOperationTree(pstate, stmt, true, NULL));
    Assert(sostmt);
    qry->setOperations = (Node *) sostmt;
 
@@ -2809,8 +2809,8 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
                                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                            /*------
                              translator: %s is a SQL row locking clause such as FOR UPDATE */
-                              errmsg("%s cannot be applied to a named tuplestore",
-                                     LCS_asString(lc->strength)),
+                                    errmsg("%s cannot be applied to a named tuplestore",
+                                           LCS_asString(lc->strength)),
                             parser_errposition(pstate, thisrel->location)));
                            break;
                        default:
index e268a127d130644fefe342c29acabedfa4b5648c..27dd49d3019c6a1b66bd2fefb49c8ac17e396b91 100644 (file)
@@ -60,7 +60,7 @@ static Node *transformJoinUsingClause(ParseState *pstate,
 static Node *transformJoinOnClause(ParseState *pstate, JoinExpr *j,
                      List *namespace);
 static RangeTblEntry *getRTEForSpecialRelationTypes(ParseState *pstate,
-                       RangeVar *rv);
+                             RangeVar *rv);
 static RangeTblEntry *transformTableEntry(ParseState *pstate, RangeVar *r);
 static RangeTblEntry *transformCTEReference(ParseState *pstate, RangeVar *r,
                      CommonTableExpr *cte, Index levelsup);
@@ -70,7 +70,7 @@ static RangeTblEntry *transformRangeSubselect(ParseState *pstate,
 static RangeTblEntry *transformRangeFunction(ParseState *pstate,
                       RangeFunction *r);
 static RangeTblEntry *transformRangeTableFunc(ParseState *pstate,
-                      RangeTableFunc *t);
+                       RangeTableFunc *t);
 static TableSampleClause *transformRangeTableSample(ParseState *pstate,
                          RangeTableSample *rts);
 static Node *transformFromClauseItem(ParseState *pstate, Node *n,
@@ -359,7 +359,7 @@ transformJoinUsingClause(ParseState *pstate,
 
        /* Now create the lvar = rvar join condition */
        e = makeSimpleA_Expr(AEXPR_OP, "=",
-                            (Node *) copyObject(lvar), (Node *) copyObject(rvar),
+                       (Node *) copyObject(lvar), (Node *) copyObject(rvar),
                             -1);
 
        /* Prepare to combine into an AND clause, if multiple join columns */
@@ -759,7 +759,7 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
    /* Transform and apply typecast to the row-generating expression ... */
    Assert(rtf->rowexpr != NULL);
    tf->rowexpr = coerce_to_specific_type(pstate,
-                                         transformExpr(pstate, rtf->rowexpr, EXPR_KIND_FROM_FUNCTION),
+               transformExpr(pstate, rtf->rowexpr, EXPR_KIND_FROM_FUNCTION),
                                          TEXTOID,
                                          constructName);
    assign_expr_collations(pstate, tf->rowexpr);
@@ -767,7 +767,7 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
    /* ... and to the document itself */
    Assert(rtf->docexpr != NULL);
    tf->docexpr = coerce_to_specific_type(pstate,
-                                         transformExpr(pstate, rtf->docexpr, EXPR_KIND_FROM_FUNCTION),
+               transformExpr(pstate, rtf->docexpr, EXPR_KIND_FROM_FUNCTION),
                                          docType,
                                          constructName);
    assign_expr_collations(pstate, tf->docexpr);
@@ -792,9 +792,8 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
                               makeString(pstrdup(rawc->colname)));
 
        /*
-        * Determine the type and typmod for the new column. FOR
-        * ORDINALITY columns are INTEGER per spec; the others are
-        * user-specified.
+        * Determine the type and typmod for the new column. FOR ORDINALITY
+        * columns are INTEGER per spec; the others are user-specified.
         */
        if (rawc->for_ordinality)
        {
@@ -824,14 +823,14 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
        tf->coltypes = lappend_oid(tf->coltypes, typid);
        tf->coltypmods = lappend_int(tf->coltypmods, typmod);
        tf->colcollations = lappend_oid(tf->colcollations,
-                                       type_is_collatable(typid) ? DEFAULT_COLLATION_OID : InvalidOid);
+            type_is_collatable(typid) ? DEFAULT_COLLATION_OID : InvalidOid);
 
        /* Transform the PATH and DEFAULT expressions */
        if (rawc->colexpr)
        {
            colexpr = coerce_to_specific_type(pstate,
-                                             transformExpr(pstate, rawc->colexpr,
-                                                           EXPR_KIND_FROM_FUNCTION),
+                                        transformExpr(pstate, rawc->colexpr,
+                                                   EXPR_KIND_FROM_FUNCTION),
                                              TEXTOID,
                                              constructName);
            assign_expr_collations(pstate, colexpr);
@@ -842,8 +841,8 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
        if (rawc->coldefexpr)
        {
            coldefexpr = coerce_to_specific_type_typmod(pstate,
-                                                       transformExpr(pstate, rawc->coldefexpr,
-                                                                     EXPR_KIND_FROM_FUNCTION),
+                                     transformExpr(pstate, rawc->coldefexpr,
+                                                   EXPR_KIND_FROM_FUNCTION),
                                                        typid, typmod,
                                                        constructName);
            assign_expr_collations(pstate, coldefexpr);
@@ -1050,7 +1049,6 @@ transformRangeTableSample(ParseState *pstate, RangeTableSample *rts)
 static RangeTblEntry *
 getRTEForSpecialRelationTypes(ParseState *pstate, RangeVar *rv)
 {
-
    CommonTableExpr *cte;
    Index       levelsup;
    RangeTblEntry *rte = NULL;
index 4f9b1a76b0e7fa3eac64f5d21a3ca586df32e0e6..92101c9103d3080796f51ff3a6eba9e92cae472d 100644 (file)
@@ -1255,7 +1255,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
            /* ROW() op ROW() is handled specially */
            cmp = make_row_comparison_op(pstate,
                                         a->name,
-                                        copyObject(((RowExpr *) lexpr)->args),
+                                      copyObject(((RowExpr *) lexpr)->args),
                                         ((RowExpr *) rexpr)->args,
                                         a->location);
        }
index 40451f3fef2e4f561df1e8e40db1ce324185f7b8..e412d0f9d30b8779594b9543bf194bee3472148d 100644 (file)
@@ -1164,6 +1164,7 @@ parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockmode)
             */
            if (get_visible_ENR_metadata(pstate->p_queryEnv, relation->relname))
                rel = NULL;
+
            /*
             * An unqualified name might have been meant as a reference to
             * some not-yet-in-scope CTE.  The bare "does not exist" message
@@ -2002,7 +2003,7 @@ addRangeTableEntryForENR(ParseState *pstate,
 
        default:
            elog(ERROR, "unexpected enrtype: %d", enrmd->enrtype);
-           return NULL;  /* for fussy compilers */
+           return NULL;        /* for fussy compilers */
    }
 
    /*
index 882955bb1c98b0284387d8eb498724c9b1b8807e..beb099569bae989956be7e69877b718f1b27a3c3 100644 (file)
@@ -363,7 +363,7 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
                         char **snamespace_p, char **sname_p)
 {
    ListCell   *option;
-   DefElem    *nameEl = NULL;
+   DefElem    *nameEl = NULL;
    Oid         snamespaceid;
    char       *snamespace;
    char       *sname;
@@ -378,12 +378,12 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
     * used by pg_dump.  Else, generate a name.
     *
     * Although we use ChooseRelationName, it's not guaranteed that the
-    * selected sequence name won't conflict; given sufficiently long
-    * field names, two different serial columns in the same table could
-    * be assigned the same sequence name, and we'd not notice since we
-    * aren't creating the sequence quite yet.  In practice this seems
-    * quite unlikely to be a problem, especially since few people would
-    * need two serial columns in one table.
+    * selected sequence name won't conflict; given sufficiently long field
+    * names, two different serial columns in the same table could be assigned
+    * the same sequence name, and we'd not notice since we aren't creating
+    * the sequence quite yet.  In practice this seems quite unlikely to be a
+    * problem, especially since few people would need two serial columns in
+    * one table.
     */
 
    foreach(option, seqoptions)
@@ -402,7 +402,8 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
 
    if (nameEl)
    {
-       RangeVar *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg));
+       RangeVar   *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg));
+
        snamespace = rv->schemaname;
        sname = rv->relname;
        seqoptions = list_delete_ptr(seqoptions, nameEl);
@@ -429,14 +430,14 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
                    cxt->relation->relname, column->colname)));
 
    /*
-    * Build a CREATE SEQUENCE command to create the sequence object, and
-    * add it to the list of things to be done before this CREATE/ALTER
-    * TABLE.
+    * Build a CREATE SEQUENCE command to create the sequence object, and add
+    * it to the list of things to be done before this CREATE/ALTER TABLE.
     */
    seqstmt = makeNode(CreateSeqStmt);
    seqstmt->for_identity = for_identity;
    seqstmt->sequence = makeRangeVar(snamespace, sname, -1);
    seqstmt->options = seqoptions;
+
    /*
     * If a sequence data type was specified, add it to the options.  Prepend
     * to the list rather than append; in case a user supplied their own AS
@@ -448,11 +449,11 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
                                 seqstmt->options);
 
    /*
-    * If this is ALTER ADD COLUMN, make sure the sequence will be owned
-    * by the table's owner.  The current user might be someone else
-    * (perhaps a superuser, or someone who's only a member of the owning
-    * role), but the SEQUENCE OWNED BY mechanisms will bleat unless table
-    * and sequence have exactly the same owning role.
+    * If this is ALTER ADD COLUMN, make sure the sequence will be owned by
+    * the table's owner.  The current user might be someone else (perhaps a
+    * superuser, or someone who's only a member of the owning role), but the
+    * SEQUENCE OWNED BY mechanisms will bleat unless table and sequence have
+    * exactly the same owning role.
     */
    if (cxt->rel)
        seqstmt->ownerId = cxt->rel->rd_rel->relowner;
@@ -462,9 +463,9 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
    cxt->blist = lappend(cxt->blist, seqstmt);
 
    /*
-    * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence
-    * as owned by this column, and add it to the list of things to be
-    * done after this CREATE/ALTER TABLE.
+    * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence as
+    * owned by this column, and add it to the list of things to be done after
+    * this CREATE/ALTER TABLE.
     */
    altseqstmt = makeNode(AlterSeqStmt);
    altseqstmt->sequence = makeRangeVar(snamespace, sname, -1);
@@ -647,31 +648,31 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
                break;
 
            case CONSTR_IDENTITY:
-           {
-               Type        ctype;
-               Oid         typeOid;
+               {
+                   Type        ctype;
+                   Oid         typeOid;
 
-               ctype = typenameType(cxt->pstate, column->typeName, NULL);
-               typeOid = HeapTupleGetOid(ctype);
-               ReleaseSysCache(ctype);
+                   ctype = typenameType(cxt->pstate, column->typeName, NULL);
+                   typeOid = HeapTupleGetOid(ctype);
+                   ReleaseSysCache(ctype);
 
-               if (saw_identity)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_SYNTAX_ERROR),
-                            errmsg("multiple identity specifications for column \"%s\" of table \"%s\"",
+                   if (saw_identity)
+                       ereport(ERROR,
+                               (errcode(ERRCODE_SYNTAX_ERROR),
+                                errmsg("multiple identity specifications for column \"%s\" of table \"%s\"",
                                    column->colname, cxt->relation->relname),
-                            parser_errposition(cxt->pstate,
-                                               constraint->location)));
+                                parser_errposition(cxt->pstate,
+                                                   constraint->location)));
 
-               generateSerialExtraStmts(cxt, column,
-                                        typeOid, constraint->options, true,
-                                        NULL, NULL);
+                   generateSerialExtraStmts(cxt, column,
+                                         typeOid, constraint->options, true,
+                                            NULL, NULL);
 
-               column->identity = constraint->generated_when;
-               saw_identity = true;
-               column->is_not_null = TRUE;
-               break;
-           }
+                   column->identity = constraint->generated_when;
+                   saw_identity = true;
+                   column->is_not_null = TRUE;
+                   break;
+               }
 
            case CONSTR_CHECK:
                cxt->ckconstraints = lappend(cxt->ckconstraints, constraint);
@@ -1036,7 +1037,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
        if (attribute->attidentity &&
            (table_like_clause->options & CREATE_TABLE_LIKE_IDENTITY))
        {
-           Oid         seq_relid;
+           Oid         seq_relid;
            List       *seq_options;
 
            /*
@@ -1067,7 +1068,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
 
            stmt->objtype = OBJECT_COLUMN;
            stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname),
-                                              makeString(cxt->relation->relname),
+                                         makeString(cxt->relation->relname),
                                               makeString(def->colname));
            stmt->comment = comment;
 
@@ -1132,7 +1133,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
 
                stmt->objtype = OBJECT_TABCONSTRAINT;
                stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname),
-                                                  makeString(cxt->relation->relname),
+                                         makeString(cxt->relation->relname),
                                                   makeString(n->conname));
                stmt->comment = comment;
 
@@ -2766,7 +2767,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                     * change the data type of the sequence.
                     */
                    attnum = get_attnum(relid, cmd->name);
-                   /* if attribute not found, something will error about it later */
+
+                   /*
+                    * if attribute not found, something will error about it
+                    * later
+                    */
                    if (attnum != InvalidAttrNumber && get_attidentity(relid, attnum))
                    {
                        Oid         seq_relid = getOwnedSequence(relid, attnum);
@@ -2774,7 +2779,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                        AlterSeqStmt *altseqstmt = makeNode(AlterSeqStmt);
 
                        altseqstmt->sequence = makeRangeVar(get_namespace_name(get_rel_namespace(seq_relid)),
-                                                           get_rel_name(seq_relid),
+                                                    get_rel_name(seq_relid),
                                                            -1);
                        altseqstmt->options = list_make1(makeDefElem("as", (Node *) makeTypeNameFromOid(typeOid, -1), -1));
                        altseqstmt->for_identity = true;
@@ -2787,8 +2792,8 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
 
            case AT_AddIdentity:
                {
-                   Constraint  *def = castNode(Constraint, cmd->def);
-                   ColumnDef *newdef = makeNode(ColumnDef);
+                   Constraint *def = castNode(Constraint, cmd->def);
+                   ColumnDef  *newdef = makeNode(ColumnDef);
                    AttrNumber  attnum;
 
                    newdef->colname = cmd->name;
@@ -2796,7 +2801,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                    cmd->def = (Node *) newdef;
 
                    attnum = get_attnum(relid, cmd->name);
-                   /* if attribute not found, something will error about it later */
+
+                   /*
+                    * if attribute not found, something will error about it
+                    * later
+                    */
                    if (attnum != InvalidAttrNumber)
                        generateSerialExtraStmts(&cxt, newdef,
                                                 get_atttype(relid, attnum),
@@ -2825,7 +2834,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                     */
                    foreach(lc, castNode(List, cmd->def))
                    {
-                       DefElem    *def = lfirst_node(DefElem, lc);
+                       DefElem    *def = lfirst_node(DefElem, lc);
 
                        if (strcmp(def->defname, "generated") == 0)
                            newdef = lappend(newdef, def);
@@ -2846,7 +2855,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                            seqstmt = makeNode(AlterSeqStmt);
                            seq_relid = linitial_oid(seqlist);
                            seqstmt->sequence = makeRangeVar(get_namespace_name(get_rel_namespace(seq_relid)),
-                                                            get_rel_name(seq_relid), -1);
+                                               get_rel_name(seq_relid), -1);
                            seqstmt->options = newseqopts;
                            seqstmt->for_identity = true;
                            seqstmt->missing_ok = false;
@@ -2854,8 +2863,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                            cxt.alist = lappend(cxt.alist, seqstmt);
                        }
                    }
-                   /* If column was not found or was not an identity column, we
-                    * just let the ALTER TABLE command error out later. */
+
+                   /*
+                    * If column was not found or was not an identity column,
+                    * we just let the ALTER TABLE command error out later.
+                    */
 
                    cmd->def = (Node *) newdef;
                    newcmds = lappend(newcmds, cmd);
@@ -3392,8 +3404,8 @@ transformPartitionBound(ParseState *pstate, Relation parent, Node *bound)
            else if (seen_unbounded)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
-                        errmsg("cannot specify finite value after UNBOUNDED"),
-                        parser_errposition(pstate, exprLocation((Node *) ldatum))));
+                      errmsg("cannot specify finite value after UNBOUNDED"),
+                parser_errposition(pstate, exprLocation((Node *) ldatum))));
        }
        seen_unbounded = false;
        foreach(cell1, spec->upperdatums)
@@ -3406,8 +3418,8 @@ transformPartitionBound(ParseState *pstate, Relation parent, Node *bound)
            else if (seen_unbounded)
                ereport(ERROR,
                        (errcode(ERRCODE_DATATYPE_MISMATCH),
-                        errmsg("cannot specify finite value after UNBOUNDED"),
-                        parser_errposition(pstate, exprLocation((Node *) rdatum))));
+                      errmsg("cannot specify finite value after UNBOUNDED"),
+                parser_errposition(pstate, exprLocation((Node *) rdatum))));
        }
 
        i = j = 0;
index 6e9e03a5718e8844ffc8679a739a93fa53439ba1..f251ac6788dd5374245c2c295f566108cefe8d1e 100644 (file)
@@ -125,7 +125,7 @@ PosixSemaphoreCreate(void)
  * Attempt to create a new unnamed semaphore.
  */
 static void
-PosixSemaphoreCreate(sem_t * sem)
+PosixSemaphoreCreate(sem_t *sem)
 {
    if (sem_init(sem, 1, 1) < 0)
        elog(FATAL, "sem_init failed: %m");
@@ -137,7 +137,7 @@ PosixSemaphoreCreate(sem_t * sem)
  * PosixSemaphoreKill  - removes a semaphore
  */
 static void
-PosixSemaphoreKill(sem_t * sem)
+PosixSemaphoreKill(sem_t *sem)
 {
 #ifdef USE_NAMED_POSIX_SEMAPHORES
    /* Got to use sem_close for named semaphores */
index f1194891f5091f7735a2e3f44685d096b2776cf2..c3454276bfa406dcc2a47d9737bccd1c643e625e 100644 (file)
@@ -458,7 +458,7 @@ ReportBackgroundWorkerExit(slist_mutable_iter *cur)
 {
    RegisteredBgWorker *rw;
    BackgroundWorkerSlot *slot;
-   int     notify_pid;
+   int         notify_pid;
 
    rw = slist_container(RegisteredBgWorker, rw_lnode, cur->cur);
 
index 48efe15e8253386ff9e7bf714581fd0ab46aa911..2674bb49ba8350e2619b3ba8b4a15724fb1e29c2 100644 (file)
@@ -310,8 +310,8 @@ BackgroundWriterMain(void)
         * check whether there has been any WAL inserted since the last time
         * we've logged a running xacts.
         *
-        * We do this logging in the bgwriter as it is the only process that is
-        * run regularly and returns to its mainloop all the time. E.g.
+        * We do this logging in the bgwriter as it is the only process that
+        * is run regularly and returns to its mainloop all the time. E.g.
         * Checkpointer, when active, is barely ever in its mainloop and thus
         * makes it hard to log regularly.
         */
@@ -350,7 +350,7 @@ BackgroundWriterMain(void)
         */
        rc = WaitLatch(MyLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
-                      BgWriterDelay /* ms */, WAIT_EVENT_BGWRITER_MAIN);
+                      BgWriterDelay /* ms */ , WAIT_EVENT_BGWRITER_MAIN);
 
        /*
         * If no latch event and BgBufferSync says nothing's happening, extend
index a8dc355eada90b2d9605874eefbefec19c646c0c..a55071900d8467f1323c7e38318c94dd2fe06053 100644 (file)
@@ -558,7 +558,7 @@ CheckpointerMain(void)
 
        rc = WaitLatch(MyLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
-                      cur_timeout * 1000L /* convert to ms */,
+                      cur_timeout * 1000L /* convert to ms */ ,
                       WAIT_EVENT_CHECKPOINTER_MAIN);
 
        /*
index ba0ad3eb03aa470620122cea93105e350c711988..f453dade6c63c77ff7a5f69709a440971fad169b 100644 (file)
@@ -181,8 +181,8 @@ static TabStatusArray *pgStatTabList = NULL;
  */
 typedef struct TabStatHashEntry
 {
-   Oid t_id;
-   PgStat_TableStatustsa_entry;
+   Oid         t_id;
+   PgStat_TableStatus *tsa_entry;
 } TabStatHashEntry;
 
 /*
@@ -1748,17 +1748,17 @@ pgstat_initstats(Relation rel)
 static PgStat_TableStatus *
 get_tabstat_entry(Oid rel_id, bool isshared)
 {
-   TabStatHashEntryhash_entry;
+   TabStatHashEntry *hash_entry;
    PgStat_TableStatus *entry;
    TabStatusArray *tsa;
-   bool found;
+   bool        found;
 
    /*
     * Create hash table if we don't have it already.
     */
    if (pgStatTabHash == NULL)
    {
-       HASHCTL         ctl;
+       HASHCTL     ctl;
 
        memset(&ctl, 0, sizeof(ctl));
        ctl.keysize = sizeof(Oid);
@@ -1837,14 +1837,14 @@ get_tabstat_entry(Oid rel_id, bool isshared)
 PgStat_TableStatus *
 find_tabstat_entry(Oid rel_id)
 {
-   TabStatHashEntryhash_entry;
+   TabStatHashEntry *hash_entry;
 
    /* If hashtable doesn't exist, there are no entries at all */
-   if(!pgStatTabHash)
+   if (!pgStatTabHash)
        return NULL;
 
    hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_FIND, NULL);
-   if(!hash_entry)
+   if (!hash_entry)
        return NULL;
 
    /* Note that this step could also return NULL, but that's correct */
@@ -2872,7 +2872,7 @@ pgstat_bestart(void)
                break;
            default:
                elog(FATAL, "unrecognized process type: %d",
-                   (int) MyAuxProcType);
+                    (int) MyAuxProcType);
                proc_exit(1);
        }
    }
@@ -2891,8 +2891,8 @@ pgstat_bestart(void)
 
    /* We have userid for client-backends, wal-sender and bgworker processes */
    if (beentry->st_backendType == B_BACKEND
-           || beentry->st_backendType == B_WAL_SENDER
-           || beentry->st_backendType == B_BG_WORKER)
+       || beentry->st_backendType == B_WAL_SENDER
+       || beentry->st_backendType == B_BG_WORKER)
        beentry->st_userid = GetSessionUserId();
    else
        beentry->st_userid = InvalidOid;
@@ -3409,14 +3409,14 @@ pgstat_get_wait_event(uint32 wait_event_info)
            break;
        case PG_WAIT_ACTIVITY:
            {
-               WaitEventActivity   w = (WaitEventActivity) wait_event_info;
+               WaitEventActivity w = (WaitEventActivity) wait_event_info;
 
                event_name = pgstat_get_wait_activity(w);
                break;
            }
        case PG_WAIT_CLIENT:
            {
-               WaitEventClient w = (WaitEventClient) wait_event_info;
+               WaitEventClient w = (WaitEventClient) wait_event_info;
 
                event_name = pgstat_get_wait_client(w);
                break;
@@ -3426,14 +3426,14 @@ pgstat_get_wait_event(uint32 wait_event_info)
            break;
        case PG_WAIT_IPC:
            {
-               WaitEventIPC    w = (WaitEventIPC) wait_event_info;
+               WaitEventIPC w = (WaitEventIPC) wait_event_info;
 
                event_name = pgstat_get_wait_ipc(w);
                break;
            }
        case PG_WAIT_TIMEOUT:
            {
-               WaitEventTimeout    w = (WaitEventTimeout) wait_event_info;
+               WaitEventTimeout w = (WaitEventTimeout) wait_event_info;
 
                event_name = pgstat_get_wait_timeout(w);
                break;
@@ -3508,7 +3508,7 @@ pgstat_get_wait_activity(WaitEventActivity w)
        case WAIT_EVENT_LOGICAL_APPLY_MAIN:
            event_name = "LogicalApplyMain";
            break;
-       /* no default case, so that compiler will warn */
+           /* no default case, so that compiler will warn */
    }
 
    return event_name;
@@ -3548,7 +3548,7 @@ pgstat_get_wait_client(WaitEventClient w)
        case WAIT_EVENT_WAL_SENDER_WRITE_DATA:
            event_name = "WalSenderWriteData";
            break;
-       /* no default case, so that compiler will warn */
+           /* no default case, so that compiler will warn */
    }
 
    return event_name;
@@ -3612,7 +3612,7 @@ pgstat_get_wait_ipc(WaitEventIPC w)
        case WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE:
            event_name = "LogicalSyncStateChange";
            break;
-       /* no default case, so that compiler will warn */
+           /* no default case, so that compiler will warn */
    }
 
    return event_name;
@@ -3640,7 +3640,7 @@ pgstat_get_wait_timeout(WaitEventTimeout w)
        case WAIT_EVENT_RECOVERY_APPLY_DELAY:
            event_name = "RecoveryApplyDelay";
            break;
-       /* no default case, so that compiler will warn */
+           /* no default case, so that compiler will warn */
    }
 
    return event_name;
@@ -4061,6 +4061,7 @@ pgstat_get_backend_desc(BackendType backendType)
 
    return backendDesc;
 }
+
 /* ------------------------------------------------------------
  * Local support functions follow
  * ------------------------------------------------------------
@@ -4405,7 +4406,7 @@ PgstatCollectorMain(int argc, char *argv[])
        wr = WaitLatchOrSocket(MyLatch,
        WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
                               pgStatSock,
-                              2 * 1000L /* msec */,
+                              2 * 1000L /* msec */ ,
                               WAIT_EVENT_PGSTAT_MAIN);
 #endif
 
index fdce5524f4c4141599c6aec76799b814b88cb09e..35b4ec88d35786508781a62d9c06b1f2be712ba7 100644 (file)
@@ -1251,7 +1251,7 @@ PostmasterMain(int argc, char *argv[])
        ereport(LOG,
                (errcode_for_file_access(),
                 errmsg("could not remove file \"%s\": %m",
-                   LOG_METAINFO_DATAFILE)));
+                       LOG_METAINFO_DATAFILE)));
 
    /*
     * If enabled, start up syslogger collection subprocess
@@ -3071,7 +3071,7 @@ CleanupBackgroundWorker(int pid,
                        int exitstatus) /* child's exit status */
 {
    char        namebuf[MAXPGPATH];
-   slist_mutable_iter  iter;
+   slist_mutable_iter iter;
 
    slist_foreach_modify(iter, &BackgroundWorkerList)
    {
@@ -3147,7 +3147,7 @@ CleanupBackgroundWorker(int pid,
        rw->rw_backend = NULL;
        rw->rw_pid = 0;
        rw->rw_child_slot = 0;
-       ReportBackgroundWorkerExit(&iter);  /* report child death */
+       ReportBackgroundWorkerExit(&iter);      /* report child death */
 
        LogChildExit(EXIT_STATUS_0(exitstatus) ? DEBUG1 : LOG,
                     namebuf, pid, exitstatus);
@@ -5149,11 +5149,12 @@ RandomCancelKey(int32 *cancel_key)
 #ifdef HAVE_STRONG_RANDOM
    return pg_strong_random((char *) cancel_key, sizeof(int32));
 #else
+
    /*
     * If built with --disable-strong-random, use plain old erand48.
     *
-    * We cannot use pg_backend_random() in postmaster, because it stores
-    * its state in shared memory.
+    * We cannot use pg_backend_random() in postmaster, because it stores its
+    * state in shared memory.
     */
    static unsigned short seed[3];
 
@@ -5348,10 +5349,10 @@ StartAutovacuumWorker(void)
    if (canAcceptConnections() == CAC_OK)
    {
        /*
-        * Compute the cancel key that will be assigned to this session.
-        * We probably don't need cancel keys for autovac workers, but
-        * we'd better have something random in the field to prevent
-        * unfriendly people from sending cancels to them.
+        * Compute the cancel key that will be assigned to this session. We
+        * probably don't need cancel keys for autovac workers, but we'd
+        * better have something random in the field to prevent unfriendly
+        * people from sending cancels to them.
         */
        if (!RandomCancelKey(&MyCancelKey))
        {
index aaefdaebad9f8536e1bd7ff3681f100a30fccbf2..9f5ca5cac08e025248bb39a4963034fd55d0fe2b 100644 (file)
@@ -1360,7 +1360,7 @@ set_next_rotation_time(void)
 static void
 update_metainfo_datafile(void)
 {
-   FILE    *fh;
+   FILE       *fh;
 
    if (!(Log_destination & LOG_DESTINATION_STDERR) &&
        !(Log_destination & LOG_DESTINATION_CSVLOG))
@@ -1369,7 +1369,7 @@ update_metainfo_datafile(void)
            ereport(LOG,
                    (errcode_for_file_access(),
                     errmsg("could not remove file \"%s\": %m",
-                       LOG_METAINFO_DATAFILE)));
+                           LOG_METAINFO_DATAFILE)));
        return;
    }
 
@@ -1378,7 +1378,7 @@ update_metainfo_datafile(void)
        ereport(LOG,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\": %m",
-                   LOG_METAINFO_DATAFILE_TMP)));
+                       LOG_METAINFO_DATAFILE_TMP)));
        return;
    }
 
@@ -1388,7 +1388,7 @@ update_metainfo_datafile(void)
        {
            ereport(LOG,
                    (errcode_for_file_access(),
-                   errmsg("could not write file \"%s\": %m",
+                    errmsg("could not write file \"%s\": %m",
                            LOG_METAINFO_DATAFILE_TMP)));
            fclose(fh);
            return;
@@ -1401,7 +1401,7 @@ update_metainfo_datafile(void)
        {
            ereport(LOG,
                    (errcode_for_file_access(),
-                   errmsg("could not write file \"%s\": %m",
+                    errmsg("could not write file \"%s\": %m",
                            LOG_METAINFO_DATAFILE_TMP)));
            fclose(fh);
            return;
@@ -1412,8 +1412,8 @@ update_metainfo_datafile(void)
    if (rename(LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE) != 0)
        ereport(LOG,
                (errcode_for_file_access(),
-               errmsg("could not rename file \"%s\" to \"%s\": %m",
-                      LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE)));
+                errmsg("could not rename file \"%s\" to \"%s\": %m",
+                       LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE)));
 }
 
 /* --------------------------------
index 3ee0dd5aa4541fc3470c38356d8f31019ffa42b0..cb5f58b6ba26fd3a96b5feed51e4df4165b21321 100644 (file)
@@ -58,8 +58,8 @@ static bool sendFile(char *readfilename, char *tarfilename,
 static void sendFileWithContent(const char *filename, const char *content);
 static int64 _tarWriteHeader(const char *filename, const char *linktarget,
                struct stat * statbuf, bool sizeonly);
-static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
-               bool sizeonly);
+static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf,
+            bool sizeonly);
 static void send_int8_string(StringInfoData *buf, int64 intval);
 static void SendBackupHeader(List *tablespaces);
 static void base_backup_cleanup(int code, Datum arg);
@@ -106,15 +106,15 @@ static const char *excludeDirContents[] =
 {
    /*
     * Skip temporary statistics files. PG_STAT_TMP_DIR must be skipped even
-    * when stats_temp_directory is set because PGSS_TEXT_FILE is always created
-    * there.
+    * when stats_temp_directory is set because PGSS_TEXT_FILE is always
+    * created there.
     */
    PG_STAT_TMP_DIR,
 
    /*
-    * It is generally not useful to backup the contents of this directory even
-    * if the intention is to restore to another master. See backup.sgml for a
-    * more detailed description.
+    * It is generally not useful to backup the contents of this directory
+    * even if the intention is to restore to another master. See backup.sgml
+    * for a more detailed description.
     */
    "pg_replslot",
 
@@ -365,7 +365,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
        dir = AllocateDir("pg_wal");
        if (!dir)
            ereport(ERROR,
-                (errmsg("could not open directory \"%s\": %m", "pg_wal")));
+                 (errmsg("could not open directory \"%s\": %m", "pg_wal")));
        while ((de = ReadDir(dir, "pg_wal")) != NULL)
        {
            /* Does it look like a WAL segment, and is it in the range? */
@@ -404,8 +404,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
        qsort(walFiles, nWalFiles, sizeof(char *), compareWalFileNames);
 
        /*
-        * There must be at least one xlog file in the pg_wal directory,
-        * since we are doing backup-including-xlog.
+        * There must be at least one xlog file in the pg_wal directory, since
+        * we are doing backup-including-xlog.
         */
        if (nWalFiles < 1)
            ereport(ERROR,
@@ -1036,7 +1036,7 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces,
            if (strcmp(de->d_name, excludeDirContents[excludeIdx]) == 0)
            {
                elog(DEBUG1, "contents of directory \"%s\" excluded from backup", de->d_name);
-               size += _tarWriteDir(pathbuf, basepathlen, &statbuf,  sizeonly);
+               size += _tarWriteDir(pathbuf, basepathlen, &statbuf, sizeonly);
                excludeFound = true;
                break;
            }
@@ -1281,7 +1281,7 @@ _tarWriteHeader(const char *filename, const char *linktarget,
    if (!sizeonly)
    {
        rc = tarCreateHeader(h, filename, linktarget, statbuf->st_size,
-                            statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
+                         statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
                             statbuf->st_mtime);
 
        switch (rc)
@@ -1295,9 +1295,9 @@ _tarWriteHeader(const char *filename, const char *linktarget,
                break;
            case TAR_SYMLINK_TOO_LONG:
                ereport(ERROR,
-                       (errmsg("symbolic link target too long for tar format: "
-                               "file name \"%s\", target \"%s\"",
-                               filename, linktarget)));
+                    (errmsg("symbolic link target too long for tar format: "
+                            "file name \"%s\", target \"%s\"",
+                            filename, linktarget)));
                break;
            default:
                elog(ERROR, "unrecognized tar error: %d", rc);
@@ -1314,7 +1314,7 @@ _tarWriteHeader(const char *filename, const char *linktarget,
  * write it as a directory anyway.
  */
 static int64
-_tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
+_tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf,
             bool sizeonly)
 {
    /* If symlink, write it as a directory anyway */
index 9d7bb25d3976a305cc940b288a391b26851cd803..ebe9c91e9815b3dbe7eed32f3259a9fa969f14e2 100644 (file)
@@ -40,42 +40,42 @@ void        _PG_init(void);
 struct WalReceiverConn
 {
    /* Current connection to the primary, if any */
-   PGconn *streamConn;
+   PGconn     *streamConn;
    /* Used to remember if the connection is logical or physical */
-   bool    logical;
+   bool        logical;
    /* Buffer for currently read records */
-   char   *recvBuf;
+   char       *recvBuf;
 };
 
 /* Prototypes for interface functions */
 static WalReceiverConn *libpqrcv_connect(const char *conninfo,
-                                        bool logical, const char *appname,
-                                        char **err);
+                bool logical, const char *appname,
+                char **err);
 static void libpqrcv_check_conninfo(const char *conninfo);
 static char *libpqrcv_get_conninfo(WalReceiverConn *conn);
 static char *libpqrcv_identify_system(WalReceiverConn *conn,
-                                     TimeLineID *primary_tli,
-                                     int *server_version);
+                        TimeLineID *primary_tli,
+                        int *server_version);
 static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
                                 TimeLineID tli, char **filename,
                                 char **content, int *len);
 static bool libpqrcv_startstreaming(WalReceiverConn *conn,
-                                   const WalRcvStreamOptions *options);
+                       const WalRcvStreamOptions *options);
 static void libpqrcv_endstreaming(WalReceiverConn *conn,
-                                 TimeLineID *next_tli);
-static int libpqrcv_receive(WalReceiverConn *conn, char **buffer,
-                            pgsocket *wait_fd);
+                     TimeLineID *next_tli);
+static int libpqrcv_receive(WalReceiverConn *conn, char **buffer,
+                pgsocket *wait_fd);
 static void libpqrcv_send(WalReceiverConn *conn, const char *buffer,
-                         int nbytes);
+             int nbytes);
 static char *libpqrcv_create_slot(WalReceiverConn *conn,
-                                 const char *slotname,
-                                 bool temporary,
-                                 CRSSnapshotAction snapshot_action,
-                                 XLogRecPtr *lsn);
+                    const char *slotname,
+                    bool temporary,
+                    CRSSnapshotAction snapshot_action,
+                    XLogRecPtr *lsn);
 static WalRcvExecResult *libpqrcv_exec(WalReceiverConn *conn,
-                                      const char *query,
-                                      const int nRetTypes,
-                                      const Oid *retTypes);
+             const char *query,
+             const int nRetTypes,
+             const Oid *retTypes);
 static void libpqrcv_disconnect(WalReceiverConn *conn);
 
 static WalReceiverFunctionsType PQWalReceiverFunctions = {
@@ -153,7 +153,7 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname,
 
    conn = palloc0(sizeof(WalReceiverConn));
    conn->streamConn = PQconnectStartParams(keys, vals,
-                                           /* expand_dbname = */ true);
+                                            /* expand_dbname = */ true);
    if (PQstatus(conn->streamConn) == CONNECTION_BAD)
    {
        *err = pchomp(PQerrorMessage(conn->streamConn));
@@ -216,8 +216,8 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname,
 static void
 libpqrcv_check_conninfo(const char *conninfo)
 {
-   PQconninfoOption   *opts = NULL;
-   char               *err = NULL;
+   PQconninfoOption *opts = NULL;
+   char       *err = NULL;
 
    opts = PQconninfoParse(conninfo, &err);
    if (opts == NULL)
@@ -362,9 +362,9 @@ libpqrcv_startstreaming(WalReceiverConn *conn,
     */
    if (options->logical)
    {
-       char   *pubnames_str;
-       List   *pubnames;
-       char   *pubnames_literal;
+       char       *pubnames_str;
+       List       *pubnames;
+       char       *pubnames_literal;
 
        appendStringInfoString(&cmd, " (");
 
@@ -435,8 +435,8 @@ libpqrcv_endstreaming(WalReceiverConn *conn, TimeLineID *next_tli)
     * next timeline's ID, or just CommandComplete if the server was shut
     * down.
     *
-    * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT
-    * is also possible in case we aborted the copy in mid-stream.
+    * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT is
+    * also possible in case we aborted the copy in mid-stream.
     */
    res = PQgetResult(conn->streamConn);
    if (PQresultStatus(res) == PGRES_TUPLES_OK)
@@ -545,9 +545,9 @@ libpqrcv_PQexec(PGconn *streamConn, const char *query)
 
    /*
     * PQexec() silently discards any prior query results on the connection.
-    * This is not required for this function as it's expected that the
-    * caller (which is this library in all cases) will behave correctly and
-    * we don't have to be backwards compatible with old libpq.
+    * This is not required for this function as it's expected that the caller
+    * (which is this library in all cases) will behave correctly and we don't
+    * have to be backwards compatible with old libpq.
     */
 
    /*
@@ -737,9 +737,9 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname,
                     bool temporary, CRSSnapshotAction snapshot_action,
                     XLogRecPtr *lsn)
 {
-   PGresult       *res;
-   StringInfoData  cmd;
-   char           *snapshot;
+   PGresult   *res;
+   StringInfoData cmd;
+   char       *snapshot;
 
    initStringInfo(&cmd);
 
@@ -777,7 +777,7 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname,
    }
 
    *lsn = DatumGetLSN(DirectFunctionCall1Coll(pg_lsn_in, InvalidOid,
-                     CStringGetDatum(PQgetvalue(res, 0, 1))));
+                                   CStringGetDatum(PQgetvalue(res, 0, 1))));
    if (!PQgetisnull(res, 0, 2))
        snapshot = pstrdup(PQgetvalue(res, 0, 2));
    else
@@ -793,15 +793,15 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname,
  */
 static void
 libpqrcv_processTuples(PGresult *pgres, WalRcvExecResult *walres,
-                       const int nRetTypes, const Oid *retTypes)
+                      const int nRetTypes, const Oid *retTypes)
 {
-   int     tupn;
-   int     coln;
-   int     nfields = PQnfields(pgres);
-   HeapTuple       tuple;
-   AttInMetadata  *attinmeta;
-   MemoryContext   rowcontext;
-   MemoryContext   oldcontext;
+   int         tupn;
+   int         coln;
+   int         nfields = PQnfields(pgres);
+   HeapTuple   tuple;
+   AttInMetadata *attinmeta;
+   MemoryContext rowcontext;
+   MemoryContext oldcontext;
 
    /* Make sure we got expected number of fields. */
    if (nfields != nRetTypes)
@@ -832,7 +832,7 @@ libpqrcv_processTuples(PGresult *pgres, WalRcvExecResult *walres,
    /* Process returned rows. */
    for (tupn = 0; tupn < PQntuples(pgres); tupn++)
    {
-       char   *cstrs[MaxTupleAttributeNumber];
+       char       *cstrs[MaxTupleAttributeNumber];
 
        CHECK_FOR_INTERRUPTS();
 
@@ -877,7 +877,7 @@ libpqrcv_exec(WalReceiverConn *conn, const char *query,
    if (MyDatabaseId == InvalidOid)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("the query interface requires a database connection")));
+             errmsg("the query interface requires a database connection")));
 
    pgres = libpqrcv_PQexec(conn->streamConn, query);
 
@@ -905,7 +905,7 @@ libpqrcv_exec(WalReceiverConn *conn, const char *query,
            walres->status = WALRCV_OK_COMMAND;
            break;
 
-       /* Empty query is considered error. */
+           /* Empty query is considered error. */
        case PGRES_EMPTY_QUERY:
            walres->status = WALRCV_ERROR;
            walres->err = _("empty query");
@@ -935,16 +935,16 @@ libpqrcv_exec(WalReceiverConn *conn, const char *query,
 static char *
 stringlist_to_identifierstr(PGconn *conn, List *strings)
 {
-   ListCell *lc;
+   ListCell   *lc;
    StringInfoData res;
-   bool first = true;
+   bool        first = true;
 
    initStringInfo(&res);
 
-   foreach (lc, strings)
+   foreach(lc, strings)
    {
-       char *val = strVal(lfirst(lc));
-       char *val_escaped;
+       char       *val = strVal(lfirst(lc));
+       char       *val_escaped;
 
        if (first)
            first = false;
index 09c87d7c53a8013e21f73a379f111f8fe92e17e8..4e2c350dc7e604625b865f2b2e05cc1e7d689a96 100644 (file)
@@ -57,8 +57,8 @@
 /* max sleep time between cycles (3min) */
 #define DEFAULT_NAPTIME_PER_CYCLE 180000L
 
-int    max_logical_replication_workers = 4;
-int max_sync_workers_per_subscription = 2;
+int            max_logical_replication_workers = 4;
+int            max_sync_workers_per_subscription = 2;
 
 LogicalRepWorker *MyLogicalRepWorker = NULL;
 
@@ -68,7 +68,7 @@ typedef struct LogicalRepCtxStruct
    pid_t       launcher_pid;
 
    /* Background workers. */
-   LogicalRepWorker    workers[FLEXIBLE_ARRAY_MEMBER];
+   LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER];
 } LogicalRepCtxStruct;
 
 LogicalRepCtxStruct *LogicalRepCtx;
@@ -83,9 +83,9 @@ static void logicalrep_worker_cleanup(LogicalRepWorker *worker);
 volatile sig_atomic_t got_SIGHUP = false;
 volatile sig_atomic_t got_SIGTERM = false;
 
-static bool    on_commit_launcher_wakeup = false;
+static bool on_commit_launcher_wakeup = false;
 
-Datum pg_stat_get_subscription(PG_FUNCTION_ARGS);
+Datum      pg_stat_get_subscription(PG_FUNCTION_ARGS);
 
 
 /*
@@ -122,8 +122,8 @@ get_subscription_list(void)
    while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection)))
    {
        Form_pg_subscription subform = (Form_pg_subscription) GETSTRUCT(tup);
-       Subscription   *sub;
-       MemoryContext   oldcxt;
+       Subscription *sub;
+       MemoryContext oldcxt;
 
        /*
         * Allocate our results in the caller's context, not the
@@ -224,15 +224,16 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
 LogicalRepWorker *
 logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
 {
-   int i;
-   LogicalRepWorker   *res = NULL;
+   int         i;
+   LogicalRepWorker *res = NULL;
 
    Assert(LWLockHeldByMe(LogicalRepWorkerLock));
 
    /* Search for attached worker for a given subscription id. */
    for (i = 0; i < max_logical_replication_workers; i++)
    {
-       LogicalRepWorker   *w = &LogicalRepCtx->workers[i];
+       LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
        if (w->in_use && w->subid == subid && w->relid == relid &&
            (!only_running || w->proc))
        {
@@ -251,17 +252,17 @@ void
 logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid,
                         Oid relid)
 {
-   BackgroundWorker    bgw;
+   BackgroundWorker bgw;
    BackgroundWorkerHandle *bgw_handle;
-   int                 i;
-   int                 slot = 0;
-   LogicalRepWorker   *worker = NULL;
-   int                 nsyncworkers;
-   TimestampTz         now;
+   int         i;
+   int         slot = 0;
+   LogicalRepWorker *worker = NULL;
+   int         nsyncworkers;
+   TimestampTz now;
 
    ereport(LOG,
-           (errmsg("starting logical replication worker for subscription \"%s\"",
-                   subname)));
+      (errmsg("starting logical replication worker for subscription \"%s\"",
+              subname)));
 
    /* Report this after the initial starting message for consistency. */
    if (max_replication_slots == 0)
@@ -300,7 +301,7 @@ retry:
     */
    if (worker == NULL || nsyncworkers >= max_sync_workers_per_subscription)
    {
-       bool    did_cleanup = false;
+       bool        did_cleanup = false;
 
        for (i = 0; i < max_logical_replication_workers; i++)
        {
@@ -373,7 +374,7 @@ retry:
 
    /* Register the new dynamic worker. */
    memset(&bgw, 0, sizeof(bgw));
-   bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
+   bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
        BGWORKER_BACKEND_DATABASE_CONNECTION;
    bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
    snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
@@ -394,7 +395,7 @@ retry:
        ereport(WARNING,
                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                 errmsg("out of background worker slots"),
-                errhint("You might need to increase max_worker_processes.")));
+              errhint("You might need to increase max_worker_processes.")));
        return;
    }
 
@@ -410,7 +411,7 @@ void
 logicalrep_worker_stop(Oid subid, Oid relid)
 {
    LogicalRepWorker *worker;
-   uint16  generation;
+   uint16      generation;
 
    LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
 
@@ -435,7 +436,7 @@ logicalrep_worker_stop(Oid subid, Oid relid)
     */
    while (worker->in_use && !worker->proc)
    {
-       int rc;
+       int         rc;
 
        LWLockRelease(LogicalRepWorkerLock);
 
@@ -478,7 +479,7 @@ logicalrep_worker_stop(Oid subid, Oid relid)
    /* ... and wait for it to die. */
    for (;;)
    {
-       int rc;
+       int         rc;
 
        LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
        if (!worker->proc || worker->generation != generation)
@@ -509,7 +510,7 @@ logicalrep_worker_stop(Oid subid, Oid relid)
 void
 logicalrep_worker_wakeup(Oid subid, Oid relid)
 {
-   LogicalRepWorker   *worker;
+   LogicalRepWorker *worker;
 
    LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
    worker = logicalrep_worker_find(subid, relid, true);
@@ -544,18 +545,18 @@ logicalrep_worker_attach(int slot)
    {
        LWLockRelease(LogicalRepWorkerLock);
        ereport(ERROR,
-              (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-               errmsg("logical replication worker slot %d is empty, cannot attach",
-                      slot)));
+               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+        errmsg("logical replication worker slot %d is empty, cannot attach",
+               slot)));
    }
 
    if (MyLogicalRepWorker->proc)
    {
        LWLockRelease(LogicalRepWorkerLock);
        ereport(ERROR,
-              (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-               errmsg("logical replication worker slot %d is already used by "
-                      "another worker, cannot attach", slot)));
+               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+             errmsg("logical replication worker slot %d is already used by "
+                    "another worker, cannot attach", slot)));
    }
 
    MyLogicalRepWorker->proc = MyProc;
@@ -620,7 +621,7 @@ logicalrep_worker_onexit(int code, Datum arg)
 void
 logicalrep_worker_sigterm(SIGNAL_ARGS)
 {
-   int save_errno = errno;
+   int         save_errno = errno;
 
    got_SIGTERM = true;
 
@@ -634,7 +635,7 @@ logicalrep_worker_sigterm(SIGNAL_ARGS)
 void
 logicalrep_worker_sighup(SIGNAL_ARGS)
 {
-   int save_errno = errno;
+   int         save_errno = errno;
 
    got_SIGHUP = true;
 
@@ -651,15 +652,16 @@ logicalrep_worker_sighup(SIGNAL_ARGS)
 int
 logicalrep_sync_worker_count(Oid subid)
 {
-   int i;
-   int res = 0;
+   int         i;
+   int         res = 0;
 
    Assert(LWLockHeldByMe(LogicalRepWorkerLock));
 
    /* Search for attached worker for a given subscription id. */
    for (i = 0; i < max_logical_replication_workers; i++)
    {
-       LogicalRepWorker   *w = &LogicalRepCtx->workers[i];
+       LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
        if (w->subid == subid && OidIsValid(w->relid))
            res++;
    }
@@ -699,7 +701,7 @@ ApplyLauncherRegister(void)
        return;
 
    memset(&bgw, 0, sizeof(bgw));
-   bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
+   bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
        BGWORKER_BACKEND_DATABASE_CONNECTION;
    bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
    snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
@@ -729,7 +731,7 @@ ApplyLauncherShmemInit(void)
 
    if (!found)
    {
-       int slot;
+       int         slot;
 
        memset(LogicalRepCtx, 0, ApplyLauncherShmemSize());
 
@@ -783,7 +785,7 @@ ApplyLauncherWakeup(void)
 void
 ApplyLauncherMain(Datum main_arg)
 {
-   TimestampTz     last_start_time = 0;
+   TimestampTz last_start_time = 0;
 
    ereport(DEBUG1,
            (errmsg("logical replication launcher started")));
@@ -813,10 +815,10 @@ ApplyLauncherMain(Datum main_arg)
        int         rc;
        List       *sublist;
        ListCell   *lc;
-       MemoryContext   subctx;
-       MemoryContext   oldctx;
-       TimestampTz     now;
-       long            wait_time = DEFAULT_NAPTIME_PER_CYCLE;
+       MemoryContext subctx;
+       MemoryContext oldctx;
+       TimestampTz now;
+       long        wait_time = DEFAULT_NAPTIME_PER_CYCLE;
 
        now = GetCurrentTimestamp();
 
@@ -826,7 +828,7 @@ ApplyLauncherMain(Datum main_arg)
        {
            /* Use temporary context for the database list and worker info. */
            subctx = AllocSetContextCreate(TopMemoryContext,
-                                          "Logical Replication Launcher sublist",
+                                     "Logical Replication Launcher sublist",
                                           ALLOCSET_DEFAULT_MINSIZE,
                                           ALLOCSET_DEFAULT_INITSIZE,
                                           ALLOCSET_DEFAULT_MAXSIZE);
@@ -838,8 +840,8 @@ ApplyLauncherMain(Datum main_arg)
            /* Start the missing workers for enabled subscriptions. */
            foreach(lc, sublist)
            {
-               Subscription       *sub = (Subscription *) lfirst(lc);
-               LogicalRepWorker   *w;
+               Subscription *sub = (Subscription *) lfirst(lc);
+               LogicalRepWorker *w;
 
                LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
                w = logicalrep_worker_find(sub->oid, InvalidOid, false);
@@ -864,9 +866,9 @@ ApplyLauncherMain(Datum main_arg)
        {
            /*
             * The wait in previous cycle was interrupted in less than
-            * wal_retrieve_retry_interval since last worker was started,
-            * this usually means crash of the worker, so we should retry
-            * in wal_retrieve_retry_interval again.
+            * wal_retrieve_retry_interval since last worker was started, this
+            * usually means crash of the worker, so we should retry in
+            * wal_retrieve_retry_interval again.
             */
            wait_time = wal_retrieve_retry_interval;
        }
@@ -948,7 +950,7 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS)
        Datum       values[PG_STAT_GET_SUBSCRIPTION_COLS];
        bool        nulls[PG_STAT_GET_SUBSCRIPTION_COLS];
        int         worker_pid;
-       LogicalRepWorker    worker;
+       LogicalRepWorker worker;
 
        memcpy(&worker, &LogicalRepCtx->workers[i],
               sizeof(LogicalRepWorker));
@@ -992,7 +994,10 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS)
 
        tuplestore_putvalues(tupstore, tupdesc, values, nulls);
 
-       /* If only a single subscription was requested, and we found it, break. */
+       /*
+        * If only a single subscription was requested, and we found it,
+        * break.
+        */
        if (OidIsValid(subid))
            break;
    }
index 7409e5ce3de759d2acead457824c4f1af49330ea..33cb01b8d0e901b96846f7ddbf27019f42ae7183 100644 (file)
@@ -118,7 +118,7 @@ StartupDecodingContext(List *output_plugin_options,
                       XLogPageReadCB read_page,
                       LogicalOutputPluginWriterPrepareWrite prepare_write,
                       LogicalOutputPluginWriterWrite do_write,
-                      LogicalOutputPluginWriterUpdateProgress update_progress)
+                    LogicalOutputPluginWriterUpdateProgress update_progress)
 {
    ReplicationSlot *slot;
    MemoryContext context,
@@ -202,8 +202,8 @@ StartupDecodingContext(List *output_plugin_options,
  * plugin contains the name of the output plugin
  * output_plugin_options contains options passed to the output plugin
  * read_page, prepare_write, do_write, update_progress
- *     callbacks that have to be filled to perform the use-case dependent,
- *     actual, work.
+ *     callbacks that have to be filled to perform the use-case dependent,
+ *     actual, work.
  *
  * Needs to be called while in a memory context that's at least as long lived
  * as the decoding context because further memory contexts will be created
@@ -219,7 +219,7 @@ CreateInitDecodingContext(char *plugin,
                          XLogPageReadCB read_page,
                          LogicalOutputPluginWriterPrepareWrite prepare_write,
                          LogicalOutputPluginWriterWrite do_write,
-                         LogicalOutputPluginWriterUpdateProgress update_progress)
+                    LogicalOutputPluginWriterUpdateProgress update_progress)
 {
    TransactionId xmin_horizon = InvalidTransactionId;
    ReplicationSlot *slot;
index 27164de093dd8de2874cb16edf7b4ca9dae75c20..ba4d8cc5a45d18e3c7dcbebc084044c60785423d 100644 (file)
@@ -328,17 +328,19 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin
        if (ctx->reader->EndRecPtr != InvalidXLogRecPtr && confirm)
        {
            LogicalConfirmReceivedLocation(ctx->reader->EndRecPtr);
+
            /*
             * If only the confirmed_flush_lsn has changed the slot won't get
-            * marked as dirty by the above. Callers on the walsender interface
-            * are expected to keep track of their own progress and don't need
-            * it written out. But SQL-interface users cannot specify their own
-            * start positions and it's harder for them to keep track of their
-            * progress, so we should make more of an effort to save it for them.
+            * marked as dirty by the above. Callers on the walsender
+            * interface are expected to keep track of their own progress and
+            * don't need it written out. But SQL-interface users cannot
+            * specify their own start positions and it's harder for them to
+            * keep track of their progress, so we should make more of an
+            * effort to save it for them.
             *
-            * Dirty the slot so it's written out at the next checkpoint. We'll
-            * still lose its position on crash, as documented, but it's better
-            * than always losing the position even on clean restart.
+            * Dirty the slot so it's written out at the next checkpoint.
+            * We'll still lose its position on crash, as documented, but it's
+            * better than always losing the position even on clean restart.
             */
            ReplicationSlotMarkDirty();
        }
index adc62a0f3bbedc857db6e81317c45712a14fd378..ff348ff2a8c9e22b5b981d4fa8d4e99bc64bfa70 100644 (file)
@@ -28,7 +28,7 @@
 
 static void logicalrep_write_attrs(StringInfo out, Relation rel);
 static void logicalrep_write_tuple(StringInfo out, Relation rel,
-                                  HeapTuple tuple);
+                      HeapTuple tuple);
 
 static void logicalrep_read_attrs(StringInfo in, LogicalRepRelation *rel);
 static void logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple);
@@ -72,7 +72,7 @@ void
 logicalrep_write_commit(StringInfo out, ReorderBufferTXN *txn,
                        XLogRecPtr commit_lsn)
 {
-   uint8 flags = 0;
+   uint8       flags = 0;
 
    pq_sendbyte(out, 'C');      /* sending COMMIT */
 
@@ -92,7 +92,7 @@ void
 logicalrep_read_commit(StringInfo in, LogicalRepCommitData *commit_data)
 {
    /* read flags (unused for now) */
-   uint8   flags = pq_getmsgbyte(in);
+   uint8       flags = pq_getmsgbyte(in);
 
    if (flags != 0)
        elog(ERROR, "unrecognized flags %u in commit message", flags);
@@ -136,7 +136,7 @@ logicalrep_read_origin(StringInfo in, XLogRecPtr *origin_lsn)
  * Write INSERT to the output stream.
  */
 void
-logicalrep_write_insert(StringInfo out,    Relation rel, HeapTuple newtuple)
+logicalrep_write_insert(StringInfo out, Relation rel, HeapTuple newtuple)
 {
    pq_sendbyte(out, 'I');      /* action INSERT */
 
@@ -160,7 +160,7 @@ LogicalRepRelId
 logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup)
 {
    char        action;
-   LogicalRepRelId     relid;
+   LogicalRepRelId relid;
 
    /* read the relation id */
    relid = pq_getmsgint(in, 4);
@@ -180,7 +180,7 @@ logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup)
  */
 void
 logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple,
-                      HeapTuple newtuple)
+                       HeapTuple newtuple)
 {
    pq_sendbyte(out, 'U');      /* action UPDATE */
 
@@ -194,9 +194,9 @@ logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple,
    if (oldtuple != NULL)
    {
        if (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
-           pq_sendbyte(out, 'O');  /* old tuple follows */
+           pq_sendbyte(out, 'O');      /* old tuple follows */
        else
-           pq_sendbyte(out, 'K');  /* old key follows */
+           pq_sendbyte(out, 'K');      /* old key follows */
        logicalrep_write_tuple(out, rel, oldtuple);
    }
 
@@ -213,7 +213,7 @@ logicalrep_read_update(StringInfo in, bool *has_oldtuple,
                       LogicalRepTupleData *newtup)
 {
    char        action;
-   LogicalRepRelId     relid;
+   LogicalRepRelId relid;
 
    /* read the relation id */
    relid = pq_getmsgint(in, 4);
@@ -277,7 +277,7 @@ LogicalRepRelId
 logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup)
 {
    char        action;
-   LogicalRepRelId     relid;
+   LogicalRepRelId relid;
 
    /* read the relation id */
    relid = pq_getmsgint(in, 4);
@@ -323,7 +323,7 @@ logicalrep_write_rel(StringInfo out, Relation rel)
 LogicalRepRelation *
 logicalrep_read_rel(StringInfo in)
 {
-   LogicalRepRelation  *rel = palloc(sizeof(LogicalRepRelation));
+   LogicalRepRelation *rel = palloc(sizeof(LogicalRepRelation));
 
    rel->remoteid = pq_getmsgint(in, 4);
 
@@ -424,12 +424,12 @@ logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple)
 
        if (isnull[i])
        {
-           pq_sendbyte(out, 'n');  /* null column */
+           pq_sendbyte(out, 'n');      /* null column */
            continue;
        }
        else if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i]))
        {
-           pq_sendbyte(out, 'u');  /* unchanged toast column */
+           pq_sendbyte(out, 'u');      /* unchanged toast column */
            continue;
        }
 
@@ -473,21 +473,21 @@ logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple)
 
        switch (kind)
        {
-           case 'n': /* null */
+           case 'n':           /* null */
                tuple->values[i] = NULL;
                tuple->changed[i] = true;
                break;
-           case 'u': /* unchanged column */
+           case 'u':           /* unchanged column */
                /* we don't receive the value of an unchanged column */
                tuple->values[i] = NULL;
                break;
-           case 't': /* text formatted value */
+           case 't':           /* text formatted value */
                {
                    int         len;
 
                    tuple->changed[i] = true;
 
-                   len = pq_getmsgint(in, 4); /* read length */
+                   len = pq_getmsgint(in, 4);  /* read length */
 
                    /* and data */
                    tuple->values[i] = palloc(len + 1);
@@ -534,7 +534,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)
    for (i = 0; i < desc->natts; i++)
    {
        Form_pg_attribute att = desc->attrs[i];
-       uint8           flags = 0;
+       uint8       flags = 0;
 
        if (att->attisdropped)
            continue;
@@ -612,7 +612,7 @@ logicalrep_write_namespace(StringInfo out, Oid nspid)
        pq_sendbyte(out, '\0');
    else
    {
-       char *nspname = get_namespace_name(nspid);
+       char       *nspname = get_namespace_name(nspid);
 
        if (nspname == NULL)
            elog(ERROR, "cache lookup failed for namespace %u",
index 590355a846e6a3f713b6d3aa78dc6ab5310644bf..41eff8971a5d05d664da3b0fef25db144e3bfa71 100644 (file)
 #include "utils/memutils.h"
 #include "utils/syscache.h"
 
-static MemoryContext   LogicalRepRelMapContext = NULL;
+static MemoryContext LogicalRepRelMapContext = NULL;
 
-static HTAB               *LogicalRepRelMap = NULL;
-static HTAB               *LogicalRepTypMap = NULL;
+static HTAB *LogicalRepRelMap = NULL;
+static HTAB *LogicalRepTypMap = NULL;
 
 static void logicalrep_typmap_invalidate_cb(Datum arg, int cacheid,
-                                           uint32 hashvalue);
+                               uint32 hashvalue);
 
 /*
  * Relcache invalidation callback for our relation map cache.
@@ -44,7 +44,7 @@ static void logicalrep_typmap_invalidate_cb(Datum arg, int cacheid,
 static void
 logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 {
-   LogicalRepRelMapEntry  *entry;
+   LogicalRepRelMapEntry *entry;
 
    /* Just to be sure. */
    if (LogicalRepRelMap == NULL)
@@ -110,7 +110,7 @@ logicalrep_relmap_init(void)
 
    /* This will usually be small. */
    LogicalRepTypMap = hash_create("logicalrep type map cache", 2, &ctl,
-                                  HASH_ELEM | HASH_BLOBS |HASH_CONTEXT);
+                                  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
    /* Watch for invalidation events. */
    CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb,
@@ -134,7 +134,7 @@ logicalrep_relmap_free_entry(LogicalRepRelMapEntry *entry)
 
    if (remoterel->natts > 0)
    {
-       int i;
+       int         i;
 
        for (i = 0; i < remoterel->natts; i++)
            pfree(remoterel->attnames[i]);
@@ -157,10 +157,10 @@ logicalrep_relmap_free_entry(LogicalRepRelMapEntry *entry)
 void
 logicalrep_relmap_update(LogicalRepRelation *remoterel)
 {
-   MemoryContext           oldctx;
-   LogicalRepRelMapEntry  *entry;
-   bool                    found;
-   int                     i;
+   MemoryContext oldctx;
+   LogicalRepRelMapEntry *entry;
+   bool        found;
+   int         i;
 
    if (LogicalRepRelMap == NULL)
        logicalrep_relmap_init();
@@ -202,7 +202,7 @@ logicalrep_relmap_update(LogicalRepRelation *remoterel)
 static int
 logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname)
 {
-   int i;
+   int         i;
 
    for (i = 0; i < remoterel->natts; i++)
    {
@@ -222,7 +222,7 @@ logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname)
 LogicalRepRelMapEntry *
 logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
 {
-   LogicalRepRelMapEntry  *entry;
+   LogicalRepRelMapEntry *entry;
    bool        found;
 
    if (LogicalRepRelMap == NULL)
@@ -245,7 +245,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
        Bitmapset  *idkey;
        TupleDesc   desc;
        LogicalRepRelation *remoterel;
-       MemoryContext       oldctx;
+       MemoryContext oldctx;
+
        remoterel = &entry->remoterel;
 
        /* Try to find and lock the relation by name. */
@@ -265,8 +266,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
 
        /*
         * Build the mapping of local attribute numbers to remote attribute
-        * numbers and validate that we don't miss any replicated columns
-        * as that would result in potentially unwanted data loss.
+        * numbers and validate that we don't miss any replicated columns as
+        * that would result in potentially unwanted data loss.
         */
        desc = RelationGetDescr(entry->localrel);
        oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);
@@ -276,8 +277,9 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
        found = 0;
        for (i = 0; i < desc->natts; i++)
        {
-           int attnum = logicalrep_rel_att_by_name(remoterel,
-                                           NameStr(desc->attrs[i]->attname));
+           int         attnum = logicalrep_rel_att_by_name(remoterel,
+                                          NameStr(desc->attrs[i]->attname));
+
            entry->attrmap[i] = attnum;
            if (attnum >= 0)
                found++;
@@ -287,9 +289,9 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
        if (found < remoterel->natts)
            ereport(ERROR,
                    (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                    errmsg("logical replication target relation \"%s.%s\" is missing "
-                           "some replicated columns",
-                           remoterel->nspname, remoterel->relname)));
+           errmsg("logical replication target relation \"%s.%s\" is missing "
+                  "some replicated columns",
+                  remoterel->nspname, remoterel->relname)));
 
        /*
         * Check that replica identity matches. We allow for stricter replica
@@ -299,8 +301,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
         * but in the opposite scenario it will.
         *
         * Don't throw any error here just mark the relation entry as not
-        * updatable, as replica identity is only for updates and deletes
-        * but inserts can be replicated even without it.
+        * updatable, as replica identity is only for updates and deletes but
+        * inserts can be replicated even without it.
         */
        entry->updatable = true;
        idkey = RelationGetIndexAttrBitmap(entry->localrel,
@@ -310,6 +312,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
        {
            idkey = RelationGetIndexAttrBitmap(entry->localrel,
                                               INDEX_ATTR_BITMAP_PRIMARY_KEY);
+
            /*
             * If no replica identity index and no PK, the published table
             * must have replica identity FULL.
@@ -321,14 +324,14 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
        i = -1;
        while ((i = bms_next_member(idkey, i)) >= 0)
        {
-           int attnum = i + FirstLowInvalidHeapAttributeNumber;
+           int         attnum = i + FirstLowInvalidHeapAttributeNumber;
 
            if (!AttrNumberIsForUserDefinedAttr(attnum))
                ereport(ERROR,
                        (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                        errmsg("logical replication target relation \"%s.%s\" uses "
-                               "system columns in REPLICA IDENTITY index",
-                               remoterel->nspname, remoterel->relname)));
+                errmsg("logical replication target relation \"%s.%s\" uses "
+                       "system columns in REPLICA IDENTITY index",
+                       remoterel->nspname, remoterel->relname)));
 
            attnum = AttrNumberGetAttrOffset(attnum);
 
@@ -371,7 +374,7 @@ static void
 logicalrep_typmap_invalidate_cb(Datum arg, int cacheid, uint32 hashvalue)
 {
    HASH_SEQ_STATUS status;
-   LogicalRepTyp  *entry;
+   LogicalRepTyp *entry;
 
    /* Just to be sure. */
    if (LogicalRepTypMap == NULL)
@@ -402,9 +405,9 @@ logicalrep_typmap_free_entry(LogicalRepTyp *entry)
 void
 logicalrep_typmap_update(LogicalRepTyp *remotetyp)
 {
-   MemoryContext       oldctx;
-   LogicalRepTyp      *entry;
-   bool                found;
+   MemoryContext oldctx;
+   LogicalRepTyp *entry;
+   bool        found;
 
    if (LogicalRepTypMap == NULL)
        logicalrep_relmap_init();
@@ -433,9 +436,9 @@ logicalrep_typmap_update(LogicalRepTyp *remotetyp)
 Oid
 logicalrep_typmap_getid(Oid remoteid)
 {
-   LogicalRepTyp      *entry;
-   bool                found;
-   Oid                 nspoid;
+   LogicalRepTyp *entry;
+   bool        found;
+   Oid         nspoid;
 
    /* Internal types are mapped directly. */
    if (remoteid < FirstNormalObjectId)
index 428d7aa55eb83ea6687e65d0e2c7ed316eb28293..8848f5b4ec14ee6f72e3bcdee23b873c991a2763 100644 (file)
@@ -59,7 +59,7 @@
  * by the following graph describing the SnapBuild->state transitions:
  *
  *        +-------------------------+
- *   +----|         START           |-------------+
+ *   +----|         START           |-------------+
  *   |    +-------------------------+             |
  *   |                 |                          |
  *   |                 |                          |
  *   |                 |                          |
  *   |                 v                          |
  *   |    +-------------------------+             v
- *   |    |   BUILDING_SNAPSHOT     |------------>|
+ *   |    |   BUILDING_SNAPSHOT     |------------>|
  *   |    +-------------------------+             |
  *   |                 |                          |
  *   |                 |                          |
- *   | running_xacts #2, xacts from #1 finished   |
+ *   | running_xacts #2, xacts from #1 finished   |
  *   |                 |                          |
  *   |                 |                          |
  *   |                 v                          |
  *   |    +-------------------------+             v
- *   |    |       FULL_SNAPSHOT     |------------>|
+ *   |    |       FULL_SNAPSHOT     |------------>|
  *   |    +-------------------------+             |
  *   |                 |                          |
  * running_xacts       |                      saved snapshot
  * with zero xacts     |                 at running_xacts's lsn
  *   |                 |                          |
- *   | running_xacts with xacts from #2 finished  |
+ *   | running_xacts with xacts from #2 finished  |
  *   |                 |                          |
  *   |                 v                          |
  *   |    +-------------------------+             |
@@ -209,9 +209,9 @@ struct SnapBuild
        TransactionId was_xmin;
        TransactionId was_xmax;
 
-       size_t      was_xcnt;       /* number of used xip entries */
-       size_t      was_xcnt_space; /* allocated size of xip */
-       TransactionId *was_xip;     /* running xacts array, xidComparator-sorted */
+       size_t      was_xcnt;   /* number of used xip entries */
+       size_t      was_xcnt_space;     /* allocated size of xip */
+       TransactionId *was_xip; /* running xacts array, xidComparator-sorted */
    }           was_running;
 
    /*
@@ -608,8 +608,8 @@ SnapBuildInitialSnapshot(SnapBuild *builder)
        {
            if (newxcnt >= GetMaxSnapshotXidCount())
                ereport(ERROR,
-                   (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                    errmsg("initial slot snapshot too large")));
+                       (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+                        errmsg("initial slot snapshot too large")));
 
            newxip[newxcnt++] = xid;
        }
@@ -986,6 +986,7 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
            if (NormalTransactionIdFollows(subxid, xmax))
                xmax = subxid;
        }
+
        /*
         * If we're forcing timetravel we also need visibility information
         * about subtransaction, so keep track of subtransaction's state, even
@@ -1031,8 +1032,8 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
 
    /*
     * Adjust xmax of the snapshot builder, we only do that for committed,
-    * catalog modifying, transactions, everything else isn't interesting
-    * for us since we'll never look at the respective rows.
+    * catalog modifying, transactions, everything else isn't interesting for
+    * us since we'll never look at the respective rows.
     */
    if (needs_timetravel &&
        (!TransactionIdIsValid(builder->xmax) ||
@@ -1130,8 +1131,8 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact
         running->oldestRunningXid);
 
    /*
-    * Increase shared memory limits, so vacuum can work on tuples we prevented
-    * from being pruned till now.
+    * Increase shared memory limits, so vacuum can work on tuples we
+    * prevented from being pruned till now.
     */
    LogicalIncreaseXminForSlot(lsn, running->oldestRunningXid);
 
@@ -1202,11 +1203,11 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
     *    modifying transactions.
     *
     * c) First incrementally build a snapshot for catalog tuples
-    *    (BUILDING_SNAPSHOT), that requires all, already in-progress,
-    *    transactions to finish.  Every transaction starting after that
-    *    (FULL_SNAPSHOT state), has enough information to be decoded.  But
-    *    for older running transactions no viable snapshot exists yet, so
-    *    CONSISTENT will only be reached once all of those have finished.
+    *    (BUILDING_SNAPSHOT), that requires all, already in-progress,
+    *    transactions to finish.  Every transaction starting after that
+    *    (FULL_SNAPSHOT state), has enough information to be decoded.  But
+    *    for older running transactions no viable snapshot exists yet, so
+    *    CONSISTENT will only be reached once all of those have finished.
     * ---
     */
 
@@ -1271,6 +1272,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
        /* there won't be any state to cleanup */
        return false;
    }
+
    /*
     * c) transition from START to BUILDING_SNAPSHOT.
     *
@@ -1308,6 +1310,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
 
        SnapBuildWaitSnapshot(running, running->nextXid);
    }
+
    /*
     * c) transition from BUILDING_SNAPSHOT to FULL_SNAPSHOT.
     *
@@ -1324,13 +1327,14 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
        SnapBuildStartNextPhaseAt(builder, running->nextXid);
 
        ereport(LOG,
-               (errmsg("logical decoding found initial consistent point at %X/%X",
-                       (uint32) (lsn >> 32), (uint32) lsn),
-                errdetail("Waiting for transactions (approximately %d) older than %u to end.",
-                          running->xcnt, running->nextXid)));
+         (errmsg("logical decoding found initial consistent point at %X/%X",
+                 (uint32) (lsn >> 32), (uint32) lsn),
+          errdetail("Waiting for transactions (approximately %d) older than %u to end.",
+                    running->xcnt, running->nextXid)));
 
        SnapBuildWaitSnapshot(running, running->nextXid);
    }
+
    /*
     * c) transition from FULL_SNAPSHOT to CONSISTENT.
     *
@@ -1368,9 +1372,9 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
  *
  * This isn't required for the correctness of decoding, but to:
  * a) allow isolationtester to notice that we're currently waiting for
- *    something.
+ *   something.
  * b) log a new xl_running_xacts record where it'd be helpful, without having
- *    to write for bgwriter or checkpointer.
+ *   to write for bgwriter or checkpointer.
  * ---
  */
 static void
@@ -1383,9 +1387,9 @@ SnapBuildWaitSnapshot(xl_running_xacts *running, TransactionId cutoff)
        TransactionId xid = running->xids[off];
 
        /*
-        * Upper layers should prevent that we ever need to wait on
-        * ourselves. Check anyway, since failing to do so would either
-        * result in an endless wait or an Assert() failure.
+        * Upper layers should prevent that we ever need to wait on ourselves.
+        * Check anyway, since failing to do so would either result in an
+        * endless wait or an Assert() failure.
         */
        if (TransactionIdIsCurrentTransactionId(xid))
            elog(ERROR, "waiting for ourselves");
@@ -1864,8 +1868,9 @@ CheckPointSnapBuild(void)
    char        path[MAXPGPATH + 21];
 
    /*
-    * We start off with a minimum of the last redo pointer. No new replication
-    * slot will start before that, so that's a safe upper bound for removal.
+    * We start off with a minimum of the last redo pointer. No new
+    * replication slot will start before that, so that's a safe upper bound
+    * for removal.
     */
    redo = GetRedoRecPtr();
 
index 7e51076b376d9fbb9e3c0eb955aeef26afe04a88..1e3753b8fe2dfeab12f9a4b1bf749ebcdfaf8893 100644 (file)
@@ -113,7 +113,8 @@ StringInfo  copybuf = NULL;
 /*
  * Exit routine for synchronization worker.
  */
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 finish_sync_worker(void)
 {
    /*
@@ -148,12 +149,12 @@ finish_sync_worker(void)
 static bool
 wait_for_sync_status_change(Oid relid, char origstate)
 {
-   int     rc;
-   char    state = origstate;
+   int         rc;
+   char        state = origstate;
 
    while (!got_SIGTERM)
    {
-       LogicalRepWorker   *worker;
+       LogicalRepWorker *worker;
 
        LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
        worker = logicalrep_worker_find(MyLogicalRepWorker->subid,
@@ -269,7 +270,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
    struct tablesync_start_time_mapping
    {
        Oid         relid;
-       TimestampTz last_start_time;
+       TimestampTz last_start_time;
    };
    static List *table_states = NIL;
    static HTAB *last_start_times = NULL;
@@ -281,9 +282,9 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
    /* We need up to date sync state info for subscription tables here. */
    if (!table_states_valid)
    {
-       MemoryContext   oldctx;
-       List           *rstates;
-       ListCell       *lc;
+       MemoryContext oldctx;
+       List       *rstates;
+       ListCell   *lc;
        SubscriptionRelState *rstate;
 
        /* Clean the old list. */
@@ -294,7 +295,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
        started_tx = true;
 
        /* Fetch all non-ready tables. */
-       rstates = GetSubscriptionNotReadyRelations(MySubscription->oid);
+       rstates = GetSubscriptionNotReadyRelations(MySubscription->oid);
 
        /* Allocate the tracking info in a permanent memory context. */
        oldctx = MemoryContextSwitchTo(CacheMemoryContext);
@@ -324,6 +325,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
        last_start_times = hash_create("Logical replication table sync worker start times",
                                       256, &ctl, HASH_ELEM | HASH_BLOBS);
    }
+
    /*
     * Clean up the hash table when we're done with all tables (just to
     * release the bit of memory).
@@ -337,14 +339,14 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
    /* Process all tables that are being synchronized. */
    foreach(lc, table_states)
    {
-       SubscriptionRelState *rstate = (SubscriptionRelState *)lfirst(lc);
+       SubscriptionRelState *rstate = (SubscriptionRelState *) lfirst(lc);
 
        if (rstate->state == SUBREL_STATE_SYNCDONE)
        {
            /*
-            * Apply has caught up to the position where the table sync
-            * has finished.  Time to mark the table as ready so that
-            * apply will just continue to replicate it normally.
+            * Apply has caught up to the position where the table sync has
+            * finished.  Time to mark the table as ready so that apply will
+            * just continue to replicate it normally.
             */
            if (current_lsn >= rstate->lsn)
            {
@@ -362,8 +364,8 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
        }
        else
        {
-           LogicalRepWorker   *syncworker;
-           int                 nsyncworkers = 0;
+           LogicalRepWorker *syncworker;
+           int         nsyncworkers = 0;
 
            LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
            syncworker = logicalrep_worker_find(MyLogicalRepWorker->subid,
@@ -376,6 +378,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                SpinLockRelease(&syncworker->relmutex);
            }
            else
+
                /*
                 * If no sync worker for this table yet, count running sync
                 * workers for this subscription, while we have the lock, for
@@ -394,16 +397,16 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                 * There are three possible synchronization situations here.
                 *
                 * a) Apply is in front of the table sync: We tell the table
-                *    sync to CATCHUP.
+                * sync to CATCHUP.
                 *
                 * b) Apply is behind the table sync: We tell the table sync
-                *    to mark the table as SYNCDONE and finish.
-
+                * to mark the table as SYNCDONE and finish.
+                *
                 * c) Apply and table sync are at the same position: We tell
-                *    table sync to mark the table as READY and finish.
+                * table sync to mark the table as READY and finish.
                 *
-                * In any case we'll need to wait for table sync to change
-                * the state in catalog and only then continue ourselves.
+                * In any case we'll need to wait for table sync to change the
+                * state in catalog and only then continue ourselves.
                 */
                if (current_lsn > rstate->lsn)
                {
@@ -427,20 +430,19 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                logicalrep_worker_wakeup_ptr(syncworker);
 
                /*
-                * Enter busy loop and wait for synchronization status
-                * change.
+                * Enter busy loop and wait for synchronization status change.
                 */
                wait_for_sync_status_change(rstate->relid, rstate->state);
            }
 
            /*
-            * If there is no sync worker registered for the table and
-            * there is some free sync worker slot, start new sync worker
-            * for the table.
+            * If there is no sync worker registered for the table and there
+            * is some free sync worker slot, start new sync worker for the
+            * table.
             */
            else if (!syncworker && nsyncworkers < max_sync_workers_per_subscription)
            {
-               TimestampTz now = GetCurrentTimestamp();
+               TimestampTz now = GetCurrentTimestamp();
                struct tablesync_start_time_mapping *hentry;
                bool        found;
 
@@ -492,7 +494,7 @@ make_copy_attnamelist(LogicalRepRelMapEntry *rel)
 
    for (i = 0; i < desc->natts; i++)
    {
-       int     remoteattnum = rel->attrmap[i];
+       int         remoteattnum = rel->attrmap[i];
 
        /* Skip dropped attributes. */
        if (desc->attrs[i]->attisdropped)
@@ -503,7 +505,7 @@ make_copy_attnamelist(LogicalRepRelMapEntry *rel)
            continue;
 
        attnamelist = lappend(attnamelist,
-                           makeString(rel->remoterel.attnames[remoteattnum]));
+                         makeString(rel->remoterel.attnames[remoteattnum]));
    }
 
    return attnamelist;
@@ -516,8 +518,8 @@ make_copy_attnamelist(LogicalRepRelMapEntry *rel)
 static int
 copy_read_data(void *outbuf, int minread, int maxread)
 {
-   int     bytesread = 0;
-   int     avail;
+   int         bytesread = 0;
+   int         avail;
 
    /* If there are some leftover data from previous read, use them. */
    avail = copybuf->len - copybuf->cursor;
@@ -601,13 +603,13 @@ static void
 fetch_remote_table_info(char *nspname, char *relname,
                        LogicalRepRelation *lrel)
 {
-   WalRcvExecResult   *res;
-   StringInfoData      cmd;
-   TupleTableSlot     *slot;
-   Oid                 tableRow[2] = {OIDOID, CHAROID};
-   Oid                 attrRow[4] = {TEXTOID, OIDOID, INT4OID, BOOLOID};
-   bool                isnull;
-   int                 natt;
+   WalRcvExecResult *res;
+   StringInfoData cmd;
+   TupleTableSlot *slot;
+   Oid         tableRow[2] = {OIDOID, CHAROID};
+   Oid         attrRow[4] = {TEXTOID, OIDOID, INT4OID, BOOLOID};
+   bool        isnull;
+   int         natt;
 
    lrel->nspname = nspname;
    lrel->relname = relname;
@@ -615,14 +617,14 @@ fetch_remote_table_info(char *nspname, char *relname,
    /* First fetch Oid and replica identity. */
    initStringInfo(&cmd);
    appendStringInfo(&cmd, "SELECT c.oid, c.relreplident"
-                          "  FROM pg_catalog.pg_class c"
-                          "  INNER JOIN pg_catalog.pg_namespace n"
-                          "        ON (c.relnamespace = n.oid)"
-                          " WHERE n.nspname = %s"
-                          "   AND c.relname = %s"
-                          "   AND c.relkind = 'r'",
-                          quote_literal_cstr(nspname),
-                          quote_literal_cstr(relname));
+                    "  FROM pg_catalog.pg_class c"
+                    "  INNER JOIN pg_catalog.pg_namespace n"
+                    "        ON (c.relnamespace = n.oid)"
+                    " WHERE n.nspname = %s"
+                    "   AND c.relname = %s"
+                    "   AND c.relkind = 'r'",
+                    quote_literal_cstr(nspname),
+                    quote_literal_cstr(relname));
    res = walrcv_exec(wrconn, cmd.data, 2, tableRow);
 
    if (res->status != WALRCV_OK_TUPLES)
@@ -653,7 +655,7 @@ fetch_remote_table_info(char *nspname, char *relname,
                     "       a.attnum = ANY(i.indkey)"
                     "  FROM pg_catalog.pg_attribute a"
                     "  LEFT JOIN pg_catalog.pg_index i"
-                    "       ON (i.indexrelid = pg_get_replica_identity_index(%u))"
+              "       ON (i.indexrelid = pg_get_replica_identity_index(%u))"
                     " WHERE a.attnum > 0::pg_catalog.int2"
                     "   AND NOT a.attisdropped"
                     "   AND a.attrelid = %u"
@@ -686,7 +688,7 @@ fetch_remote_table_info(char *nspname, char *relname,
        /* Should never happen. */
        if (++natt >= MaxTupleAttributeNumber)
            elog(ERROR, "too many columns in remote table \"%s.%s\"",
-                       nspname, relname);
+                nspname, relname);
 
        ExecClearTuple(slot);
    }
@@ -707,9 +709,9 @@ static void
 copy_table(Relation rel)
 {
    LogicalRepRelMapEntry *relmapentry;
-   LogicalRepRelation  lrel;
-   WalRcvExecResult   *res;
-   StringInfoData      cmd;
+   LogicalRepRelation lrel;
+   WalRcvExecResult *res;
+   StringInfoData cmd;
    CopyState   cstate;
    List       *attnamelist;
    ParseState *pstate;
@@ -759,8 +761,8 @@ copy_table(Relation rel)
 char *
 LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
 {
-   char           *slotname;
-   char           *err;
+   char       *slotname;
+   char       *err;
    char        relstate;
    XLogRecPtr  relstate_lsn;
 
@@ -783,7 +785,7 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
     * NAMEDATALEN on the remote that matters, but this scheme will also work
     * reasonably if that is different.)
     */
-   StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small"); /* for sanity */
+   StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small");       /* for sanity */
    slotname = psprintf("%.*s_%u_sync_%u",
                        NAMEDATALEN - 28,
                        MySubscription->slotname,
@@ -801,7 +803,7 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
        case SUBREL_STATE_DATASYNC:
            {
                Relation    rel;
-               WalRcvExecResult   *res;
+               WalRcvExecResult *res;
 
                SpinLockAcquire(&MyLogicalRepWorker->relmutex);
                MyLogicalRepWorker->relstate = SUBREL_STATE_DATASYNC;
@@ -818,24 +820,23 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
                pgstat_report_stat(false);
 
                /*
-                * We want to do the table data sync in single
-                * transaction.
+                * We want to do the table data sync in single transaction.
                 */
                StartTransactionCommand();
 
                /*
                 * Use standard write lock here. It might be better to
-                * disallow access to table while it's being synchronized.
-                * But we don't want to block the main apply process from
-                * working and it has to open relation in RowExclusiveLock
-                * when remapping remote relation id to local one.
+                * disallow access to table while it's being synchronized. But
+                * we don't want to block the main apply process from working
+                * and it has to open relation in RowExclusiveLock when
+                * remapping remote relation id to local one.
                 */
                rel = heap_open(MyLogicalRepWorker->relid, RowExclusiveLock);
 
                /*
-                * Create temporary slot for the sync process.
-                * We do this inside transaction so that we can use the
-                * snapshot made by the slot to get existing data.
+                * Create temporary slot for the sync process. We do this
+                * inside transaction so that we can use the snapshot made by
+                * the slot to get existing data.
                 */
                res = walrcv_exec(wrconn,
                                  "BEGIN READ ONLY ISOLATION LEVEL "
@@ -849,10 +850,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
                /*
                 * Create new temporary logical decoding slot.
                 *
-                * We'll use slot for data copy so make sure the snapshot
-                * is used for the transaction, that way the COPY will get
-                * data that is consistent with the lsn used by the slot
-                * to start decoding.
+                * We'll use slot for data copy so make sure the snapshot is
+                * used for the transaction, that way the COPY will get data
+                * that is consistent with the lsn used by the slot to start
+                * decoding.
                 */
                walrcv_create_slot(wrconn, slotname, true,
                                   CRS_USE_SNAPSHOT, origin_startpos);
@@ -872,8 +873,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
                CommandCounterIncrement();
 
                /*
-                * We are done with the initial data synchronization,
-                * update the state.
+                * We are done with the initial data synchronization, update
+                * the state.
                 */
                SpinLockAcquire(&MyLogicalRepWorker->relmutex);
                MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT;
@@ -881,8 +882,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
                SpinLockRelease(&MyLogicalRepWorker->relmutex);
 
                /*
-                * Wait for main apply worker to either tell us to
-                * catchup or that we are done.
+                * Wait for main apply worker to either tell us to catchup or
+                * that we are done.
                 */
                wait_for_sync_status_change(MyLogicalRepWorker->relid,
                                            MyLogicalRepWorker->relstate);
index 04813b506e149f9e032b1ffaee49c4300a85a69c..9d1eab9e1e67ec9ee98a727fa2e3376de6e29237 100644 (file)
 
 typedef struct FlushPosition
 {
-   dlist_node node;
-   XLogRecPtr local_end;
-   XLogRecPtr remote_end;
+   dlist_node  node;
+   XLogRecPtr  local_end;
+   XLogRecPtr  remote_end;
 } FlushPosition;
 
 static dlist_head lsn_mapping = DLIST_STATIC_INIT(lsn_mapping);
 
 typedef struct SlotErrCallbackArg
 {
-   LogicalRepRelation  *rel;
+   LogicalRepRelation *rel;
    int         attnum;
 } SlotErrCallbackArg;
 
-static MemoryContext   ApplyMessageContext = NULL;
-MemoryContext          ApplyContext = NULL;
+static MemoryContext ApplyMessageContext = NULL;
+MemoryContext ApplyContext = NULL;
 
-WalReceiverConn       *wrconn = NULL;
+WalReceiverConn *wrconn = NULL;
 
-Subscription      *MySubscription = NULL;
-bool               MySubscriptionValid = false;
+Subscription *MySubscription = NULL;
+bool       MySubscriptionValid = false;
 
-bool               in_remote_transaction = false;
-static XLogRecPtr  remote_final_lsn = InvalidXLogRecPtr;
+bool       in_remote_transaction = false;
+static XLogRecPtr remote_final_lsn = InvalidXLogRecPtr;
 
 static void send_feedback(XLogRecPtr recvpos, bool force, bool requestReply);
 
@@ -215,7 +215,7 @@ create_estate_for_relation(LogicalRepRelMapEntry *rel)
  */
 static void
 slot_fill_defaults(LogicalRepRelMapEntry *rel, EState *estate,
-                TupleTableSlot *slot)
+                  TupleTableSlot *slot)
 {
    TupleDesc   desc = RelationGetDescr(rel->localrel);
    int         num_phys_attrs = desc->natts;
@@ -271,9 +271,9 @@ slot_fill_defaults(LogicalRepRelMapEntry *rel, EState *estate,
 static void
 slot_store_error_callback(void *arg)
 {
-   SlotErrCallbackArg     *errarg = (SlotErrCallbackArg *) arg;
-   Oid     remotetypoid,
-           localtypoid;
+   SlotErrCallbackArg *errarg = (SlotErrCallbackArg *) arg;
+   Oid         remotetypoid,
+               localtypoid;
 
    if (errarg->attnum < 0)
        return;
@@ -295,12 +295,12 @@ slot_store_error_callback(void *arg)
  */
 static void
 slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
-                 char **values)
+                   char **values)
 {
-   int     natts = slot->tts_tupleDescriptor->natts;
-   int     i;
-   SlotErrCallbackArg      errarg;
-   ErrorContextCallback    errcallback;
+   int         natts = slot->tts_tupleDescriptor->natts;
+   int         i;
+   SlotErrCallbackArg errarg;
+   ErrorContextCallback errcallback;
 
    ExecClearTuple(slot);
 
@@ -315,14 +315,14 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
    /* Call the "in" function for each non-dropped attribute */
    for (i = 0; i < natts; i++)
    {
-       Form_pg_attribute   att = slot->tts_tupleDescriptor->attrs[i];
-       int                 remoteattnum = rel->attrmap[i];
+       Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i];
+       int         remoteattnum = rel->attrmap[i];
 
        if (!att->attisdropped && remoteattnum >= 0 &&
            values[remoteattnum] != NULL)
        {
-           Oid typinput;
-           Oid typioparam;
+           Oid         typinput;
+           Oid         typioparam;
 
            errarg.attnum = remoteattnum;
 
@@ -359,12 +359,12 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
  */
 static void
 slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
-                  char **values, bool *replaces)
+                    char **values, bool *replaces)
 {
-   int     natts = slot->tts_tupleDescriptor->natts;
-   int     i;
-   SlotErrCallbackArg      errarg;
-   ErrorContextCallback    errcallback;
+   int         natts = slot->tts_tupleDescriptor->natts;
+   int         i;
+   SlotErrCallbackArg errarg;
+   ErrorContextCallback errcallback;
 
    slot_getallattrs(slot);
    ExecClearTuple(slot);
@@ -380,16 +380,16 @@ slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
    /* Call the "in" function for each replaced attribute */
    for (i = 0; i < natts; i++)
    {
-       Form_pg_attribute   att = slot->tts_tupleDescriptor->attrs[i];
-       int                 remoteattnum = rel->attrmap[i];
+       Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i];
+       int         remoteattnum = rel->attrmap[i];
 
        if (remoteattnum >= 0 && !replaces[remoteattnum])
            continue;
 
        if (remoteattnum >= 0 && values[remoteattnum] != NULL)
        {
-           Oid typinput;
-           Oid typioparam;
+           Oid         typinput;
+           Oid         typioparam;
 
            errarg.attnum = remoteattnum;
 
@@ -418,7 +418,7 @@ slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
 static void
 apply_handle_begin(StringInfo s)
 {
-   LogicalRepBeginData begin_data;
+   LogicalRepBeginData begin_data;
 
    logicalrep_read_begin(s, &begin_data);
 
@@ -437,7 +437,7 @@ apply_handle_begin(StringInfo s)
 static void
 apply_handle_commit(StringInfo s)
 {
-   LogicalRepCommitData    commit_data;
+   LogicalRepCommitData commit_data;
 
    logicalrep_read_commit(s, &commit_data);
 
@@ -476,8 +476,8 @@ static void
 apply_handle_origin(StringInfo s)
 {
    /*
-    * ORIGIN message can only come inside remote transaction and before
-    * any actual writes.
+    * ORIGIN message can only come inside remote transaction and before any
+    * actual writes.
     */
    if (!in_remote_transaction ||
        (IsTransactionState() && !am_tablesync_worker()))
@@ -497,7 +497,7 @@ apply_handle_origin(StringInfo s)
 static void
 apply_handle_relation(StringInfo s)
 {
-   LogicalRepRelation  *rel;
+   LogicalRepRelation *rel;
 
    rel = logicalrep_read_rel(s);
    logicalrep_relmap_update(rel);
@@ -512,7 +512,7 @@ apply_handle_relation(StringInfo s)
 static void
 apply_handle_type(StringInfo s)
 {
-   LogicalRepTyp   typ;
+   LogicalRepTyp typ;
 
    logicalrep_read_typ(s, &typ);
    logicalrep_typmap_update(&typ);
@@ -526,7 +526,7 @@ apply_handle_type(StringInfo s)
 static Oid
 GetRelationIdentityOrPK(Relation rel)
 {
-   Oid idxoid;
+   Oid         idxoid;
 
    idxoid = RelationGetReplicaIndex(rel);
 
@@ -543,11 +543,11 @@ static void
 apply_handle_insert(StringInfo s)
 {
    LogicalRepRelMapEntry *rel;
-   LogicalRepTupleData newtup;
-   LogicalRepRelId     relid;
-   EState             *estate;
-   TupleTableSlot     *remoteslot;
-   MemoryContext       oldctx;
+   LogicalRepTupleData newtup;
+   LogicalRepRelId relid;
+   EState     *estate;
+   TupleTableSlot *remoteslot;
+   MemoryContext oldctx;
 
    ensure_transaction();
 
@@ -607,15 +607,15 @@ check_relation_updatable(LogicalRepRelMapEntry *rel)
        return;
 
    /*
-    * We are in error mode so it's fine this is somewhat slow.
-    * It's better to give user correct error.
+    * We are in error mode so it's fine this is somewhat slow. It's better to
+    * give user correct error.
     */
    if (OidIsValid(GetRelationIdentityOrPK(rel->localrel)))
    {
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("publisher does not send replica identity column "
-                       "expected by the logical replication target relation \"%s.%s\"",
+            "expected by the logical replication target relation \"%s.%s\"",
                        rel->remoterel.nspname, rel->remoterel.relname)));
    }
 
@@ -637,17 +637,17 @@ static void
 apply_handle_update(StringInfo s)
 {
    LogicalRepRelMapEntry *rel;
-   LogicalRepRelId     relid;
-   Oid                 idxoid;
-   EState             *estate;
-   EPQState            epqstate;
-   LogicalRepTupleData oldtup;
-   LogicalRepTupleData newtup;
-   bool                has_oldtup;
-   TupleTableSlot     *localslot;
-   TupleTableSlot     *remoteslot;
-   bool                found;
-   MemoryContext       oldctx;
+   LogicalRepRelId relid;
+   Oid         idxoid;
+   EState     *estate;
+   EPQState    epqstate;
+   LogicalRepTupleData oldtup;
+   LogicalRepTupleData newtup;
+   bool        has_oldtup;
+   TupleTableSlot *localslot;
+   TupleTableSlot *remoteslot;
+   bool        found;
+   MemoryContext oldctx;
 
    ensure_transaction();
 
@@ -685,8 +685,8 @@ apply_handle_update(StringInfo s)
    MemoryContextSwitchTo(oldctx);
 
    /*
-    * Try to find tuple using either replica identity index, primary key
-    * or if needed, sequential scan.
+    * Try to find tuple using either replica identity index, primary key or
+    * if needed, sequential scan.
     */
    idxoid = GetRelationIdentityOrPK(rel->localrel);
    Assert(OidIsValid(idxoid) ||
@@ -758,15 +758,15 @@ static void
 apply_handle_delete(StringInfo s)
 {
    LogicalRepRelMapEntry *rel;
-   LogicalRepTupleData oldtup;
-   LogicalRepRelId     relid;
-   Oid                 idxoid;
-   EState             *estate;
-   EPQState            epqstate;
-   TupleTableSlot     *remoteslot;
-   TupleTableSlot     *localslot;
-   bool                found;
-   MemoryContext       oldctx;
+   LogicalRepTupleData oldtup;
+   LogicalRepRelId relid;
+   Oid         idxoid;
+   EState     *estate;
+   EPQState    epqstate;
+   TupleTableSlot *remoteslot;
+   TupleTableSlot *localslot;
+   bool        found;
+   MemoryContext oldctx;
 
    ensure_transaction();
 
@@ -802,8 +802,8 @@ apply_handle_delete(StringInfo s)
    MemoryContextSwitchTo(oldctx);
 
    /*
-    * Try to find tuple using either replica identity index, primary key
-    * or if needed, sequential scan.
+    * Try to find tuple using either replica identity index, primary key or
+    * if needed, sequential scan.
     */
    idxoid = GetRelationIdentityOrPK(rel->localrel);
    Assert(OidIsValid(idxoid) ||
@@ -826,7 +826,7 @@ apply_handle_delete(StringInfo s)
    }
    else
    {
-       /* The tuple to be deleted could not be found.*/
+       /* The tuple to be deleted could not be found. */
        ereport(DEBUG1,
                (errmsg("logical replication could not find row for delete "
                        "in replication target %s",
@@ -856,46 +856,46 @@ apply_handle_delete(StringInfo s)
 static void
 apply_dispatch(StringInfo s)
 {
-   char action = pq_getmsgbyte(s);
+   char        action = pq_getmsgbyte(s);
 
    switch (action)
    {
-       /* BEGIN */
+           /* BEGIN */
        case 'B':
            apply_handle_begin(s);
            break;
-       /* COMMIT */
+           /* COMMIT */
        case 'C':
            apply_handle_commit(s);
            break;
-       /* INSERT */
+           /* INSERT */
        case 'I':
            apply_handle_insert(s);
            break;
-       /* UPDATE */
+           /* UPDATE */
        case 'U':
            apply_handle_update(s);
            break;
-       /* DELETE */
+           /* DELETE */
        case 'D':
            apply_handle_delete(s);
            break;
-       /* RELATION */
+           /* RELATION */
        case 'R':
            apply_handle_relation(s);
            break;
-       /* TYPE */
+           /* TYPE */
        case 'Y':
            apply_handle_type(s);
            break;
-       /* ORIGIN */
+           /* ORIGIN */
        case 'O':
            apply_handle_origin(s);
            break;
        default:
            ereport(ERROR,
                    (errcode(ERRCODE_PROTOCOL_VIOLATION),
-                    errmsg("invalid logical replication message type %c", action)));
+            errmsg("invalid logical replication message type %c", action)));
    }
 }
 
@@ -925,7 +925,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush,
    dlist_foreach_modify(iter, &lsn_mapping)
    {
        FlushPosition *pos =
-           dlist_container(FlushPosition, node, iter.cur);
+       dlist_container(FlushPosition, node, iter.cur);
 
        *write = pos->remote_end;
 
@@ -995,12 +995,12 @@ static void
 LogicalRepApplyLoop(XLogRecPtr last_received)
 {
    /*
-    * Init the ApplyMessageContext which we clean up after each
-    * replication protocol message.
+    * Init the ApplyMessageContext which we clean up after each replication
+    * protocol message.
     */
    ApplyMessageContext = AllocSetContextCreate(ApplyContext,
-                                        "ApplyMessageContext",
-                                        ALLOCSET_DEFAULT_SIZES);
+                                               "ApplyMessageContext",
+                                               ALLOCSET_DEFAULT_SIZES);
 
    /* mark as idle, before starting to loop */
    pgstat_report_activity(STATE_IDLE, NULL);
@@ -1039,7 +1039,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
                }
                else
                {
-                   int c;
+                   int         c;
                    StringInfoData s;
 
                    /* Reset timeout. */
@@ -1108,7 +1108,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
        {
            /*
             * If we didn't get any transactions for a while there might be
-            * unconsumed invalidation messages in the queue, consume them now.
+            * unconsumed invalidation messages in the queue, consume them
+            * now.
             */
            AcceptInvalidationMessages();
            if (!MySubscriptionValid)
@@ -1126,6 +1127,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
        if (endofstream)
        {
            TimeLineID  tli;
+
            walrcv_endstreaming(wrconn, &tli);
            break;
        }
@@ -1152,19 +1154,18 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
        if (rc & WL_TIMEOUT)
        {
            /*
-            * We didn't receive anything new. If we haven't heard
-            * anything from the server for more than
-            * wal_receiver_timeout / 2, ping the server. Also, if
-            * it's been longer than wal_receiver_status_interval
-            * since the last update we sent, send a status update to
-            * the master anyway, to report any progress in applying
-            * WAL.
+            * We didn't receive anything new. If we haven't heard anything
+            * from the server for more than wal_receiver_timeout / 2, ping
+            * the server. Also, if it's been longer than
+            * wal_receiver_status_interval since the last update we sent,
+            * send a status update to the master anyway, to report any
+            * progress in applying WAL.
             */
            bool        requestReply = false;
 
            /*
-            * Check if time since last receive from standby has
-            * reached the configured limit.
+            * Check if time since last receive from standby has reached the
+            * configured limit.
             */
            if (wal_receiver_timeout > 0)
            {
@@ -1180,13 +1181,13 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
                            (errmsg("terminating logical replication worker due to timeout")));
 
                /*
-                * We didn't receive anything new, for half of
-                * receiver replication timeout. Ping the server.
+                * We didn't receive anything new, for half of receiver
+                * replication timeout. Ping the server.
                 */
                if (!ping_sent)
                {
                    timeout = TimestampTzPlusMilliseconds(last_recv_timestamp,
-                                                         (wal_receiver_timeout / 2));
+                                                (wal_receiver_timeout / 2));
                    if (now >= timeout)
                    {
                        requestReply = true;
@@ -1211,17 +1212,17 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
 static void
 send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
 {
-   static StringInfo   reply_message = NULL;
-   static TimestampTz  send_time = 0;
+   static StringInfo reply_message = NULL;
+   static TimestampTz send_time = 0;
 
    static XLogRecPtr last_recvpos = InvalidXLogRecPtr;
    static XLogRecPtr last_writepos = InvalidXLogRecPtr;
    static XLogRecPtr last_flushpos = InvalidXLogRecPtr;
 
-   XLogRecPtr writepos;
-   XLogRecPtr flushpos;
+   XLogRecPtr  writepos;
+   XLogRecPtr  flushpos;
    TimestampTz now;
-   bool have_pending_txes;
+   bool        have_pending_txes;
 
    /*
     * If the user doesn't want status to be reported to the publisher, be
@@ -1237,8 +1238,8 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
    get_flush_position(&writepos, &flushpos, &have_pending_txes);
 
    /*
-    * No outstanding transactions to flush, we can report the latest
-    * received position. This is important for synchronous replication.
+    * No outstanding transactions to flush, we can report the latest received
+    * position. This is important for synchronous replication.
     */
    if (!have_pending_txes)
        flushpos = writepos = recvpos;
@@ -1262,7 +1263,8 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
 
    if (!reply_message)
    {
-       MemoryContext   oldctx = MemoryContextSwitchTo(ApplyContext);
+       MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext);
+
        reply_message = makeStringInfo();
        MemoryContextSwitchTo(oldctx);
    }
@@ -1273,7 +1275,7 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
    pq_sendint64(reply_message, recvpos);       /* write */
    pq_sendint64(reply_message, flushpos);      /* flush */
    pq_sendint64(reply_message, writepos);      /* apply */
-   pq_sendint64(reply_message, now);           /* sendTime */
+   pq_sendint64(reply_message, now);   /* sendTime */
    pq_sendbyte(reply_message, requestReply);   /* replyRequested */
 
    elog(DEBUG2, "sending feedback (force %d) to recv %X/%X, write %X/%X, flush %X/%X",
@@ -1300,9 +1302,9 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
 static void
 reread_subscription(void)
 {
-   MemoryContext   oldctx;
-   Subscription   *newsub;
-   bool            started_tx = false;
+   MemoryContext oldctx;
+   Subscription *newsub;
+   bool        started_tx = false;
 
    /* This function might be called inside or outside of transaction. */
    if (!IsTransactionState())
@@ -1317,47 +1319,45 @@ reread_subscription(void)
    newsub = GetSubscription(MyLogicalRepWorker->subid, true);
 
    /*
-    * Exit if the subscription was removed.
-    * This normally should not happen as the worker gets killed
-    * during DROP SUBSCRIPTION.
+    * Exit if the subscription was removed. This normally should not happen
+    * as the worker gets killed during DROP SUBSCRIPTION.
     */
    if (!newsub)
    {
        ereport(LOG,
-               (errmsg("logical replication worker for subscription \"%s\" will "
-                       "stop because the subscription was removed",
-                       MySubscription->name)));
+          (errmsg("logical replication worker for subscription \"%s\" will "
+                  "stop because the subscription was removed",
+                  MySubscription->name)));
 
        walrcv_disconnect(wrconn);
        proc_exit(0);
    }
 
    /*
-    * Exit if the subscription was disabled.
-    * This normally should not happen as the worker gets killed
-    * during ALTER SUBSCRIPTION ... DISABLE.
+    * Exit if the subscription was disabled. This normally should not happen
+    * as the worker gets killed during ALTER SUBSCRIPTION ... DISABLE.
     */
    if (!newsub->enabled)
    {
        ereport(LOG,
-               (errmsg("logical replication worker for subscription \"%s\" will "
-                       "stop because the subscription was disabled",
-                       MySubscription->name)));
+          (errmsg("logical replication worker for subscription \"%s\" will "
+                  "stop because the subscription was disabled",
+                  MySubscription->name)));
 
        walrcv_disconnect(wrconn);
        proc_exit(0);
    }
 
    /*
-    * Exit if connection string was changed. The launcher will start
-    * new worker.
+    * Exit if connection string was changed. The launcher will start new
+    * worker.
     */
    if (strcmp(newsub->conninfo, MySubscription->conninfo) != 0)
    {
        ereport(LOG,
-               (errmsg("logical replication worker for subscription \"%s\" will "
-                       "restart because the connection information was changed",
-                       MySubscription->name)));
+          (errmsg("logical replication worker for subscription \"%s\" will "
+                  "restart because the connection information was changed",
+                  MySubscription->name)));
 
        walrcv_disconnect(wrconn);
        proc_exit(0);
@@ -1370,9 +1370,9 @@ reread_subscription(void)
    if (strcmp(newsub->name, MySubscription->name) != 0)
    {
        ereport(LOG,
-               (errmsg("logical replication worker for subscription \"%s\" will "
-                       "restart because subscription was renamed",
-                       MySubscription->name)));
+          (errmsg("logical replication worker for subscription \"%s\" will "
+                  "restart because subscription was renamed",
+                  MySubscription->name)));
 
        walrcv_disconnect(wrconn);
        proc_exit(0);
@@ -1382,30 +1382,30 @@ reread_subscription(void)
    Assert(newsub->slotname);
 
    /*
-    * We need to make new connection to new slot if slot name has changed
-    * so exit here as well if that's the case.
+    * We need to make new connection to new slot if slot name has changed so
+    * exit here as well if that's the case.
     */
    if (strcmp(newsub->slotname, MySubscription->slotname) != 0)
    {
        ereport(LOG,
-               (errmsg("logical replication worker for subscription \"%s\" will "
-                       "restart because the replication slot name was changed",
-                       MySubscription->name)));
+          (errmsg("logical replication worker for subscription \"%s\" will "
+                  "restart because the replication slot name was changed",
+                  MySubscription->name)));
 
        walrcv_disconnect(wrconn);
        proc_exit(0);
    }
 
    /*
-    * Exit if publication list was changed. The launcher will start
-    * new worker.
+    * Exit if publication list was changed. The launcher will start new
+    * worker.
     */
    if (!equal(newsub->publications, MySubscription->publications))
    {
        ereport(LOG,
-               (errmsg("logical replication worker for subscription \"%s\" will "
-                       "restart because subscription's publications were changed",
-                       MySubscription->name)));
+          (errmsg("logical replication worker for subscription \"%s\" will "
+                  "restart because subscription's publications were changed",
+                  MySubscription->name)));
 
        walrcv_disconnect(wrconn);
        proc_exit(0);
@@ -1448,11 +1448,11 @@ subscription_change_cb(Datum arg, int cacheid, uint32 hashvalue)
 void
 ApplyWorkerMain(Datum main_arg)
 {
-   int             worker_slot = DatumGetInt32(main_arg);
-   MemoryContext   oldctx;
-   char            originname[NAMEDATALEN];
-   XLogRecPtr      origin_startpos;
-   char           *myslotname;
+   int         worker_slot = DatumGetInt32(main_arg);
+   MemoryContext oldctx;
+   char        originname[NAMEDATALEN];
+   XLogRecPtr  origin_startpos;
+   char       *myslotname;
    WalRcvStreamOptions options;
 
    /* Attach to slot */
@@ -1488,8 +1488,8 @@ ApplyWorkerMain(Datum main_arg)
 
    /* Load the subscription into persistent memory context. */
    ApplyContext = AllocSetContextCreate(TopMemoryContext,
-                                             "ApplyContext",
-                                             ALLOCSET_DEFAULT_SIZES);
+                                        "ApplyContext",
+                                        ALLOCSET_DEFAULT_SIZES);
    StartTransactionCommand();
    oldctx = MemoryContextSwitchTo(ApplyContext);
    MySubscription = GetSubscription(MyLogicalRepWorker->subid, false);
@@ -1503,9 +1503,9 @@ ApplyWorkerMain(Datum main_arg)
    if (!MySubscription->enabled)
    {
        ereport(LOG,
-               (errmsg("logical replication worker for subscription \"%s\" will not "
-                       "start because the subscription was disabled during startup",
-                       MySubscription->name)));
+       (errmsg("logical replication worker for subscription \"%s\" will not "
+               "start because the subscription was disabled during startup",
+               MySubscription->name)));
 
        proc_exit(0);
    }
@@ -1530,7 +1530,7 @@ ApplyWorkerMain(Datum main_arg)
 
    if (am_tablesync_worker())
    {
-       char *syncslotname;
+       char       *syncslotname;
 
        /* This is table synchroniation worker, call initial sync. */
        syncslotname = LogicalRepSyncTableStart(&origin_startpos);
@@ -1545,10 +1545,10 @@ ApplyWorkerMain(Datum main_arg)
    else
    {
        /* This is main apply worker */
-       RepOriginId     originid;
-       TimeLineID      startpointTLI;
-       char           *err;
-       int             server_version;
+       RepOriginId originid;
+       TimeLineID  startpointTLI;
+       char       *err;
+       int         server_version;
 
        myslotname = MySubscription->slotname;
 
@@ -1570,9 +1570,8 @@ ApplyWorkerMain(Datum main_arg)
                    (errmsg("could not connect to the publisher: %s", err)));
 
        /*
-        * We don't really use the output identify_system for anything
-        * but it does some initializations on the upstream so let's still
-        * call it.
+        * We don't really use the output identify_system for anything but it
+        * does some initializations on the upstream so let's still call it.
         */
        (void) walrcv_identify_system(wrconn, &startpointTLI,
                                      &server_version);
@@ -1580,8 +1579,8 @@ ApplyWorkerMain(Datum main_arg)
    }
 
    /*
-    * Setup callback for syscache so that we know when something
-    * changes in the subscription relation state.
+    * Setup callback for syscache so that we know when something changes in
+    * the subscription relation state.
     */
    CacheRegisterSyscacheCallback(SUBSCRIPTIONRELMAP,
                                  invalidate_syncing_table_states,
index 694f351dd8ea7701512a0f5a8cfa6c75fdcd74b7..5bdfa60ae74044e4ad186c29fcc7d34c25595278 100644 (file)
@@ -29,31 +29,31 @@ PG_MODULE_MAGIC;
 
 extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
 
-static void pgoutput_startup(LogicalDecodingContext * ctx,
-                             OutputPluginOptions *opt, bool is_init);
-static void pgoutput_shutdown(LogicalDecodingContext * ctx);
+static void pgoutput_startup(LogicalDecodingContext *ctx,
+                OutputPluginOptions *opt, bool is_init);
+static void pgoutput_shutdown(LogicalDecodingContext *ctx);
 static void pgoutput_begin_txn(LogicalDecodingContext *ctx,
-                   ReorderBufferTXN *txn);
+                  ReorderBufferTXN *txn);
 static void pgoutput_commit_txn(LogicalDecodingContext *ctx,
-                    ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
+                   ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
 static void pgoutput_change(LogicalDecodingContext *ctx,
-                ReorderBufferTXN *txn, Relation rel,
-                ReorderBufferChange *change);
+               ReorderBufferTXN *txn, Relation rel,
+               ReorderBufferChange *change);
 static bool pgoutput_origin_filter(LogicalDecodingContext *ctx,
-                       RepOriginId origin_id);
+                      RepOriginId origin_id);
 
 static bool publications_valid;
 
 static List *LoadPublications(List *pubnames);
 static void publication_invalidation_cb(Datum arg, int cacheid,
-                                       uint32 hashvalue);
+                           uint32 hashvalue);
 
 /* Entry in the map used to remember which relation schemas we sent. */
 typedef struct RelationSyncEntry
 {
-   Oid     relid;          /* relation oid */
-   bool    schema_sent;    /* did we send the schema? */
-   bool    replicate_valid;
+   Oid         relid;          /* relation oid */
+   bool        schema_sent;    /* did we send the schema? */
+   bool        replicate_valid;
    PublicationActions pubactions;
 } RelationSyncEntry;
 
@@ -64,7 +64,7 @@ static void init_rel_sync_cache(MemoryContext decoding_context);
 static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data, Oid relid);
 static void rel_sync_cache_relation_cb(Datum arg, Oid relid);
 static void rel_sync_cache_publication_cb(Datum arg, int cacheid,
-                                         uint32 hashvalue);
+                             uint32 hashvalue);
 
 /*
  * Specify output plugin callbacks
@@ -130,9 +130,9 @@ parse_output_parameters(List *options, uint32 *protocol_version,
 
            if (!SplitIdentifierString(strVal(defel->arg), ',',
                                       publication_names))
-                   ereport(ERROR,
-                           (errcode(ERRCODE_INVALID_NAME),
-                            errmsg("invalid publication_names syntax")));
+               ereport(ERROR,
+                       (errcode(ERRCODE_INVALID_NAME),
+                        errmsg("invalid publication_names syntax")));
        }
        else
            elog(ERROR, "unrecognized pgoutput option: %s", defel->defname);
@@ -143,14 +143,14 @@ parse_output_parameters(List *options, uint32 *protocol_version,
  * Initialize this plugin
  */
 static void
-pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
-                 bool is_init)
+pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
+                bool is_init)
 {
-   PGOutputData   *data = palloc0(sizeof(PGOutputData));
+   PGOutputData *data = palloc0(sizeof(PGOutputData));
 
    /* Create our memory context for private allocations. */
    data->context = AllocSetContextCreate(ctx->context,
-                                         "logical replication output context",
+                                       "logical replication output context",
                                          ALLOCSET_DEFAULT_MINSIZE,
                                          ALLOCSET_DEFAULT_INITSIZE,
                                          ALLOCSET_DEFAULT_MAXSIZE);
@@ -175,15 +175,15 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
        /* Check if we support requested protocol */
        if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM)
            ereport(ERROR,
-               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                errmsg("client sent proto_version=%d but we only support protocol %d or lower",
+                   (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("client sent proto_version=%d but we only support protocol %d or lower",
                     data->protocol_version, LOGICALREP_PROTO_VERSION_NUM)));
 
        if (data->protocol_version < LOGICALREP_PROTO_MIN_VERSION_NUM)
            ereport(ERROR,
-               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                errmsg("client sent proto_version=%d but we only support protocol %d or higher",
-                  data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM)));
+                   (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("client sent proto_version=%d but we only support protocol %d or higher",
+                data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM)));
 
        if (list_length(data->publication_names) < 1)
            ereport(ERROR,
@@ -208,14 +208,14 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
 static void
 pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
 {
-   bool    send_replication_origin = txn->origin_id != InvalidRepOriginId;
+   bool        send_replication_origin = txn->origin_id != InvalidRepOriginId;
 
    OutputPluginPrepareWrite(ctx, !send_replication_origin);
    logicalrep_write_begin(ctx->out, txn);
 
    if (send_replication_origin)
    {
-       char *origin;
+       char       *origin;
 
        /* Message boundary */
        OutputPluginWrite(ctx, false);
@@ -225,10 +225,10 @@ pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
         * XXX: which behaviour do we want here?
         *
         * Alternatives:
-        *  - don't send origin message if origin name not found
-        *    (that's what we do now)
-        *  - throw error - that will break replication, not good
-        *  - send some special "unknown" origin
+        *  - don't send origin message if origin name not found
+        *    (that's what we do now)
+        *  - throw error - that will break replication, not good
+        *  - send some special "unknown" origin
         *----------
         */
        if (replorigin_by_oid(txn->origin_id, true, &origin))
@@ -243,7 +243,7 @@ pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
  */
 static void
 pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
-                    XLogRecPtr commit_lsn)
+                   XLogRecPtr commit_lsn)
 {
    OutputPluginUpdateProgress(ctx);
 
@@ -259,9 +259,9 @@ static void
 pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
                Relation relation, ReorderBufferChange *change)
 {
-   PGOutputData       *data = (PGOutputData *) ctx->output_plugin_private;
-   MemoryContext       old;
-   RelationSyncEntry  *relentry;
+   PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
+   MemoryContext old;
+   RelationSyncEntry *relentry;
 
    relentry = get_rel_sync_entry(data, RelationGetRelid(relation));
 
@@ -333,8 +333,8 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
            break;
        case REORDER_BUFFER_CHANGE_UPDATE:
            {
-               HeapTuple oldtuple = change->data.tp.oldtuple ?
-                   &change->data.tp.oldtuple->tuple : NULL;
+               HeapTuple   oldtuple = change->data.tp.oldtuple ?
+               &change->data.tp.oldtuple->tuple : NULL;
 
                OutputPluginPrepareWrite(ctx, true);
                logicalrep_write_update(ctx->out, relation, oldtuple,
@@ -367,7 +367,7 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
  */
 static bool
 pgoutput_origin_filter(LogicalDecodingContext *ctx,
-                       RepOriginId origin_id)
+                      RepOriginId origin_id)
 {
    return false;
 }
@@ -379,7 +379,7 @@ pgoutput_origin_filter(LogicalDecodingContext *ctx,
  * of the ctx->context so it will be cleaned up by logical decoding machinery.
  */
 static void
-pgoutput_shutdown(LogicalDecodingContext * ctx)
+pgoutput_shutdown(LogicalDecodingContext *ctx)
 {
    if (RelationSyncCache)
    {
@@ -397,10 +397,10 @@ LoadPublications(List *pubnames)
    List       *result = NIL;
    ListCell   *lc;
 
-   foreach (lc, pubnames)
+   foreach(lc, pubnames)
    {
-       char           *pubname = (char *) lfirst(lc);
-       Publication    *pub = GetPublicationByName(pubname, false);
+       char       *pubname = (char *) lfirst(lc);
+       Publication *pub = GetPublicationByName(pubname, false);
 
        result = lappend(result, pub);
    }
@@ -417,9 +417,8 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
    publications_valid = false;
 
    /*
-    * Also invalidate per-relation cache so that next time the filtering
-    * info is checked it will be updated with the new publication
-    * settings.
+    * Also invalidate per-relation cache so that next time the filtering info
+    * is checked it will be updated with the new publication settings.
     */
    rel_sync_cache_publication_cb(arg, cacheid, hashvalue);
 }
@@ -434,7 +433,7 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
 static void
 init_rel_sync_cache(MemoryContext cachectx)
 {
-   HASHCTL ctl;
+   HASHCTL     ctl;
    MemoryContext old_ctxt;
 
    if (RelationSyncCache != NULL)
@@ -466,9 +465,9 @@ init_rel_sync_cache(MemoryContext cachectx)
 static RelationSyncEntry *
 get_rel_sync_entry(PGOutputData *data, Oid relid)
 {
-   RelationSyncEntry  *entry;
-   bool                found;
-   MemoryContext       oldctx;
+   RelationSyncEntry *entry;
+   bool        found;
+   MemoryContext oldctx;
 
    Assert(RelationSyncCache != NULL);
 
@@ -499,9 +498,9 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
        }
 
        /*
-        * Build publication cache. We can't use one provided by relcache
-        * as relcache considers all publications given relation is in, but
-        * here we only need to consider ones that the subscriber requested.
+        * Build publication cache. We can't use one provided by relcache as
+        * relcache considers all publications given relation is in, but here
+        * we only need to consider ones that the subscriber requested.
         */
        entry->pubactions.pubinsert = entry->pubactions.pubupdate =
            entry->pubactions.pubdelete = false;
@@ -539,7 +538,7 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
 static void
 rel_sync_cache_relation_cb(Datum arg, Oid relid)
 {
-   RelationSyncEntry  *entry;
+   RelationSyncEntry *entry;
 
    /*
     * We can get here if the plugin was used in SQL interface as the
@@ -558,15 +557,14 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
     * safe point.
     *
     * Getting invalidations for relations that aren't in the table is
-    * entirely normal, since there's no way to unregister for an
-    * invalidation event. So we don't care if it's found or not.
+    * entirely normal, since there's no way to unregister for an invalidation
+    * event. So we don't care if it's found or not.
     */
    entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
                                              HASH_FIND, NULL);
 
    /*
-    * Reset schema sent status as the relation definition may have
-    * changed.
+    * Reset schema sent status as the relation definition may have changed.
     */
    if (entry != NULL)
        entry->schema_sent = false;
@@ -578,8 +576,8 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 static void
 rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
 {
-   HASH_SEQ_STATUS     status;
-   RelationSyncEntry  *entry;
+   HASH_SEQ_STATUS status;
+   RelationSyncEntry *entry;
 
    /*
     * We can get here if the plugin was used in SQL interface as the
@@ -590,8 +588,8 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
        return;
 
    /*
-    * There is no way to find which entry in our cache the hash belongs to
-    * so mark the whole cache as invalid.
+    * There is no way to find which entry in our cache the hash belongs to so
+    * mark the whole cache as invalid.
     */
    hash_seq_init(&status, RelationSyncCache);
    while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
index 5f63d0484a08ea2cab90eb6ca19aa2fd1988ca89..5386e86aa6b3b6d9afd1bd5ad92d979736a09c12 100644 (file)
@@ -502,8 +502,8 @@ ReplicationSlotDropPtr(ReplicationSlot *slot)
    /*
     * Rename the slot directory on disk, so that we'll no longer recognize
     * this as a valid slot.  Note that if this fails, we've got to mark the
-    * slot inactive before bailing out.  If we're dropping an ephemeral or
-    * temporary slot, we better never fail hard as the caller won't expect
+    * slot inactive before bailing out.  If we're dropping an ephemeral or a
+    * temporary slot, we better never fail hard as the caller won't expect
     * the slot to survive and this might get called during error handling.
     */
    if (rename(path, tmppath) == 0)
@@ -839,8 +839,8 @@ restart:
    for (i = 0; i < max_replication_slots; i++)
    {
        ReplicationSlot *s;
-       char *slotname;
-       int active_pid;
+       char       *slotname;
+       int         active_pid;
 
        s = &ReplicationSlotCtl->replication_slots[i];
 
index 56a9ca965172e17df5600780201759f9585da50f..bbd26f3d6a3ddb0d3acd8762be2965d9b61ccbd4 100644 (file)
@@ -119,11 +119,11 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
 
    /*
     * Acquire a logical decoding slot, this will check for conflicting names.
-    * Initially create persistent slot as ephemeral - that allows us to nicely
-    * handle errors during initialization because it'll get dropped if this
-    * transaction fails. We'll make it persistent at the end.
-    * Temporary slots can be created as temporary from beginning as they get
-    * dropped on error as well.
+    * Initially create persistent slot as ephemeral - that allows us to
+    * nicely handle errors during initialization because it'll get dropped if
+    * this transaction fails. We'll make it persistent at the end. Temporary
+    * slots can be created as temporary from beginning as they get dropped on
+    * error as well.
     */
    ReplicationSlotCreate(NameStr(*name), true,
                          temporary ? RS_TEMPORARY : RS_EPHEMERAL);
@@ -132,7 +132,7 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
     * Create logical decoding context, to build the initial snapshot.
     */
    ctx = CreateInitDecodingContext(NameStr(*plugin), NIL,
-                                   false, /* do not build snapshot */
+                                   false,      /* do not build snapshot */
                                    logical_read_local_xlog_page, NULL, NULL,
                                    NULL);
 
@@ -227,7 +227,7 @@ pg_get_replication_slots(PG_FUNCTION_ARGS)
        Datum       values[PG_GET_REPLICATION_SLOTS_COLS];
        bool        nulls[PG_GET_REPLICATION_SLOTS_COLS];
 
-       ReplicationSlotPersistency  persistency;
+       ReplicationSlotPersistency persistency;
        TransactionId xmin;
        TransactionId catalog_xmin;
        XLogRecPtr  restart_lsn;
index 554f783209635167b1089c3b73fda6f23a0400bb..ad213fc454a6ae54caba86c23f42cd7f66fe88ce 100644 (file)
@@ -102,17 +102,17 @@ static void SyncRepCancelWait(void);
 static int SyncRepWakeQueue(bool all, int mode);
 
 static bool SyncRepGetSyncRecPtr(XLogRecPtr *writePtr,
-                                XLogRecPtr *flushPtr,
-                                XLogRecPtr *applyPtr,
-                                bool *am_sync);
+                    XLogRecPtr *flushPtr,
+                    XLogRecPtr *applyPtr,
+                    bool *am_sync);
 static void SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr,
-                                      XLogRecPtr *flushPtr,
-                                      XLogRecPtr *applyPtr,
-                                      List *sync_standbys);
+                          XLogRecPtr *flushPtr,
+                          XLogRecPtr *applyPtr,
+                          List *sync_standbys);
 static void SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr,
-                                         XLogRecPtr *flushPtr,
-                                         XLogRecPtr *applyPtr,
-                                         List *sync_standbys, uint8 nth);
+                             XLogRecPtr *flushPtr,
+                             XLogRecPtr *applyPtr,
+                             List *sync_standbys, uint8 nth);
 static int SyncRepGetStandbyPriority(void);
 static List *SyncRepGetSyncStandbysPriority(bool *am_sync);
 static List *SyncRepGetSyncStandbysQuorum(bool *am_sync);
@@ -455,7 +455,7 @@ SyncRepReleaseWaiters(void)
        if (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY)
            ereport(LOG,
                    (errmsg("standby \"%s\" is now a synchronous standby with priority %u",
-                           application_name, MyWalSnd->sync_standby_priority)));
+                       application_name, MyWalSnd->sync_standby_priority)));
        else
            ereport(LOG,
                    (errmsg("standby \"%s\" is now a candidate for quorum synchronous standby",
@@ -513,7 +513,7 @@ SyncRepReleaseWaiters(void)
  */
 static bool
 SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
-                          XLogRecPtr *applyPtr, bool *am_sync)
+                    XLogRecPtr *applyPtr, bool *am_sync)
 {
    List       *sync_standbys;
 
@@ -542,9 +542,9 @@ SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
     * oldest ones among sync standbys. In a quorum-based, they are the Nth
     * latest ones.
     *
-    * SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest positions.
-    * But we use SyncRepGetOldestSyncRecPtr() for that calculation because
-    * it's a bit more efficient.
+    * SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest
+    * positions. But we use SyncRepGetOldestSyncRecPtr() for that calculation
+    * because it's a bit more efficient.
     *
     * XXX If the numbers of current and requested sync standbys are the same,
     * we can use SyncRepGetOldestSyncRecPtr() to calculate the synced
@@ -572,15 +572,15 @@ static void
 SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
                           XLogRecPtr *applyPtr, List *sync_standbys)
 {
-   ListCell    *cell;
+   ListCell   *cell;
 
    /*
-    * Scan through all sync standbys and calculate the oldest
-    * Write, Flush and Apply positions.
+    * Scan through all sync standbys and calculate the oldest Write, Flush
+    * and Apply positions.
     */
-   foreach (cell, sync_standbys)
+   foreach(cell, sync_standbys)
    {
-       WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
+       WalSnd     *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
        XLogRecPtr  write;
        XLogRecPtr  flush;
        XLogRecPtr  apply;
@@ -606,23 +606,23 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
  */
 static void
 SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
-                         XLogRecPtr *applyPtr, List *sync_standbys, uint8 nth)
+                       XLogRecPtr *applyPtr, List *sync_standbys, uint8 nth)
 {
-   ListCell    *cell;
-   XLogRecPtr  *write_array;
-   XLogRecPtr  *flush_array;
-   XLogRecPtr  *apply_array;
-   int len;
-   int i = 0;
+   ListCell   *cell;
+   XLogRecPtr *write_array;
+   XLogRecPtr *flush_array;
+   XLogRecPtr *apply_array;
+   int         len;
+   int         i = 0;
 
    len = list_length(sync_standbys);
    write_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
    flush_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
    apply_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
 
-   foreach (cell, sync_standbys)
+   foreach(cell, sync_standbys)
    {
-       WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
+       WalSnd     *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
 
        SpinLockAcquire(&walsnd->mutex);
        write_array[i] = walsnd->write;
@@ -654,8 +654,8 @@ SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
 static int
 cmp_lsn(const void *a, const void *b)
 {
-   XLogRecPtr lsn1 = *((const XLogRecPtr *) a);
-   XLogRecPtr lsn2 = *((const XLogRecPtr *) b);
+   XLogRecPtr  lsn1 = *((const XLogRecPtr *) a);
+   XLogRecPtr  lsn2 = *((const XLogRecPtr *) b);
 
    if (lsn1 > lsn2)
        return -1;
@@ -674,7 +674,7 @@ cmp_lsn(const void *a, const void *b)
  * sync standby. Otherwise it's set to false.
  */
 List *
-SyncRepGetSyncStandbys(bool    *am_sync)
+SyncRepGetSyncStandbys(bool *am_sync)
 {
    /* Set default result */
    if (am_sync != NULL)
@@ -702,8 +702,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
 static List *
 SyncRepGetSyncStandbysQuorum(bool *am_sync)
 {
-   List    *result = NIL;
-   int i;
+   List       *result = NIL;
+   int         i;
    volatile WalSnd *walsnd;    /* Use volatile pointer to prevent code
                                 * rearrangement */
 
@@ -730,8 +730,8 @@ SyncRepGetSyncStandbysQuorum(bool *am_sync)
            continue;
 
        /*
-        * Consider this standby as a candidate for quorum sync standbys
-        * and append it to the result.
+        * Consider this standby as a candidate for quorum sync standbys and
+        * append it to the result.
         */
        result = lappend_int(result, i);
        if (am_sync != NULL && walsnd == MyWalSnd)
@@ -955,8 +955,8 @@ SyncRepGetStandbyPriority(void)
        return 0;
 
    /*
-    * In quorum-based sync replication, all the standbys in the list
-    * have the same priority, one.
+    * In quorum-based sync replication, all the standbys in the list have the
+    * same priority, one.
     */
    return (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY) ? priority : 1;
 }
index 028170c9529c76674e06bd8d07f8d95963d4b606..272361271814514f5003a03f7d9bf8a25e4f6d1f 100644 (file)
@@ -1176,9 +1176,12 @@ XLogWalRcvSendHSFeedback(bool immed)
 {
    TimestampTz now;
    TransactionId nextXid;
-   uint32      xmin_epoch, catalog_xmin_epoch;
-   TransactionId xmin, catalog_xmin;
+   uint32      xmin_epoch,
+               catalog_xmin_epoch;
+   TransactionId xmin,
+               catalog_xmin;
    static TimestampTz sendTime = 0;
+
    /* initially true so we always send at least one feedback message */
    static bool master_has_standby_xmin = true;
 
@@ -1211,8 +1214,8 @@ XLogWalRcvSendHSFeedback(bool immed)
     *
     * Bailing out here also ensures that we don't send feedback until we've
     * read our own replication slot state, so we don't tell the master to
-    * discard needed xmin or catalog_xmin from any slots that may exist
-    * on this replica.
+    * discard needed xmin or catalog_xmin from any slots that may exist on
+    * this replica.
     */
    if (!HotStandbyActive())
        return;
@@ -1232,7 +1235,7 @@ XLogWalRcvSendHSFeedback(bool immed)
         * excludes the catalog_xmin.
         */
        xmin = GetOldestXmin(NULL,
-                            PROCARRAY_FLAGS_DEFAULT|PROCARRAY_SLOTS_XMIN);
+                            PROCARRAY_FLAGS_DEFAULT | PROCARRAY_SLOTS_XMIN);
 
        ProcArrayGetReplicationSlotXmin(&slot_xmin, &catalog_xmin);
 
@@ -1253,9 +1256,9 @@ XLogWalRcvSendHSFeedback(bool immed)
    GetNextXidAndEpoch(&nextXid, &xmin_epoch);
    catalog_xmin_epoch = xmin_epoch;
    if (nextXid < xmin)
-       xmin_epoch --;
+       xmin_epoch--;
    if (nextXid < catalog_xmin)
-       catalog_xmin_epoch --;
+       catalog_xmin_epoch--;
 
    elog(DEBUG2, "sending hot standby feedback xmin %u epoch %u catalog_xmin %u catalog_xmin_epoch %u",
         xmin, xmin_epoch, catalog_xmin, catalog_xmin_epoch);
index a899841d835fa996c834a94b3638f0778422b100..49cce388806378d9eaf5885997ef1495b2b514cb 100644 (file)
@@ -197,7 +197,7 @@ static XLogRecPtr logical_startptr = InvalidXLogRecPtr;
 /* A sample associating a WAL location with the time it was written. */
 typedef struct
 {
-   XLogRecPtr lsn;
+   XLogRecPtr  lsn;
    TimestampTz time;
 } WalTimeSample;
 
@@ -207,12 +207,12 @@ typedef struct
 /* A mechanism for tracking replication lag. */
 static struct
 {
-   XLogRecPtr last_lsn;
+   XLogRecPtr  last_lsn;
    WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE];
-   int write_head;
-   int read_heads[NUM_SYNC_REP_WAIT_MODE];
+   int         write_head;
+   int         read_heads[NUM_SYNC_REP_WAIT_MODE];
    WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
-} LagTracker;
+}  LagTracker;
 
 /* Signal handlers */
 static void WalSndSigHupHandler(SIGNAL_ARGS);
@@ -530,7 +530,7 @@ StartReplication(StartReplicationCmd *cmd)
    if (ThisTimeLineID == 0)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                errmsg("IDENTIFY_SYSTEM has not been run before START_REPLICATION")));
+       errmsg("IDENTIFY_SYSTEM has not been run before START_REPLICATION")));
 
    /*
     * We assume here that we're logging enough information in the WAL for
@@ -580,8 +580,8 @@ StartReplication(StartReplicationCmd *cmd)
            sendTimeLineIsHistoric = true;
 
            /*
-            * Check that the timeline the client requested exists, and
-            * the requested start location is on that timeline.
+            * Check that the timeline the client requested exists, and the
+            * requested start location is on that timeline.
             */
            timeLineHistory = readTimeLineHistory(ThisTimeLineID);
            switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
@@ -599,8 +599,8 @@ StartReplication(StartReplicationCmd *cmd)
             * request to start replication from the beginning of the WAL
             * segment that contains switchpoint, but on the new timeline, so
             * that it doesn't end up with a partial segment. If you ask for
-            * too old a starting point, you'll get an error later when we fail
-            * to find the requested WAL segment in pg_wal.
+            * too old a starting point, you'll get an error later when we
+            * fail to find the requested WAL segment in pg_wal.
             *
             * XXX: we could be more strict here and only allow a startpoint
             * that's older than the switchpoint, if it's still in the same
@@ -717,9 +717,9 @@ StartReplication(StartReplicationCmd *cmd)
        MemSet(nulls, false, sizeof(nulls));
 
        /*
-        * Need a tuple descriptor representing two columns.
-        * int8 may seem like a surprising data type for this, but in theory
-        * int4 would not be wide enough for this, as TimeLineID is unsigned.
+        * Need a tuple descriptor representing two columns. int8 may seem
+        * like a surprising data type for this, but in theory int4 would not
+        * be wide enough for this, as TimeLineID is unsigned.
         */
        tupdesc = CreateTemplateTupleDesc(2, false);
        TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli",
@@ -795,7 +795,7 @@ parseCreateReplSlotOptions(CreateReplicationSlotCmd *cmd,
    bool        reserve_wal_given = false;
 
    /* Parse options */
-   foreach (lc, cmd->options)
+   foreach(lc, cmd->options)
    {
        DefElem    *defel = (DefElem *) lfirst(lc);
 
@@ -883,7 +883,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
    if (cmd->kind == REPLICATION_KIND_LOGICAL)
    {
        LogicalDecodingContext *ctx;
-       bool    need_full_snapshot = false;
+       bool        need_full_snapshot = false;
 
        /*
         * Do options check early so that we can bail before calling the
@@ -1255,10 +1255,10 @@ WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId
    TimestampTz now = GetCurrentTimestamp();
 
    /*
-    * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS
-    * to avoid flooding the lag tracker when we commit frequently.
+    * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to
+    * avoid flooding the lag tracker when we commit frequently.
     */
-#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS   1000
+#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS   1000
    if (!TimestampDifferenceExceeds(sendTime, now,
                                    WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS))
        return;
@@ -1474,8 +1474,8 @@ exec_replication_command(const char *cmd_string)
        SnapBuildClearExportedSnapshot();
 
    /*
-    * For aborted transactions, don't allow anything except pure SQL,
-    * the exec_simple_query() will handle it correctly.
+    * For aborted transactions, don't allow anything except pure SQL, the
+    * exec_simple_query() will handle it correctly.
     */
    if (IsAbortedTransactionBlockState() && !IsA(cmd_node, SQLCmd))
        ereport(ERROR,
@@ -1744,7 +1744,7 @@ ProcessStandbyReplyMessage(void)
    bool        clearLagTimes;
    TimestampTz now;
 
-   static bool fullyAppliedLastTime = false;
+   static bool fullyAppliedLastTime = false;
 
    /* the caller already consumed the msgtype byte */
    writePtr = pq_getmsgint64(&reply_message);
@@ -1892,7 +1892,7 @@ TransactionIdInRecentPast(TransactionId xid, uint32 epoch)
    }
 
    if (!TransactionIdPrecedesOrEquals(xid, nextXid))
-       return false;               /* epoch OK, but it's wrapped around */
+       return false;           /* epoch OK, but it's wrapped around */
 
    return true;
 }
@@ -1974,8 +1974,8 @@ ProcessStandbyHSFeedbackMessage(void)
     *
     * If we're using a replication slot we reserve the xmin via that,
     * otherwise via the walsender's PGXACT entry. We can only track the
-    * catalog xmin separately when using a slot, so we store the least
-    * of the two provided when not using a slot.
+   &nbs