Run pgindent, pgperltidy, and reformat-dat-files.
I manually fixed a couple of comments that pgindent uglified.
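(For context on the tooling: pgindent and pgperltidy live under src/tools/pgindent/ and are run from the top of a configured tree, and the catalog .dat files are rewritten with the reformat-dat-files make target under src/include/catalog -- paths from memory, so verify against the tree you have.)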
diag("Time::HiRes::VERSION: $Time::HiRes::VERSION");
# Check that if prove is using msys perl it is for an msys target
-ok(($ENV{__CONFIG_HOST_OS__} || "") eq 'msys',
- "Msys perl used for correct target")
- if $Config{osname} eq 'msys';
+ok( ($ENV{__CONFIG_HOST_OS__} || "") eq 'msys',
+ "Msys perl used for correct target") if $Config{osname} eq 'msys';
ok(1);
done_testing();
cmid = TOAST_COMPRESS_METHOD(&toast_pointer);
switch (cmid)
{
- /* List of all valid compression method IDs */
+ /* List of all valid compression method IDs */
case TOAST_PGLZ_COMPRESSION_ID:
case TOAST_LZ4_COMPRESSION_ID:
valid = true;
break;
- /* Recognized but invalid compression method ID */
+ /* Recognized but invalid compression method ID */
case TOAST_INVALID_COMPRESSION_ID:
break;
- /* Intentionally no default here */
+ /* Intentionally no default here */
}
if (!valid)
report_corruption(ctx,
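As the "Intentionally no default here" comment above hints, leaving the default: label out of a switch over an enum lets the compiler (with -Wswitch) flag any new enumerator that is not handled. A made-up, self-contained illustration of the idiom, not amcheck's actual types:

#include <stdbool.h>

typedef enum
{
	DEMO_METHOD_A,
	DEMO_METHOD_B,
	DEMO_METHOD_INVALID
} demo_method;

static bool
demo_method_is_valid(demo_method m)
{
	bool		valid = false;

	switch (m)
	{
			/* valid methods */
		case DEMO_METHOD_A:
		case DEMO_METHOD_B:
			valid = true;
			break;
			/* recognized but invalid */
		case DEMO_METHOD_INVALID:
			break;
			/* intentionally no default: -Wswitch then warns on unhandled members */
	}
	return valid;
}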
FILE *pipe;
} bbsink_shell;
-void _PG_init(void);
+void _PG_init(void);
static void *shell_check_detail(char *target, char *target_detail);
static bbsink *shell_get_sink(bbsink *next_sink, void *detail_arg);
static void bbsink_shell_begin_archive(bbsink *sink,
- const char *archive_name);
+ const char *archive_name);
static void bbsink_shell_archive_contents(bbsink *sink, size_t len);
static void bbsink_shell_end_archive(bbsink *sink);
static void bbsink_shell_begin_manifest(bbsink *sink);
{
if (shell_required_role[0] != '\0')
{
- Oid roleid;
+ Oid roleid;
StartTransactionCommand();
roleid = get_role_oid(shell_required_role, true);
shell_get_sink(bbsink *next_sink, void *detail_arg)
{
bbsink_shell *sink;
- bool has_detail_escape = false;
- char *c;
+ bool has_detail_escape = false;
+ char *c;
/*
* Set up the bbsink.
/*
* Since we're passing the string provided by the user to popen(), it will
* be interpreted by the shell, which is a potential security
- * vulnerability, since the user invoking this module is not necessarily
- * a superuser. To stay out of trouble, we must disallow any shell
+ * vulnerability, since the user invoking this module is not necessarily a
+ * superuser. To stay out of trouble, we must disallow any shell
* metacharacters here; to be conservative and keep things simple, we
* allow only alphanumerics.
*/
if (sink->target_detail != NULL)
{
- char *d;
- bool scary = false;
+ char *d;
+ bool scary = false;
for (d = sink->target_detail; *d != '\0'; ++d)
{
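The comment above explains why basebackup_to_shell refuses anything but alphanumerics in the target detail before the command string reaches popen(). A minimal standalone sketch of that kind of check, assuming a plain NUL-terminated string; an illustration only, not the module's code:

#include <stdbool.h>
#include <stdio.h>

static bool
detail_is_safe(const char *d)
{
	for (; *d != '\0'; ++d)
	{
		/* allow only [A-Za-z0-9]; anything else could be a shell metacharacter */
		if (!((*d >= 'a' && *d <= 'z') ||
			  (*d >= 'A' && *d <= 'Z') ||
			  (*d >= '0' && *d <= '9')))
			return false;
	}
	return true;
}

int
main(void)
{
	/* prints "1 0": plain names pass, shell syntax is rejected */
	printf("%d %d\n", detail_is_safe("backup01"), detail_is_safe("x; rm -rf /"));
	return 0;
}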
shell_construct_command(char *base_command, const char *filename,
char *target_detail)
{
- StringInfoData buf;
+ StringInfoData buf;
char *c;
initStringInfo(&buf);
static void
shell_finish_command(bbsink_shell *sink)
{
- int pclose_rc;
+ int pclose_rc;
/* There should be a command running. */
Assert(sink->current_command != NULL);
{
/*
* The error we're about to throw would shut down the command
- * anyway, but we may get a more meaningful error message by
- * doing this. If not, we'll fall through to the generic error
- * below.
+ * anyway, but we may get a more meaningful error message by doing
+ * this. If not, we'll fall through to the generic error below.
*/
shell_finish_command(sink);
errno = EPIPE;
# Make sure pg_hba.conf is set up to allow connections from backupuser.
# This is only needed on Windows machines that don't use UNIX sockets.
-$node->init('allows_streaming' => 1,
- 'auth_extra' => [ '--create-role', 'backupuser' ]);
+$node->init(
+ 'allows_streaming' => 1,
+ 'auth_extra' => [ '--create-role', 'backupuser' ]);
$node->append_conf('postgresql.conf',
- "shared_preload_libraries = 'basebackup_to_shell'");
+ "shared_preload_libraries = 'basebackup_to_shell'");
$node->start;
$node->safe_psql('postgres', 'CREATE USER backupuser REPLICATION');
$node->safe_psql('postgres', 'CREATE ROLE trustworthy');
# Can't use this module without setting basebackup_to_shell.command.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target', 'shell' ],
qr/shell command for backup is not configured/,
'fails if basebackup_to_shell.command is not set');
# Configure basebackup_to_shell.command and reload the configuration file.
-my $backup_path = PostgreSQL::Test::Utils::tempdir;
+my $backup_path = PostgreSQL::Test::Utils::tempdir;
my $escaped_backup_path = $backup_path;
-$escaped_backup_path =~ s{\\}{\\\\}g if ($PostgreSQL::Test::Utils::windows_os);
+$escaped_backup_path =~ s{\\}{\\\\}g
+ if ($PostgreSQL::Test::Utils::windows_os);
my $shell_command =
- $PostgreSQL::Test::Utils::windows_os
- ? qq{$gzip --fast > "$escaped_backup_path\\\\%f.gz"}
- : qq{$gzip --fast > "$escaped_backup_path/%f.gz"};
+ $PostgreSQL::Test::Utils::windows_os
+ ? qq{$gzip --fast > "$escaped_backup_path\\\\%f.gz"}
+ : qq{$gzip --fast > "$escaped_backup_path/%f.gz"};
$node->append_conf('postgresql.conf',
- "basebackup_to_shell.command='$shell_command'");
+ "basebackup_to_shell.command='$shell_command'");
$node->reload();
# Should work now.
$node->command_ok(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target', 'shell' ],
'backup with no detail: pg_basebackup');
verify_backup('', $backup_path, "backup with no detail");
# Should fail with a detail.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell:foo' ],
+ [ @pg_basebackup_cmd, '--target', 'shell:foo' ],
qr/a target detail is not permitted because the configured command does not include %d/,
'fails if detail provided without %d');
# Reconfigure to restrict access and require a detail.
$shell_command =
- $PostgreSQL::Test::Utils::windows_os
- ? qq{$gzip --fast > "$escaped_backup_path\\\\%d.%f.gz"}
- : qq{$gzip --fast > "$escaped_backup_path/%d.%f.gz"};
+ $PostgreSQL::Test::Utils::windows_os
+ ? qq{$gzip --fast > "$escaped_backup_path\\\\%d.%f.gz"}
+ : qq{$gzip --fast > "$escaped_backup_path/%d.%f.gz"};
$node->append_conf('postgresql.conf',
- "basebackup_to_shell.command='$shell_command'");
+ "basebackup_to_shell.command='$shell_command'");
$node->append_conf('postgresql.conf',
- "basebackup_to_shell.required_role='trustworthy'");
+ "basebackup_to_shell.required_role='trustworthy'");
$node->reload();
# Should fail due to lack of permission.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target', 'shell' ],
qr/permission denied to use basebackup_to_shell/,
'fails if required_role not granted');
# Should fail due to lack of a detail.
$node->safe_psql('postgres', 'GRANT trustworthy TO backupuser');
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target', 'shell' ],
qr/a target detail is required because the configured command includes %d/,
'fails if %d is present and detail not given');
# Should work.
-$node->command_ok(
- [ @pg_basebackup_cmd, '--target', 'shell:bar' ],
+$node->command_ok([ @pg_basebackup_cmd, '--target', 'shell:bar' ],
'backup with detail: pg_basebackup');
verify_backup('bar.', $backup_path, "backup with detail");
{
my ($prefix, $backup_dir, $test_name) = @_;
- ok(-f "$backup_dir/${prefix}backup_manifest.gz",
- "$test_name: backup_manifest.gz was created");
- ok(-f "$backup_dir/${prefix}base.tar.gz",
- "$test_name: base.tar.gz was created");
+ ok( -f "$backup_dir/${prefix}backup_manifest.gz",
+ "$test_name: backup_manifest.gz was created");
+ ok( -f "$backup_dir/${prefix}base.tar.gz",
+ "$test_name: base.tar.gz was created");
- SKIP: {
+ SKIP:
+ {
my $tar = $ENV{TAR};
skip "no tar program available", 1 if (!defined $tar || $tar eq '');
# Decompress.
system_or_bail($gzip, '-d',
- $backup_dir . '/' . $prefix . 'backup_manifest.gz');
+ $backup_dir . '/' . $prefix . 'backup_manifest.gz');
system_or_bail($gzip, '-d',
- $backup_dir . '/' . $prefix . 'base.tar.gz');
+ $backup_dir . '/' . $prefix . 'base.tar.gz');
# Untar.
my $extract_path = PostgreSQL::Test::Utils::tempdir;
system_or_bail($tar, 'xf', $backup_dir . '/' . $prefix . 'base.tar',
- '-C', $extract_path);
+ '-C', $extract_path);
# Verify.
- $node->command_ok([ 'pg_verifybackup', '-n',
- '-m', "${backup_dir}/${prefix}backup_manifest",
- '-e', $extract_path ],
- "$test_name: backup verifies ok");
+ $node->command_ok(
+ [
+ 'pg_verifybackup', '-n',
+ '-m', "${backup_dir}/${prefix}backup_manifest",
+ '-e', $extract_path
+ ],
+ "$test_name: backup verifies ok");
}
}
PG_MODULE_MAGIC;
-void _PG_init(void);
-void _PG_archive_module_init(ArchiveModuleCallbacks *cb);
+void _PG_init(void);
+void _PG_archive_module_init(ArchiveModuleCallbacks *cb);
static char *archive_directory = NULL;
static MemoryContext basic_archive_context;
/*
* The default value is an empty string, so we have to accept that value.
- * Our check_configured callback also checks for this and prevents archiving
- * from proceeding if it is still empty.
+ * Our check_configured callback also checks for this and prevents
+ * archiving from proceeding if it is still empty.
*/
if (*newval == NULL || *newval[0] == '\0')
return true;
}
/*
- * Do a basic sanity check that the specified archive directory exists. It
+ * Do a basic sanity check that the specified archive directory exists. It
* could be removed at some point in the future, so we still need to be
* prepared for it not to exist in the actual archiving logic.
*/
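The two comments above sketch how basic_archive's GUC check hook behaves: the empty default must be accepted, and the directory gets only a basic sanity check because it could still disappear before archiving runs. A rough sketch of a hook in that shape, using the standard GUC string check-hook signature but otherwise hypothetical names:

#include "postgres.h"

#include <sys/stat.h>

#include "utils/guc.h"

/* hypothetical check hook; only the signature follows the GUC convention */
static bool
check_demo_directory(char **newval, void **extra, GucSource source)
{
	struct stat st;

	/* accept the empty default */
	if (*newval == NULL || (*newval)[0] == '\0')
		return true;

	/* basic sanity check only; archiving code must still cope if it vanishes */
	if (stat(*newval, &st) != 0 || !S_ISDIR(st.st_mode))
	{
		GUC_check_errdetail("Specified directory does not exist.");
		return false;
	}

	return true;
}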
MemoryContext oldcontext;
/*
- * We run basic_archive_file_internal() in our own memory context so that we
- * can easily reset it during error recovery (thus avoiding memory leaks).
+ * We run basic_archive_file_internal() in our own memory context so that
+ * we can easily reset it during error recovery (thus avoiding memory
+ * leaks).
*/
oldcontext = MemoryContextSwitchTo(basic_archive_context);
/*
- * Since the archiver operates at the bottom of the exception stack, ERRORs
- * turn into FATALs and cause the archiver process to restart. However,
- * using ereport(ERROR, ...) when there are problems is easy to code and
- * maintain. Therefore, we create our own exception handler to catch ERRORs
- * and return false instead of restarting the archiver whenever there is a
- * failure.
+ * Since the archiver operates at the bottom of the exception stack,
+ * ERRORs turn into FATALs and cause the archiver process to restart.
+ * However, using ereport(ERROR, ...) when there are problems is easy to
+ * code and maintain. Therefore, we create our own exception handler to
+ * catch ERRORs and return false instead of restarting the archiver
+ * whenever there is a failure.
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
{
snprintf(destination, MAXPGPATH, "%s/%s", archive_directory, file);
/*
- * First, check if the file has already been archived. If it already exists
- * and has the same contents as the file we're trying to archive, we can
- * return success (after ensuring the file is persisted to disk). This
- * scenario is possible if the server crashed after archiving the file but
- * before renaming its .ready file to .done.
+ * First, check if the file has already been archived. If it already
+ * exists and has the same contents as the file we're trying to archive,
+ * we can return success (after ensuring the file is persisted to disk).
+ * This scenario is possible if the server crashed after archiving the
+ * file but before renaming its .ready file to .done.
*
- * If the archive file already exists but has different contents, something
- * might be wrong, so we just fail.
+ * If the archive file already exists but has different contents,
+ * something might be wrong, so we just fail.
*/
if (stat(destination, &st) == 0)
{
archive_directory, "archtemp", file, MyProcPid, epoch);
/*
- * Copy the file to its temporary destination. Note that this will fail if
- * temp already exists.
+ * Copy the file to its temporary destination. Note that this will fail
+ * if temp already exists.
*/
copy_file(unconstify(char *, path), temp);
for (;;)
{
- int nbytes = 0;
- int buf1_len = 0;
- int buf2_len = 0;
+ int nbytes = 0;
+ int buf1_len = 0;
+ int buf2_len = 0;
while (buf1_len < CMP_BUF_SIZE)
{
static int
gbt_boolkey_cmp(const void *a, const void *b, FmgrInfo *flinfo)
{
- boolKEY *ia = (boolKEY *) (((const Nsrt *) a)->t);
- boolKEY *ib = (boolKEY *) (((const Nsrt *) b)->t);
+ boolKEY *ia = (boolKEY *) (((const Nsrt *) a)->t);
+ boolKEY *ib = (boolKEY *) (((const Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
PyObject *key;
key = PLyUnicode_FromStringAndSize(HSTORE_KEY(entries, base, i),
- HSTORE_KEYLEN(entries, i));
+ HSTORE_KEYLEN(entries, i));
if (HSTORE_VALISNULL(entries, i))
PyDict_SetItem(dict, key, Py_None);
else
PyObject *value;
value = PLyUnicode_FromStringAndSize(HSTORE_VAL(entries, base, i),
- HSTORE_VALLEN(entries, i));
+ HSTORE_VALLEN(entries, i));
PyDict_SetItem(dict, key, value);
Py_XDECREF(value);
}
/* verify the special space has the expected size */
if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BrinSpecialSpace)))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "BRIN"),
- errdetail("Expected special size %d, got %d.",
- (int) MAXALIGN(sizeof(BrinSpecialSpace)),
- (int) PageGetSpecialSize(page))));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "BRIN"),
+ errdetail("Expected special size %d, got %d.",
+ (int) MAXALIGN(sizeof(BrinSpecialSpace)),
+ (int) PageGetSpecialSize(page))));
switch (BrinPageType(page))
{
/* verify the special space has the expected size */
if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BrinSpecialSpace)))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "BRIN"),
- errdetail("Expected special size %d, got %d.",
- (int) MAXALIGN(sizeof(BrinSpecialSpace)),
- (int) PageGetSpecialSize(page))));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "BRIN"),
+ errdetail("Expected special size %d, got %d.",
+ (int) MAXALIGN(sizeof(BrinSpecialSpace)),
+ (int) PageGetSpecialSize(page))));
/* verify the special space says this page is what we want */
if (BrinPageType(page) != type)
/* verify the special space has the expected size */
if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GISTPageOpaqueData)))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected special size %d, got %d.",
- (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
- (int) PageGetSpecialSize(page))));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "GiST"),
+ errdetail("Expected special size %d, got %d.",
+ (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
+ (int) PageGetSpecialSize(page))));
opaq = GistPageGetOpaque(page);
if (opaq->gist_page_id != GIST_PAGE_ID)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected %08x, got %08x.",
- GIST_PAGE_ID,
- opaq->gist_page_id)));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "GiST"),
+ errdetail("Expected %08x, got %08x.",
+ GIST_PAGE_ID,
+ opaq->gist_page_id)));
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
/* verify the special space has the expected size */
if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GISTPageOpaqueData)))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected special size %d, got %d.",
- (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
- (int) PageGetSpecialSize(page))));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "GiST"),
+ errdetail("Expected special size %d, got %d.",
+ (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
+ (int) PageGetSpecialSize(page))));
opaq = GistPageGetOpaque(page);
if (opaq->gist_page_id != GIST_PAGE_ID)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page is not a valid %s page", "GiST"),
- errdetail("Expected %08x, got %08x.",
- GIST_PAGE_ID,
- opaq->gist_page_id)));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page is not a valid %s page", "GiST"),
+ errdetail("Expected %08x, got %08x.",
+ GIST_PAGE_ID,
+ opaq->gist_page_id)));
/* Avoid bogus PageGetMaxOffsetNumber() call with deleted pages */
if (GistPageIsDeleted(page))
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
- /* Superusers or roles with the privileges of pg_read_all_stats members are allowed */
+ /*
+ * Superusers or roles with the privileges of pg_read_all_stats members
+ * are allowed
+ */
is_allowed_role = has_privs_of_role(userid, ROLE_PG_READ_ALL_STATS);
/* hash table must exist already */
XLogRecPtr start_lsn, XLogRecPtr end_lsn);
static void GetWALRecordsInfo(FunctionCallInfo fcinfo, XLogRecPtr start_lsn,
XLogRecPtr end_lsn);
-static void GetXLogSummaryStats(XLogStats * stats, ReturnSetInfo *rsinfo,
+static void GetXLogSummaryStats(XLogStats *stats, ReturnSetInfo *rsinfo,
Datum *values, bool *nulls, uint32 ncols,
bool stats_per_record);
static void FillXLogStatsRow(const char *name, uint64 n, uint64 total_count,
LSN_FORMAT_ARGS(lsn))));
private_data = (ReadLocalXLogPageNoWaitPrivate *)
- palloc0(sizeof(ReadLocalXLogPageNoWaitPrivate));
+ palloc0(sizeof(ReadLocalXLogPageNoWaitPrivate));
xlogreader = XLogReaderAllocate(wal_segment_size, NULL,
XL_ROUTINE(.page_read = &read_local_xlog_page_no_wait,
ReadNextXLogRecord(XLogReaderState *xlogreader, XLogRecPtr first_record)
{
XLogRecord *record;
- char *errormsg;
+ char *errormsg;
record = XLogReadRecord(xlogreader, &errormsg);
/* return NULL, if end of WAL is reached */
private_data = (ReadLocalXLogPageNoWaitPrivate *)
- xlogreader->private_data;
+ xlogreader->private_data;
if (private_data->end_of_wal)
return NULL;
Datum *values, bool *nulls, uint32 ncols)
{
const char *id;
- RmgrData desc;
- uint32 fpi_len = 0;
+ RmgrData desc;
+ uint32 fpi_len = 0;
StringInfoData rec_desc;
StringInfoData rec_blk_ref;
- uint32 main_data_len;
- int i = 0;
+ uint32 main_data_len;
+ int i = 0;
desc = GetRmgr(XLogRecGetRmid(record));
id = desc.rm_identify(XLogRecGetInfo(record));
pg_get_wal_record_info(PG_FUNCTION_ARGS)
{
#define PG_GET_WAL_RECORD_INFO_COLS 11
- Datum result;
- Datum values[PG_GET_WAL_RECORD_INFO_COLS];
- bool nulls[PG_GET_WAL_RECORD_INFO_COLS];
+ Datum result;
+ Datum values[PG_GET_WAL_RECORD_INFO_COLS];
+ bool nulls[PG_GET_WAL_RECORD_INFO_COLS];
XLogRecPtr lsn;
XLogRecPtr curr_lsn;
XLogRecPtr first_record;
XLogRecPtr first_record;
XLogReaderState *xlogreader;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- Datum values[PG_GET_WAL_RECORDS_INFO_COLS];
- bool nulls[PG_GET_WAL_RECORDS_INFO_COLS];
+ Datum values[PG_GET_WAL_RECORDS_INFO_COLS];
+ bool nulls[PG_GET_WAL_RECORDS_INFO_COLS];
SetSingleFuncCall(fcinfo, 0);
uint64 tot_len, uint64 total_len,
Datum *values, bool *nulls, uint32 ncols)
{
- double n_pct,
- rec_len_pct,
- fpi_len_pct,
- tot_len_pct;
- int i = 0;
+ double n_pct,
+ rec_len_pct,
+ fpi_len_pct,
+ tot_len_pct;
+ int i = 0;
n_pct = 0;
if (total_count != 0)
Datum *values, bool *nulls, uint32 ncols,
bool stats_per_record)
{
- uint64 total_count = 0;
- uint64 total_rec_len = 0;
- uint64 total_fpi_len = 0;
- uint64 total_len = 0;
- int ri;
+ uint64 total_count = 0;
+ uint64 total_rec_len = 0;
+ uint64 total_fpi_len = 0;
+ uint64 total_len = 0;
+ int ri;
/*
* Each row shows its percentages of the total, so make a first pass to
uint64 rec_len;
uint64 fpi_len;
uint64 tot_len;
- RmgrData desc;
+ RmgrData desc;
if (!RmgrIdIsValid(ri))
continue;
if (stats_per_record)
{
- int rj;
+ int rj;
for (rj = 0; rj < MAX_XLINFO_TYPES; rj++)
{
#define PG_GET_WAL_STATS_COLS 9
XLogRecPtr first_record;
XLogReaderState *xlogreader;
- XLogStats stats;
+ XLogStats stats;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- Datum values[PG_GET_WAL_STATS_COLS];
- bool nulls[PG_GET_WAL_STATS_COLS];
+ Datum values[PG_GET_WAL_STATS_COLS];
+ bool nulls[PG_GET_WAL_STATS_COLS];
SetSingleFuncCall(fcinfo, 0);
{
XLogRecPtr start_lsn;
XLogRecPtr end_lsn;
- bool stats_per_record;
+ bool stats_per_record;
start_lsn = PG_GETARG_LSN(0);
end_lsn = PG_GETARG_LSN(1);
{
XLogRecPtr start_lsn;
XLogRecPtr end_lsn = InvalidXLogRecPtr;
- bool stats_per_record;
+ bool stats_per_record;
start_lsn = PG_GETARG_LSN(0);
stats_per_record = PG_GETARG_BOOL(1);
uint8 *res, unsigned *rlen)
{
OSSLCipher *od = c->ptr;
- int outlen, outlen2;
+ int outlen,
+ outlen2;
if (!od->init)
{
uint8 *res, unsigned *rlen)
{
OSSLCipher *od = c->ptr;
- int outlen, outlen2;
+ int outlen,
+ outlen2;
if (!od->init)
{
if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind) ||
rel->rd_rel->relkind == RELKIND_SEQUENCE)
{
- return pgstat_heap(rel, fcinfo);
+ return pgstat_heap(rel, fcinfo);
}
else if (rel->rd_rel->relkind == RELKIND_INDEX)
{
- switch (rel->rd_rel->relam)
- {
- case BTREE_AM_OID:
- return pgstat_index(rel, BTREE_METAPAGE + 1,
- pgstat_btree_page, fcinfo);
- case HASH_AM_OID:
- return pgstat_index(rel, HASH_METAPAGE + 1,
- pgstat_hash_page, fcinfo);
- case GIST_AM_OID:
- return pgstat_index(rel, GIST_ROOT_BLKNO + 1,
- pgstat_gist_page, fcinfo);
- case GIN_AM_OID:
- err = "gin index";
- break;
- case SPGIST_AM_OID:
- err = "spgist index";
- break;
- case BRIN_AM_OID:
- err = "brin index";
- break;
- default:
- err = "unknown index";
- break;
- }
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("index \"%s\" (%s) is not supported",
- RelationGetRelationName(rel), err)));
+ switch (rel->rd_rel->relam)
+ {
+ case BTREE_AM_OID:
+ return pgstat_index(rel, BTREE_METAPAGE + 1,
+ pgstat_btree_page, fcinfo);
+ case HASH_AM_OID:
+ return pgstat_index(rel, HASH_METAPAGE + 1,
+ pgstat_hash_page, fcinfo);
+ case GIST_AM_OID:
+ return pgstat_index(rel, GIST_ROOT_BLKNO + 1,
+ pgstat_gist_page, fcinfo);
+ case GIN_AM_OID:
+ err = "gin index";
+ break;
+ case SPGIST_AM_OID:
+ err = "spgist index";
+ break;
+ case BRIN_AM_OID:
+ err = "brin index";
+ break;
+ default:
+ err = "unknown index";
+ break;
+ }
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("index \"%s\" (%s) is not supported",
+ RelationGetRelationName(rel), err)));
}
else
{
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot get tuple-level statistics for relation \"%s\"",
- RelationGetRelationName(rel)),
- errdetail_relkind_not_supported(rel->rd_rel->relkind)));
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot get tuple-level statistics for relation \"%s\"",
+ RelationGetRelationName(rel)),
+ errdetail_relkind_not_supported(rel->rd_rel->relkind)));
}
return 0; /* should not happen */
PGresult *res;
/*
- * If requested, consume whatever data is available from the socket.
- * (Note that if all data is available, this allows pgfdw_get_result to
- * call PQgetResult without forcing the overhead of WaitLatchOrSocket,
- * which would be large compared to the overhead of PQconsumeInput.)
+ * If requested, consume whatever data is available from the socket. (Note
+ * that if all data is available, this allows pgfdw_get_result to call
+ * PQgetResult without forcing the overhead of WaitLatchOrSocket, which
+ * would be large compared to the overhead of PQconsumeInput.)
*/
if (consume_input && !PQconsumeInput(conn))
pgfdw_report_error(ERROR, NULL, conn, false, sql);
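The comment above is about the usual libpq pattern of draining the socket with PQconsumeInput() and only calling PQgetResult() once PQisBusy() says it will not block. A minimal sketch of that pattern against plain libpq; a hypothetical helper, not postgres_fdw's pgfdw_get_result():

#include <stddef.h>

#include <libpq-fe.h>

static PGresult *
get_result_if_ready(PGconn *conn)
{
	/* pull any bytes already waiting on the socket into libpq's buffer */
	if (!PQconsumeInput(conn))
		return NULL;			/* connection trouble; caller checks PQerrorMessage() */

	/* if the full result hasn't arrived yet, the caller should wait on the socket */
	if (PQisBusy(conn))
		return NULL;

	/* guaranteed not to block at this point */
	return PQgetResult(conn);
}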
entry = (ConnCacheEntry *) lfirst(lc);
Assert(entry->changing_xact_state);
+
/*
* We might already have received the result on the socket, so pass
* consume_input=true to try to consume it first
entry = (ConnCacheEntry *) lfirst(lc);
Assert(entry->changing_xact_state);
+
/*
* We might already have received the result on the socket, so pass
* consume_input=true to try to consume it first
if (best_path->fdw_private)
{
has_final_sort = boolVal(list_nth(best_path->fdw_private,
- FdwPathPrivateHasFinalSort));
+ FdwPathPrivateHasFinalSort));
has_limit = boolVal(list_nth(best_path->fdw_private,
- FdwPathPrivateHasLimit));
+ FdwPathPrivateHasLimit));
}
if (IS_SIMPLE_REL(foreignrel))
values_end_len = intVal(list_nth(fdw_private,
FdwModifyPrivateLen));
has_returning = boolVal(list_nth(fdw_private,
- FdwModifyPrivateHasReturning));
+ FdwModifyPrivateHasReturning));
retrieved_attrs = (List *) list_nth(fdw_private,
FdwModifyPrivateRetrievedAttrs);
dmstate->query = strVal(list_nth(fsplan->fdw_private,
FdwDirectModifyPrivateUpdateSql));
dmstate->has_returning = boolVal(list_nth(fsplan->fdw_private,
- FdwDirectModifyPrivateHasReturning));
+ FdwDirectModifyPrivateHasReturning));
dmstate->retrieved_attrs = (List *) list_nth(fsplan->fdw_private,
FdwDirectModifyPrivateRetrievedAttrs);
dmstate->set_processed = boolVal(list_nth(fsplan->fdw_private,
- FdwDirectModifyPrivateSetProcessed));
+ FdwDirectModifyPrivateSetProcessed));
/* Create context for per-tuple temp workspace. */
dmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
txn->output_plugin_private = txndata;
/*
- * If asked to skip empty transactions, we'll emit BEGIN at the point where
- * the first operation is received for this transaction.
+ * If asked to skip empty transactions, we'll emit BEGIN at the point
+ * where the first operation is received for this transaction.
*/
if (data->skip_empty_xacts)
return;
txn->output_plugin_private = txndata;
/*
- * If asked to skip empty transactions, we'll emit BEGIN at the point where
- * the first operation is received for this transaction.
+ * If asked to skip empty transactions, we'll emit BEGIN at the point
+ * where the first operation is received for this transaction.
*/
if (data->skip_empty_xacts)
return;
/*
* Catalog snapshots can be returned by GetOldestSnapshot() even if not
* registered or active. That easily hides bugs around not having a
- * snapshot set up - most of the time there is a valid catalog
- * snapshot. So additionally insist that the current snapshot is
- * registered or active.
+ * snapshot set up - most of the time there is a valid catalog snapshot.
+ * So additionally insist that the current snapshot is registered or
+ * active.
*/
Assert(HaveRegisteredOrActiveSnapshot());
/*
* Tuple visibility is only computed once for each tuple, for correctness
- * and efficiency reasons; see comment in heap_page_prune() for
- * details. This is of type int8[,] instead of HTSV_Result[], so we can use
- * -1 to indicate no visibility has been computed, e.g. for LP_DEAD items.
+ * and efficiency reasons; see comment in heap_page_prune() for details.
+ * This is of type int8[], instead of HTSV_Result[], so we can use -1 to
+ * indicate no visibility has been computed, e.g. for LP_DEAD items.
*
* Same indexing as ->marked.
*/
*/
if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
{
- int ndeleted,
- nnewlpdead;
+ int ndeleted,
+ nnewlpdead;
ndeleted = heap_page_prune(relation, buffer, vistest, limited_xmin,
limited_ts, &nnewlpdead, NULL);
GlobalVisState *vistest,
TransactionId old_snap_xmin,
TimestampTz old_snap_ts,
- int *nnewlpdead,
+ int *nnewlpdead,
OffsetNumber *off_loc)
{
int ndeleted = 0;
PGRUsage ru0;
TimestampTz starttime = 0;
PgStat_Counter startreadtime = 0,
- startwritetime = 0;
+ startwritetime = 0;
WalUsage startwalusage = pgWalUsage;
int64 StartPageHit = VacuumPageHit,
StartPageMiss = VacuumPageMiss,
* dead_items space is not CPU cache resident.
*
* We don't take any special steps to remember the LP_DEAD items (such
- * as counting them in our final update to the stats system) when
- * the optimization is applied. Though the accounting used in
- * analyze.c's acquire_sample_rows() will recognize the same LP_DEAD
- * items as dead rows in its own stats report, that's okay.
- * The discrepancy should be negligible. If this optimization is ever
- * expanded to cover more cases then this may need to be reconsidered.
+ * as counting them in our final update to the stats system) when the
+ * optimization is applied. Though the accounting used in analyze.c's
+ * acquire_sample_rows() will recognize the same LP_DEAD items as dead
+ * rows in its own stats report, that's okay. The discrepancy should
+ * be negligible. If this optimization is ever expanded to cover more
+ * cases then this may need to be reconsidered.
*/
threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
bypass = (vacrel->lpdead_item_pages < threshold &&
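For scale: with BYPASS_THRESHOLD_PAGES at 2% of rel_pages (its value in vacuumlazy.c as far as I recall), a 50,000-page table takes this bypass only while fewer than 1,000 pages carry LP_DEAD items, and only if the rest of the bypass test above also passes.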
parsed.tsId, xlrec->initfileinval);
/*
- * Check if the replication origin has been set in this record in the
- * same way as PrepareRedoAdd().
+ * Check if the replication origin has been set in this record in the same
+ * way as PrepareRedoAdd().
*/
if (origin_id != InvalidRepOriginId)
appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
bool detailed_format, StringInfo buf,
uint32 *fpi_len)
{
- int block_id;
+ int block_id;
Assert(record != NULL);
#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \
{ name, redo, desc, identify, startup, cleanup, mask, decode },
-RmgrData RmgrTable[RM_MAX_ID + 1] = {
+RmgrData RmgrTable[RM_MAX_ID + 1] = {
#include "access/rmgrlist.h"
};
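RmgrTable above is filled in by defining PG_RMGR and then pulling in access/rmgrlist.h, the classic X-macro arrangement. A generic, made-up illustration of the pattern, not PostgreSQL's actual list:

/* one central list, expanded differently at each use site */
#define COLOR_LIST \
	X(RED)   \
	X(GREEN) \
	X(BLUE)

/* expansion 1: build an enum */
#define X(name) COLOR_##name,
typedef enum
{
	COLOR_LIST
	COLOR_COUNT
} color;
#undef X

/* expansion 2: build a parallel name table, kept in sync automatically */
#define X(name) #name,
static const char *color_names[] = {COLOR_LIST};
#undef X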
if (!pg_strcasecmp(RmgrTable[existing_rmid].rm_name, rmgr->rm_name))
ereport(ERROR,
- (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
- errdetail("Existing resource manager with ID %d has the same name.", existing_rmid)));
+ (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
+ errdetail("Existing resource manager with ID %d has the same name.", existing_rmid)));
}
/* register it */
if (hdr.nabortstats > 0)
{
save_state_data(abortstats,
- hdr.nabortstats * sizeof(xl_xact_stats_item));
+ hdr.nabortstats * sizeof(xl_xact_stats_item));
pfree(abortstats);
}
if (hdr.ninvalmsgs > 0)
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
abortrels = (RelFileNode *) bufptr;
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
- commitstats = (xl_xact_stats_item*) bufptr;
+ commitstats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
- abortstats = (xl_xact_stats_item*) bufptr;
+ abortstats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(hdr->nabortstats * sizeof(xl_xact_stats_item));
invalmsgs = (SharedInvalidationMessage *) bufptr;
bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
bool fullPageWrites;
/*
- * runningBackups is a counter indicating the number of backups currently in
- * progress. forcePageWrites is set to true when runningBackups is non-zero.
- * lastBackupStart is the latest checkpoint redo location used as a starting
- * point for an online backup.
+ * runningBackups is a counter indicating the number of backups currently
+ * in progress. forcePageWrites is set to true when runningBackups is
+ * non-zero. lastBackupStart is the latest checkpoint redo location used
+ * as a starting point for an online backup.
*/
int runningBackups;
XLogRecPtr lastBackupStart;
* When recovering from a backup (we are in recovery, and archive recovery
* was requested), complain if we did not roll forward far enough to reach
* the point where the database is consistent. For regular online
- * backup-from-primary, that means reaching the end-of-backup WAL record (at
- * which point we reset backupStartPoint to be Invalid), for
+ * backup-from-primary, that means reaching the end-of-backup WAL record
+ * (at which point we reset backupStartPoint to be Invalid), for
* backup-from-replica (which can't inject records into the WAL stream),
* that point is when we reach the minRecoveryPoint in pg_control (which
- * we purposfully copy last when backing up from a replica). For pg_rewind
- * (which creates a backup_label with a method of "pg_rewind") or
- * snapshot-style backups (which don't), backupEndRequired will be set to
- * false.
+ * we purposefully copy last when backing up from a replica). For
+ * pg_rewind (which creates a backup_label with a method of "pg_rewind")
+ * or snapshot-style backups (which don't), backupEndRequired will be set
+ * to false.
*
* Note: it is indeed okay to look at the local variable
* LocalMinRecoveryPoint here, even though ControlFile->minRecoveryPoint
/*
* Ran off end of WAL before reaching end-of-backup WAL record, or
* minRecoveryPoint. That's a bad sign, indicating that you tried to
- * recover from an online backup but never called pg_backup_stop(),
- * or you didn't archive all the WAL needed.
+ * recover from an online backup but never called pg_backup_stop(), or
+ * you didn't archive all the WAL needed.
*/
if (ArchiveRecoveryRequested || ControlFile->backupEndRequired)
{
WALInsertLockAcquireExclusive();
/*
- * It is expected that each do_pg_backup_start() call is matched by exactly
- * one do_pg_backup_stop() call.
+ * It is expected that each do_pg_backup_start() call is matched by
+ * exactly one do_pg_backup_stop() call.
*/
Assert(XLogCtl->Insert.runningBackups > 0);
XLogCtl->Insert.runningBackups--;
}
/*
- * Timeline history files are given the highest archival priority to
- * lower the chance that a promoted standby will choose a timeline that
- * is already in use. However, the archiver ordinarily tries to gather
+ * Timeline history files are given the highest archival priority to lower
+ * the chance that a promoted standby will choose a timeline that is
+ * already in use. However, the archiver ordinarily tries to gather
* multiple files to archive from each scan of the archive_status
- * directory, which means that newly created timeline history files
- * could be left unarchived for a while. To ensure that the archiver
- * picks up timeline history files as soon as possible, we force the
- * archiver to scan the archive_status directory the next time it looks
- * for a file to archive.
+ * directory, which means that newly created timeline history files could
+ * be left unarchived for a while. To ensure that the archiver picks up
+ * timeline history files as soon as possible, we force the archiver to
+ * scan the archive_status directory the next time it looks for a file to
+ * archive.
*/
if (IsTLHistoryFileName(xlog))
PgArchForceDirScan();
errmsg("a backup is already in progress in this session")));
/*
- * Label file and tablespace map file need to be long-lived, since
- * they are read in pg_backup_stop.
+ * Label file and tablespace map file need to be long-lived, since they
+ * are read in pg_backup_stop.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
label_file = makeStringInfo();
errhint("Did you call pg_backup_start()?")));
/*
- * Stop the backup. Return a copy of the backup label and tablespace map so
- * they can be written to disk by the caller.
+ * Stop the backup. Return a copy of the backup label and tablespace map
+ * so they can be written to disk by the caller.
*/
stoppoint = do_pg_backup_stop(label_file->data, waitforarchive, NULL);
* method was used) or if this label came from somewhere else (the only
* other option today being from pg_rewind). If this was a streamed
* backup then we know that we need to play through until we get to the
- * end of the WAL which was generated during the backup (at which point
- * we will have reached consistency and backupEndRequired will be reset
- * to be false).
+ * end of the WAL which was generated during the backup (at which point we
+ * will have reached consistency and backupEndRequired will be reset to be
+ * false).
*/
if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
{
/*
* Have we passed our safe starting point? Note that minRecoveryPoint is
- * known to be incorrectly set if recovering from a backup, until
- * the XLOG_BACKUP_END arrives to advise us of the correct
- * minRecoveryPoint. All we know prior to that is that we're not
- * consistent yet.
+ * known to be incorrectly set if recovering from a backup, until the
+ * XLOG_BACKUP_END arrives to advise us of the correct minRecoveryPoint.
+ * All we know prior to that is that we're not consistent yet.
*/
if (!reachedConsistency && !backupEndRequired &&
minRecoveryPoint <= lastReplayedEndRecPtr)
HandleStartupProcInterrupts();
}
- return XLREAD_FAIL; /* not reached */
+ return XLREAD_FAIL; /* not reached */
}
XLogRecGetLen(XLogReaderState *record, uint32 *rec_len,
uint32 *fpi_len)
{
- int block_id;
+ int block_id;
/*
* Calculate the amount of FPI data in the record.
void
XLogRecStoreStats(XLogStats *stats, XLogReaderState *record)
{
- RmgrId rmid;
- uint8 recid;
- uint32 rec_len;
- uint32 fpi_len;
+ RmgrId rmid;
+ uint8 recid;
+ uint32 rec_len;
+ uint32 fpi_len;
Assert(stats != NULL && record != NULL);
static HTAB *invalid_page_tab = NULL;
-static int
-read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
- int reqLen, XLogRecPtr targetRecPtr,
- char *cur_page, bool wait_for_wal);
+static int read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
+ int reqLen, XLogRecPtr targetRecPtr,
+ char *cur_page, bool wait_for_wal);
/* Report a reference to an invalid page */
static void
* archive in the timeline will get renamed to .partial by
* StartupXLOG().
*
- * If that happens after our caller determined the TLI but before
- * we actually read the xlog page, we might still try to read from the
+ * If that happens after our caller determined the TLI but before we
+ * actually read the xlog page, we might still try to read from the
* old (now renamed) segment and fail. There's not much we can do
* about this, but it can only happen when we're a leaf of a cascading
* standby whose primary gets promoted while we're decoding, so a
* end of WAL has been reached.
*/
private_data = (ReadLocalXLogPageNoWaitPrivate *)
- state->private_data;
+ state->private_data;
private_data->end_of_wal = true;
break;
}
my $is_varlen = 0;
my $is_client_code = 0;
- $catalog{columns} = [];
- $catalog{toasting} = [];
- $catalog{indexing} = [];
- $catalog{other_oids} = [];
+ $catalog{columns} = [];
+ $catalog{toasting} = [];
+ $catalog{indexing} = [];
+ $catalog{other_oids} = [];
$catalog{foreign_keys} = [];
- $catalog{client_code} = [];
+ $catalog{client_code} = [];
open(my $ifh, '<', $input_file) || die "$input_file: $!";
push @{ $catalog{toasting} },
{ parent_table => $1, toast_oid => $2, toast_index_oid => $3 };
}
- elsif (/^DECLARE_TOAST_WITH_MACRO\(\s*(\w+),\s*(\d+),\s*(\d+),\s*(\w+),\s*(\w+)\)/)
+ elsif (
+ /^DECLARE_TOAST_WITH_MACRO\(\s*(\w+),\s*(\d+),\s*(\d+),\s*(\w+),\s*(\w+)\)/
+ )
{
push @{ $catalog{toasting} },
{
};
}
elsif (
- /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(\w+),\s*(.+)\)/)
+ /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(\w+),\s*(.+)\)/
+ )
{
push @{ $catalog{indexing} },
{
is_unique => $1 ? 1 : 0,
is_pkey => $2 ? 1 : 0,
- index_name => $3,
- index_oid => $4,
+ index_name => $3,
+ index_oid => $4,
index_oid_macro => $5,
- index_decl => $6
+ index_decl => $6
};
}
elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/)
Catalog::RenameTempFile($fk_info_file, $tmpext);
Catalog::RenameTempFile($constraints_file, $tmpext);
-exit ($num_errors != 0 ? 1 : 0);
+exit($num_errors != 0 ? 1 : 0);
#################### Subroutines ########################
# Copy the type data from pg_type, and add some type-dependent items
my $type = $types{$atttype};
- $row->{atttypid} = $type->{oid};
- $row->{attlen} = $type->{typlen};
- $row->{attbyval} = $type->{typbyval};
- $row->{attalign} = $type->{typalign};
- $row->{attstorage} = $type->{typstorage};
+ $row->{atttypid} = $type->{oid};
+ $row->{attlen} = $type->{typlen};
+ $row->{attbyval} = $type->{typbyval};
+ $row->{attalign} = $type->{typalign};
+ $row->{attstorage} = $type->{typstorage};
# set attndims if it's an array type
$row->{attndims} = $type->{typcategory} eq 'A' ? '1' : '0';
if (!OidIsValid(binary_upgrade_next_toast_pg_class_relfilenode))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("toast relfilenode value not set when in binary upgrade mode")));
+ errmsg("toast relfilenode value not set when in binary upgrade mode")));
relfilenode = binary_upgrade_next_toast_pg_class_relfilenode;
binary_upgrade_next_toast_pg_class_relfilenode = InvalidOid;
* remove the disk file again.)
*
* NB: Note that passing create_storage = true is correct even for binary
- * upgrade. The storage we create here will be replaced later, but we need
- * to have something on disk in the meanwhile.
+ * upgrade. The storage we create here will be replaced later, but we
+ * need to have something on disk in the meanwhile.
*/
new_rel_desc = heap_create(relname,
relnamespace,
/*
* If this constraint has a parent constraint which we have not seen
* yet, keep track of it for the second loop, below. Tracking parent
- * constraints allows us to climb up to the top-level constraint
- * and look for all possible relations referencing the partitioned
- * table.
+ * constraints allows us to climb up to the top-level constraint and
+ * look for all possible relations referencing the partitioned table.
*/
if (OidIsValid(con->conparentid) &&
!list_member_oid(parent_cons, con->conparentid))
binary_upgrade_next_index_pg_class_relfilenode = InvalidOid;
/*
- * Note that we want create_storage = true for binary upgrade.
- * The storage we create here will be replaced later, but we need
- * to have something on disk in the meanwhile.
+ * Note that we want create_storage = true for binary upgrade. The
+ * storage we create here will be replaced later, but we need to
+ * have something on disk in the meanwhile.
*/
Assert(create_storage);
}
*/
void
RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId,
- bool is_internal)
+ bool is_internal)
{
ObjectAccessPostCreate pc_arg;
pc_arg.is_internal = is_internal;
(*object_access_hook_str) (OAT_POST_CREATE,
- classId, objectName, subId,
- (void *) &pc_arg);
+ classId, objectName, subId,
+ (void *) &pc_arg);
}
/*
*/
void
RunObjectDropHookStr(Oid classId, const char *objectName, int subId,
- int dropflags)
+ int dropflags)
{
ObjectAccessDrop drop_arg;
drop_arg.dropflags = dropflags;
(*object_access_hook_str) (OAT_DROP,
- classId, objectName, subId,
- (void *) &drop_arg);
+ classId, objectName, subId,
+ (void *) &drop_arg);
}
/*
Assert(object_access_hook_str != NULL);
(*object_access_hook_str) (OAT_TRUNCATE,
- RelationRelationId, objectName, 0,
- NULL);
+ RelationRelationId, objectName, 0,
+ NULL);
}
/*
*/
void
RunObjectPostAlterHookStr(Oid classId, const char *objectName, int subId,
- Oid auxiliaryId, bool is_internal)
+ Oid auxiliaryId, bool is_internal)
{
ObjectAccessPostAlter pa_arg;
pa_arg.is_internal = is_internal;
(*object_access_hook_str) (OAT_POST_ALTER,
- classId, objectName, subId,
- (void *) &pa_arg);
+ classId, objectName, subId,
+ (void *) &pa_arg);
}
/*
ns_arg.result = true;
(*object_access_hook_str) (OAT_NAMESPACE_SEARCH,
- NamespaceRelationId, objectName, 0,
- (void *) &ns_arg);
+ NamespaceRelationId, objectName, 0,
+ (void *) &ns_arg);
return ns_arg.result;
}
Assert(object_access_hook_str != NULL);
(*object_access_hook_str) (OAT_FUNCTION_EXECUTE,
- ProcedureRelationId, objectName, 0,
- NULL);
+ ProcedureRelationId, objectName, 0,
+ NULL);
}
for (i = 0; i < numFkDeleteSetCols; i++)
fkdatums[i] = Int16GetDatum(fkDeleteSetCols[i]);
confdelsetcolsArray = construct_array(fkdatums, numFkDeleteSetCols,
- INT2OID, 2, true, TYPALIGN_SHORT);
+ INT2OID, 2, true, TYPALIGN_SHORT);
}
else
confdelsetcolsArray = NULL;
}
else
{
- int num_delete_cols;
+ int num_delete_cols;
arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
if (ARR_NDIM(arr) != 1 ||
num_delete_cols = ARR_DIMS(arr)[0];
memcpy(fk_del_set_cols, ARR_DATA_PTR(arr), num_delete_cols * sizeof(int16));
if ((Pointer) arr != DatumGetPointer(adatum))
- pfree(arr); /* free de-toasted copy, if any */
+ pfree(arr); /* free de-toasted copy, if any */
*num_fk_del_set_cols = num_delete_cols;
}
check_publication_add_relation(targetrel);
/*
- * Translate column names to attnums and make sure the column list contains
- * only allowed elements (no system or generated columns etc.). Also build
- * an array of attnums, for storing in the catalog.
+ * Translate column names to attnums and make sure the column list
+ * contains only allowed elements (no system or generated columns etc.).
+ * Also build an array of attnums, for storing in the catalog.
*/
publication_translate_columns(pri->relation, pri->columns,
&natts, &attarray);
ArrayType *arr;
int nelems;
int16 *elems;
- MemoryContext oldcxt = NULL;
+ MemoryContext oldcxt = NULL;
/*
- * If an existing bitmap was provided, use it. Otherwise just use NULL
- * and build a new bitmap.
+ * If an existing bitmap was provided, use it. Otherwise just use NULL and
+ * build a new bitmap.
*/
if (columns)
result = columns;
* is in progress.
*
* The truncation operation might drop buffers that the checkpoint
- * otherwise would have flushed. If it does, then it's essential that
- * the files actually get truncated on disk before the checkpoint record
- * is written. Otherwise, if replay begins from that checkpoint, the
+ * otherwise would have flushed. If it does, then it's essential that the
+ * files actually get truncated on disk before the checkpoint record is
+ * written. Otherwise, if replay begins from that checkpoint, the
* to-be-truncated blocks might still exist on disk but have older
- * contents than expected, which can cause replay to fail. It's OK for
- * the blocks to not exist on disk at all, but not for them to have the
- * wrong contents.
+ * contents than expected, which can cause replay to fail. It's OK for the
+ * blocks to not exist on disk at all, but not for them to have the wrong
+ * contents.
*/
Assert((MyProc->delayChkptFlags & DELAY_CHKPT_COMPLETE) == 0);
MyProc->delayChkptFlags |= DELAY_CHKPT_COMPLETE;
*/
if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- List *idxs = RelationGetIndexList(onerel);
+ List *idxs = RelationGetIndexList(onerel);
Irel = NULL;
nindexes = 0;
}
/*
- * Now report ANALYZE to the cumulative stats system. For regular tables, we do
- * it only if not doing inherited stats. For partitioned tables, we only
- * do it for inherited stats. (We're never called for not-inherited stats
- * on partitioned tables anyway.)
+ * Now report ANALYZE to the cumulative stats system. For regular tables,
+ * we do it only if not doing inherited stats. For partitioned tables, we
+ * only do it for inherited stats. (We're never called for not-inherited
+ * stats on partitioned tables anyway.)
*
* Reset the changes_since_analyze counter only if we analyzed all
* columns; otherwise, there is still work for auto-analyze to do.
/*
* Nondeterministic collations are currently only supported with ICU
- * because that's the only case where it can actually make a difference.
- * So we can save writing the code for the other providers.
+ * because that's the only case where it can actually make a
+ * difference. So we can save writing the code for the other
+ * providers.
*/
if (!collisdeterministic && collprovider != COLLPROVIDER_ICU)
ereport(ERROR,
break;
default:
{
- char *sval = defGetString(def);
+ char *sval = defGetString(def);
/*
* The set of strings accepted here should match up with the
break;
}
ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("%s requires a Boolean value or \"match\"",
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("%s requires a Boolean value or \"match\"",
def->defname)));
return COPY_HEADER_FALSE; /* keep compiler quiet */
}
errmsg("column name mismatch in header line field %d: got null value (\"%s\"), expected \"%s\"",
fldnum, cstate->opts.null_print, NameStr(attr->attname))));
- if (namestrcmp(&attr->attname, colName) != 0) {
+ if (namestrcmp(&attr->attname, colName) != 0)
+ {
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("column name mismatch in header line field %d: got \"%s\", expected \"%s\"",
* locks on the source table(s).
*/
rewritten = pg_analyze_and_rewrite_fixedparams(raw_query,
- pstate->p_sourcetext, NULL, 0,
- NULL);
+ pstate->p_sourcetext, NULL, 0,
+ NULL);
/* check that we got back something we can work with */
if (rewritten == NIL)
if (cstate->opts.csv_mode)
CopyAttributeOutCSV(cstate, colname, false,
- list_length(cstate->attnumlist) == 1);
+ list_length(cstate->attnumlist) == 1);
else
CopyAttributeOutText(cstate, colname);
}
*
* We typically do not read relation data into shared_buffers without
* holding a relation lock. It's unclear what could go wrong if we
- * skipped it in this case, because nobody can be modifying either
- * the source or destination database at this point, and we have locks
- * on both databases, too, but let's take the conservative route.
+ * skipped it in this case, because nobody can be modifying either the
+ * source or destination database at this point, and we have locks on
+ * both databases, too, but let's take the conservative route.
*/
dstrelid.relId = srcrelid.relId = relinfo->reloid;
LockRelationId(&srcrelid, AccessShareLock);
/*
* We can't use a real relcache entry for a relation in some other
- * database, but since we're only going to access the fields related
- * to physical storage, a fake one is good enough. If we didn't do this
- * and used the smgr layer directly, we would have to worry about
+ * database, but since we're only going to access the fields related to
+ * physical storage, a fake one is good enough. If we didn't do this and
+ * used the smgr layer directly, we would have to worry about
* invalidations.
*/
rel = CreateFakeRelcacheEntry(rnode);
char *srcpath, List *rnodelist,
Snapshot snapshot)
{
- BlockNumber blkno = BufferGetBlockNumber(buf);
- OffsetNumber offnum;
- OffsetNumber maxoff;
- HeapTupleData tuple;
+ BlockNumber blkno = BufferGetBlockNumber(buf);
+ OffsetNumber offnum;
+ OffsetNumber maxoff;
+ HeapTupleData tuple;
maxoff = PageGetMaxOffsetNumber(page);
CreateDBRelInfo *relinfo;
/*
- * ScanSourceDatabasePgClassTuple is in charge of constructing
- * a CreateDBRelInfo object for this tuple, but can also decide
- * that this tuple isn't something we need to copy. If we do need
- * to copy the relation, add it to the list.
+ * ScanSourceDatabasePgClassTuple is in charge of constructing a
+ * CreateDBRelInfo object for this tuple, but can also decide that
+ * this tuple isn't something we need to copy. If we do need to
+ * copy the relation, add it to the list.
*/
relinfo = ScanSourceDatabasePgClassTuple(&tuple, tbid, dbid,
srcpath);
ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid,
char *srcpath)
{
- CreateDBRelInfo *relinfo;
- Form_pg_class classForm;
- Oid relfilenode = InvalidOid;
+ CreateDBRelInfo *relinfo;
+ Form_pg_class classForm;
+ Oid relfilenode = InvalidOid;
classForm = (Form_pg_class) GETSTRUCT(tuple);
*
* Shared objects don't need to be copied, because they are shared.
* Objects without storage can't be copied, because there's nothing to
- * copy. Temporary relations don't need to be copied either, because
- * they are inaccessible outside of the session that created them,
- * which must be gone already, and couldn't connect to a different database
- * if it still existed. autovacuum will eventually remove the pg_class
- * entries as well.
+ * copy. Temporary relations don't need to be copied either, because they
+ * are inaccessible outside of the session that created them, which must
+ * be gone already, and couldn't connect to a different database if it
+ * still existed. autovacuum will eventually remove the pg_class entries
+ * as well.
*/
if (classForm->reltablespace == GLOBALTABLESPACE_OID ||
!RELKIND_HAS_STORAGE(classForm->relkind) ||
DefElem *dcollate = NULL;
DefElem *dctype = NULL;
DefElem *diculocale = NULL;
- DefElem *dlocprovider = NULL;
+ DefElem *dlocprovider = NULL;
DefElem *distemplate = NULL;
DefElem *dallowconnections = NULL;
DefElem *dconnlimit = NULL;
/*
* We don't normally permit new databases to be created with
* system-assigned OIDs. pg_upgrade tries to preserve database
- * OIDs, so we can't allow any database to be created with an
- * OID that might be in use in a freshly-initialized cluster
- * created by some future version. We assume all such OIDs will
- * be from the system-managed OID range.
+ * OIDs, so we can't allow any database to be created with an OID
+ * that might be in use in a freshly-initialized cluster created
+ * by some future version. We assume all such OIDs will be from
+ * the system-managed OID range.
*
* As an exception, however, we permit any OID to be assigned when
* allow_system_table_mods=on (so that initdb can assign system
InvokeObjectPostCreateHook(DatabaseRelationId, dboid, 0);
/*
- * If we're going to be reading data for the to-be-created database
- * into shared_buffers, take a lock on it. Nobody should know that this
+ * If we're going to be reading data for the to-be-created database into
+ * shared_buffers, take a lock on it. Nobody should know that this
* database exists yet, but it's good to maintain the invariant that a
* lock an AccessExclusiveLock on the database is sufficient to drop all
* of its buffers without worrying about more being read later.
*
- * Note that we need to do this before entering the PG_ENSURE_ERROR_CLEANUP
- * block below, because createdb_failure_callback expects this lock to
- * be held already.
+ * Note that we need to do this before entering the
+ * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback
+ * expects this lock to be held already.
*/
if (dbstrategy == CREATEDB_WAL_LOG)
LockSharedObject(DatabaseRelationId, dboid, 0, AccessShareLock);
if (rte->tablefunc)
if (rte->tablefunc->functype == TFT_XMLTABLE)
objectname = "xmltable";
- else /* Must be TFT_JSON_TABLE */
+ else /* Must be TFT_JSON_TABLE */
objectname = "json_table";
else
objectname = NULL;
CommandCounterIncrement();
stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
- sql,
- NULL,
- 0,
- NULL);
+ sql,
+ NULL,
+ 0,
+ NULL);
stmt_list = pg_plan_queries(stmt_list, sql, CURSOR_OPT_PARALLEL_OK, NULL);
foreach(lc2, stmt_list)
/*
* Inform cumulative stats system about our activity: basically, we
* truncated the matview and inserted some new data. (The concurrent
- * code path above doesn't need to worry about this because the inserts
- * and deletes it issues get counted by lower-level code.)
+ * code path above doesn't need to worry about this because the
+ * inserts and deletes it issues get counted by lower-level code.)
*/
pgstat_count_truncate(matviewRel);
if (!stmt->skipData)
*/
bool
pub_rf_contains_invalid_column(Oid pubid, Relation relation, List *ancestors,
- bool pubviaroot)
+ bool pubviaroot)
{
HeapTuple rftuple;
Oid relid = RelationGetRelid(relation);
*/
bool
pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestors,
- bool pubviaroot)
+ bool pubviaroot)
{
HeapTuple tuple;
Oid relid = RelationGetRelid(relation);
/*
* For a partition, if pubviaroot is true, find the topmost ancestor that
- * is published via this publication as we need to use its column list
- * for the changes.
+ * is published via this publication as we need to use its column list for
+ * the changes.
*
* Note that even though the column list used is for an ancestor, the
* REPLICA IDENTITY used will be for the actual child table.
}
tuple = SearchSysCache2(PUBLICATIONRELMAP,
- ObjectIdGetDatum(publish_as_relid),
- ObjectIdGetDatum(pubid));
+ ObjectIdGetDatum(publish_as_relid),
+ ObjectIdGetDatum(pubid));
if (!HeapTupleIsValid(tuple))
return false;
datum = SysCacheGetAttr(PUBLICATIONRELMAP, tuple,
- Anum_pg_publication_rel_prattrs,
- &isnull);
+ Anum_pg_publication_rel_prattrs,
+ &isnull);
if (!isnull)
{
- int x;
+ int x;
Bitmapset *idattrs;
Bitmapset *columns = NULL;
/*
* Attnums in the bitmap returned by RelationGetIndexAttrBitmap are
* offset (to handle system columns the usual way), while column list
- * does not use offset, so we can't do bms_is_subset(). Instead, we have
- * to loop over the idattrs and check all of them are in the list.
+ * does not use offset, so we can't do bms_is_subset(). Instead, we
+ * have to loop over the idattrs and check all of them are in the
+ * list.
*/
x = -1;
while ((x = bms_next_member(idattrs, x)) >= 0)
/*
* If pubviaroot is true, we are validating the column list of the
* parent table, but the bitmap contains the replica identity
- * information of the child table. The parent/child attnums may not
- * match, so translate them to the parent - get the attname from
- * the child, and look it up in the parent.
+ * information of the child table. The parent/child attnums may
+ * not match, so translate them to the parent - get the attname
+ * from the child, and look it up in the parent.
*/
if (pubviaroot)
{
/* attribute name in the child table */
- char *colname = get_attname(relid, attnum, false);
+ char *colname = get_attname(relid, attnum, false);
/*
* Determine the attnum for the attribute name in parent (we
*/
static void
CheckPubRelationColumnList(List *tables, const char *queryString,
- bool pubviaroot)
+ bool pubviaroot)
{
ListCell *lc;
publish_via_partition_root);
CheckPubRelationColumnList(rels, pstate->p_sourcetext,
- publish_via_partition_root);
+ publish_via_partition_root);
PublicationAddTables(puboid, rels, true, NULL);
CloseTableList(rels);
/* Transform the int2vector column list to a bitmap. */
columnListDatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple,
- Anum_pg_publication_rel_prattrs,
- &isnull);
+ Anum_pg_publication_rel_prattrs,
+ &isnull);
if (!isnull)
oldcolumns = pub_collist_to_bitmapset(NULL, columnListDatum, NULL);
foreach(newlc, rels)
{
PublicationRelInfo *newpubrel;
- Oid newrelid;
- Bitmapset *newcolumns = NULL;
+ Oid newrelid;
+ Bitmapset *newcolumns = NULL;
newpubrel = (PublicationRelInfo *) lfirst(newlc);
newrelid = RelationGetRelid(newpubrel->relation);
/*
- * If the new publication has column list, transform it to
- * a bitmap too.
+ * If the new publication has a column list, transform it to a
+ * bitmap too.
*/
if (newpubrel->columns)
{
nattnums++;
ReleaseSysCache(atttuple);
}
- else if (IsA(selem->expr, Var)) /* column reference in parens */
+ else if (IsA(selem->expr, Var)) /* column reference in parens */
{
- Var *var = (Var *) selem->expr;
+ Var *var = (Var *) selem->expr;
TypeCacheEntry *type;
/* Disallow use of system attributes in extended stats */
while ((k = bms_next_member(attnums, k)) >= 0)
{
AttrNumber attnum = k + FirstLowInvalidHeapAttributeNumber;
+
if (attnum <= 0)
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("statistics creation on system columns is not supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("statistics creation on system columns is not supported")));
}
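For illustration (hypothetical table and statistics names): a command such as CREATE STATISTICS s1 ON xmin, a FROM t resolves xmin to a system attribute, so the offset attnum computed in the loop above comes out <= 0 and the command should be rejected with "statistics creation on system columns is not supported".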
/*
relation_close(statrel, RowExclusiveLock);
/*
- * We used to create the pg_statistic_ext_data tuple too, but it's not clear
- * what value should the stxdinherit flag have (it depends on whether the rel
- * is partitioned, contains data, etc.)
+ * We used to create the pg_statistic_ext_data tuple too, but it's not
+ * clear what value the stxdinherit flag should have (it depends on
+ * whether the rel is partitioned, contains data, etc.)
*/
InvokeObjectPostCreateHook(StatisticExtRelationId, statoid, 0);
PG_END_TRY();
/*
- * Tell the cumulative stats system that the subscription is getting dropped.
- * We can safely report dropping the subscription statistics here if the
- * subscription is associated with a replication slot since we cannot run
- * DROP SUBSCRIPTION inside a transaction block. Subscription statistics
- * will be removed later by (auto)vacuum either if it's not associated
- * with a replication slot or if the message for dropping the subscription
- * gets lost.
+ * Tell the cumulative stats system that the subscription is getting
+ * dropped. We can safely report dropping the subscription statistics here
+ * if the subscription is associated with a replication slot since we
+ * cannot run DROP SUBSCRIPTION inside a transaction block. Subscription
+ * statistics will be removed later by (auto)vacuum either if it's not
+ * associated with a replication slot or if the message for dropping the
+ * subscription gets lost.
*/
if (slotname)
pgstat_drop_subscription(subid);
bool old_check_ok,
Oid parentDelTrigger, Oid parentUpdTrigger);
static void validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums,
- int numfksetcols, const int16 *fksetcolsattnums,
- List *fksetcols);
+ int numfksetcols, const int16 *fksetcolsattnums,
+ List *fksetcols);
static void addFkRecurseReferencing(List **wqueue, Constraint *fkconstraint,
Relation rel, Relation pkrel, Oid indexOid, Oid parentConstr,
int numfks, int16 *pkattnum, int16 *fkattnum,
foreach(lc, seqlist)
{
- Oid seq_relid = lfirst_oid(lc);
+ Oid seq_relid = lfirst_oid(lc);
SequenceChangePersistence(seq_relid, tab->newrelpersistence);
}
{
for (int i = 0; i < numfksetcols; i++)
{
- int16 setcol_attnum = fksetcolsattnums[i];
- bool seen = false;
+ int16 setcol_attnum = fksetcolsattnums[i];
+ bool seen = false;
for (int j = 0; j < numfks; j++)
{
if (!seen)
{
- char *col = strVal(list_nth(fksetcols, i));
+ char *col = strVal(list_nth(fksetcols, i));
+
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("column \"%s\" referenced in ON DELETE SET action must be part of foreign key", col)));
CatalogTupleUpdate(pg_index, &pg_index_tuple->t_self, pg_index_tuple);
InvokeObjectPostAlterHookArg(IndexRelationId, thisIndexOid, 0,
InvalidOid, is_internal);
+
/*
* Invalidate the relcache for the table, so that after we commit
* all sessions will refresh the table's replica identity index
/*
* If the partition we just attached is partitioned itself, invalidate
* relcache for all descendent partitions too to ensure that their
- * rd_partcheck expression trees are rebuilt; partitions already locked
- * at the beginning of this function.
+ * rd_partcheck expression trees are rebuilt; partitions already locked at
+ * the beginning of this function.
*/
if (attachrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- ListCell *l;
+ ListCell *l;
foreach(l, attachrel_children)
{
/*
* If the partition we just detached is partitioned itself, invalidate
* relcache for all descendent partitions too to ensure that their
- * rd_partcheck expression trees are rebuilt; must lock partitions
- * before doing so, using the same lockmode as what partRel has been
- * locked with by the caller.
+ * rd_partcheck expression trees are rebuilt; must lock partitions before
+ * doing so, using the same lockmode as what partRel has been locked with
+ * by the caller.
*/
if (partRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- List *children;
+ List *children;
children = find_all_inheritors(RelationGetRelid(partRel),
AccessExclusiveLock, NULL);
char *temp_tablespaces = NULL;
bool allow_in_place_tablespaces = false;
-Oid binary_upgrade_next_pg_tablespace_oid = InvalidOid;
+Oid binary_upgrade_next_pg_tablespace_oid = InvalidOid;
static void create_tablespace_directories(const char *location,
const Oid tablespaceoid);
*/
if (drolemembers)
{
- List *rolemembers = (List *) drolemembers->arg;
+ List *rolemembers = (List *) drolemembers->arg;
CommandCounterIncrement();
- if (stmt->action == +1) /* add members to role */
+ if (stmt->action == +1) /* add members to role */
AddRoleMems(rolename, roleid,
rolemembers, roleSpecsToIds(rolemembers),
GetUserId(), false);
*frozenxid_updated = false;
if (TransactionIdIsNormal(frozenxid) && oldfrozenxid != frozenxid)
{
- bool update = false;
+ bool update = false;
if (TransactionIdPrecedes(oldfrozenxid, frozenxid))
update = true;
*minmulti_updated = false;
if (MultiXactIdIsValid(minmulti) && oldminmulti != minmulti)
{
- bool update = false;
+ bool update = false;
if (MultiXactIdPrecedes(oldminmulti, minmulti))
update = true;
PARALLEL_INDVAC_STATUS_NEED_BULKDELETE,
PARALLEL_INDVAC_STATUS_NEED_CLEANUP,
PARALLEL_INDVAC_STATUS_COMPLETED
-} PVIndVacStatus;
+} PVIndVacStatus;
/*
* Struct for index vacuum statistics of an index that is used for parallel vacuum.
if (ctor->type == JSCTOR_JSON_SCALAR)
{
bool is_jsonb =
- ctor->returning->format->format_type == JS_FORMAT_JSONB;
+ ctor->returning->format->format_type == JS_FORMAT_JSONB;
scratch.d.json_constructor.arg_type_cache =
palloc(sizeof(*scratch.d.json_constructor.arg_type_cache) * nargs);
{
cstate->coercion = *coercion;
cstate->estate = *coercion ?
- ExecInitExprWithCaseValue((Expr *)(*coercion)->expr,
+ ExecInitExprWithCaseValue((Expr *) (*coercion)->expr,
state->parent,
caseval, casenull) : NULL;
}
}
/*
- * Do full parsing pass only for uniqueness check or for
- * JSON text validation.
+ * Do full parsing pass only for uniqueness check or for JSON text
+ * validation.
*/
if (res && (pred->unique_keys || exprtype == TEXTOID))
res = json_validate(json, pred->unique_keys, false);
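For illustration, assuming the SQL/JSON IS JSON predicate syntax from this patch series: a text value such as '{"a":1, "a":2}' tested WITH UNIQUE KEYS runs json_validate() with unique-key checking and should yield false, while WITHOUT UNIQUE KEYS the same text only needs to parse as valid JSON and should yield true.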
if (ctor->type == JSCTOR_JSON_ARRAY)
res = (is_jsonb ?
jsonb_build_array_worker :
- json_build_array_worker)(op->d.json_constructor.nargs,
- op->d.json_constructor.arg_values,
- op->d.json_constructor.arg_nulls,
- op->d.json_constructor.arg_types,
- op->d.json_constructor.constructor->absent_on_null);
- else if (ctor->type == JSCTOR_JSON_OBJECT)
- res = (is_jsonb ?
- jsonb_build_object_worker :
- json_build_object_worker)(op->d.json_constructor.nargs,
+ json_build_array_worker) (op->d.json_constructor.nargs,
op->d.json_constructor.arg_values,
op->d.json_constructor.arg_nulls,
op->d.json_constructor.arg_types,
- op->d.json_constructor.constructor->absent_on_null,
- op->d.json_constructor.constructor->unique);
+ op->d.json_constructor.constructor->absent_on_null);
+ else if (ctor->type == JSCTOR_JSON_OBJECT)
+ res = (is_jsonb ?
+ jsonb_build_object_worker :
+ json_build_object_worker) (op->d.json_constructor.nargs,
+ op->d.json_constructor.arg_values,
+ op->d.json_constructor.arg_nulls,
+ op->d.json_constructor.arg_types,
+ op->d.json_constructor.constructor->absent_on_null,
+ op->d.json_constructor.constructor->unique);
else if (ctor->type == JSCTOR_JSON_SCALAR)
{
if (op->d.json_constructor.arg_nulls[0])
ExecEvalJsonExprCoercion(ExprEvalStep *op, ExprContext *econtext,
Datum res, bool *isNull, void *p, bool *error)
{
- ExprState *estate = p;
+ ExprState *estate = p;
- if (estate) /* coerce using specified expression */
+ if (estate) /* coerce using specified expression */
return ExecEvalExpr(estate, econtext, isNull);
if (op->d.jsonexpr.jsexpr->op != JSON_EXISTS_OP)
if (!var->evaluated)
{
MemoryContext oldcxt = var->mcxt ?
- MemoryContextSwitchTo(var->mcxt) : NULL;
+ MemoryContextSwitchTo(var->mcxt) : NULL;
var->value = ExecEvalExpr(var->estate, var->econtext, &var->isnull);
var->evaluated = true;
case jbvString:
coercion = &coercions->string;
- res = PointerGetDatum(
- cstring_to_text_with_len(item->val.string.val,
- item->val.string.len));
+ res = PointerGetDatum(cstring_to_text_with_len(item->val.string.val,
+ item->val.string.len));
break;
case jbvNumeric:
return res;
}
-typedef Datum (*JsonFunc)(ExprEvalStep *op, ExprContext *econtext,
- Datum item, bool *resnull, void *p, bool *error);
+typedef Datum (*JsonFunc) (ExprEvalStep *op, ExprContext *econtext,
+ Datum item, bool *resnull, void *p, bool *error);
static Datum
ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op,
return func(op, econtext, res, resnull, p, error);
/*
- * We should catch exceptions of category ERRCODE_DATA_EXCEPTION
- * and execute the corresponding ON ERROR behavior then.
+ * We should catch exceptions of category ERRCODE_DATA_EXCEPTION and
+ * execute the corresponding ON ERROR behavior then.
*/
oldcontext = CurrentMemoryContext;
oldowner = CurrentResourceOwner;
ecategory = ERRCODE_TO_CATEGORY(edata->sqlerrcode);
- if (ecategory != ERRCODE_DATA_EXCEPTION && /* jsonpath and other data errors */
+ if (ecategory != ERRCODE_DATA_EXCEPTION && /* jsonpath and other data
+ * errors */
ecategory != ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION) /* domain errors */
ReThrowError(edata);
if (error && *error)
return (Datum) 0;
- if (!jbv) /* NULL or empty */
+ if (!jbv) /* NULL or empty */
break;
Assert(!empty);
*error = true;
return (Datum) 0;
}
+
/*
* Coercion via I/O means here that the cast to the target
* type simply does not exist.
*/
ereport(ERROR,
- /*
- * XXX Standard says about a separate error code
- * ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE
- * but does not define its number.
- */
+
+ /*
+ * XXX Standard says about a separate error code
+ * ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE but
+ * does not define its number.
+ */
(errcode(ERRCODE_SQL_JSON_SCALAR_REQUIRED),
errmsg("SQL/JSON item cannot be cast to target type")));
}
else if (!jcstate->estate)
- return res; /* no coercion */
+ return res; /* no coercion */
/* coerce using specific expression */
estate = jcstate->estate;
}
if (jexpr->on_empty->btype == JSON_BEHAVIOR_DEFAULT)
+
/*
* Execute DEFAULT expression as a coercion expression, because
* its result is already coerced to the target type.
if (node->iss_ReorderQueue)
{
HeapTuple tuple;
+
while (!pairingheap_is_empty(node->iss_ReorderQueue))
{
tuple = reorderqueue_pop(node);
cache_purge_all(MemoizeState *mstate)
{
uint64 evictions = mstate->hashtable->members;
- PlanState *pstate = (PlanState *) mstate;
+ PlanState *pstate = (PlanState *) mstate;
/*
* Likely the most efficient way to remove all items is to just reset the
{
TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
TupleDesc plan_tdesc =
- CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
+ CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
else
{
stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
- src,
- plan->argtypes,
- plan->nargs,
- _SPI_current->queryEnv);
+ src,
+ plan->argtypes,
+ plan->nargs,
+ _SPI_current->queryEnv);
}
/* Finish filling in the CachedPlanSource */
else
{
stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
- src,
- plan->argtypes,
- plan->nargs,
- _SPI_current->queryEnv);
+ src,
+ plan->argtypes,
+ plan->nargs,
+ _SPI_current->queryEnv);
}
/* Finish filling in the CachedPlanSource */
* has occurred in the middle of LLVM code. It is not safe to call back
* into LLVM (which is why a FATAL error was thrown).
*
- * We do need to shutdown LLVM in other shutdown cases, otherwise
- * e.g. profiling data won't be written out.
+ * We do need to shutdown LLVM in other shutdown cases, otherwise e.g.
+ * profiling data won't be written out.
*/
if (llvm_in_fatal_on_oom())
{
/*
* Not yet holding any partition locks. Need to determine the size of the
- * hash table, it could have been resized since we were looking
- * last. Since we iterate in partition order, we can start by
- * unconditionally lock partition 0.
+ * hash table; it could have been resized since we last looked. Since
+ * we iterate in partition order, we can start by unconditionally
+ * locking partition 0.
*
* Once we hold the lock, no resizing can happen until the scan ends. So
* we don't need to repeatedly call ensure_valid_bucket_pointers().
* because no code should expect latches to survive across
* CHECK_FOR_INTERRUPTS().
*/
- ResetLatch(MyLatch);
- goto retry;
+ ResetLatch(MyLatch);
+ goto retry;
}
}
static JsonValueExpr *
_copyJsonValueExpr(const JsonValueExpr *from)
{
- JsonValueExpr *newnode = makeNode(JsonValueExpr);
+ JsonValueExpr *newnode = makeNode(JsonValueExpr);
COPY_NODE_FIELD(raw_expr);
COPY_NODE_FIELD(formatted_expr);
static JsonParseExpr *
_copyJsonParseExpr(const JsonParseExpr *from)
{
- JsonParseExpr *newnode = makeNode(JsonParseExpr);
+ JsonParseExpr *newnode = makeNode(JsonParseExpr);
COPY_NODE_FIELD(expr);
COPY_NODE_FIELD(output);
static JsonOutput *
_copyJsonOutput(const JsonOutput *from)
{
- JsonOutput *newnode = makeNode(JsonOutput);
+ JsonOutput *newnode = makeNode(JsonOutput);
COPY_NODE_FIELD(typeName);
COPY_NODE_FIELD(returning);
static JsonExpr *
_copyJsonExpr(const JsonExpr *from)
{
- JsonExpr *newnode = makeNode(JsonExpr);
+ JsonExpr *newnode = makeNode(JsonExpr);
COPY_SCALAR_FIELD(op);
COPY_NODE_FIELD(formatted_expr);
static JsonFuncExpr *
_copyJsonFuncExpr(const JsonFuncExpr *from)
{
- JsonFuncExpr *newnode = makeNode(JsonFuncExpr);
+ JsonFuncExpr *newnode = makeNode(JsonFuncExpr);
COPY_SCALAR_FIELD(op);
COPY_NODE_FIELD(common);
static JsonBehavior *
_copyJsonBehavior(const JsonBehavior *from)
{
- JsonBehavior *newnode = makeNode(JsonBehavior);
+ JsonBehavior *newnode = makeNode(JsonBehavior);
COPY_SCALAR_FIELD(btype);
COPY_NODE_FIELD(default_expr);
static JsonCommon *
_copyJsonCommon(const JsonCommon *from)
{
- JsonCommon *newnode = makeNode(JsonCommon);
+ JsonCommon *newnode = makeNode(JsonCommon);
COPY_NODE_FIELD(expr);
COPY_NODE_FIELD(pathspec);
static JsonArgument *
_copyJsonArgument(const JsonArgument *from)
{
- JsonArgument *newnode = makeNode(JsonArgument);
+ JsonArgument *newnode = makeNode(JsonArgument);
COPY_NODE_FIELD(val);
COPY_STRING_FIELD(name);
static JsonTable *
_copyJsonTable(const JsonTable *from)
{
- JsonTable *newnode = makeNode(JsonTable);
+ JsonTable *newnode = makeNode(JsonTable);
COPY_NODE_FIELD(common);
COPY_NODE_FIELD(columns);
static Integer *
_copyInteger(const Integer *from)
{
- Integer *newnode = makeNode(Integer);
+ Integer *newnode = makeNode(Integer);
COPY_SCALAR_FIELD(ival);
static Boolean *
_copyBoolean(const Boolean *from)
{
- Boolean *newnode = makeNode(Boolean);
+ Boolean *newnode = makeNode(Boolean);
COPY_SCALAR_FIELD(boolval);
static BitString *
_copyBitString(const BitString *from)
{
- BitString *newnode = makeNode(BitString);
+ BitString *newnode = makeNode(BitString);
COPY_STRING_FIELD(bsval);
_equalA_Const(const A_Const *a, const A_Const *b)
{
/*
- * Hack for in-line val field. Also val is not valid is isnull is
- * true.
+ * Hack for in-line val field. Also val is not valid if isnull is true.
*/
if (!a->isnull && !b->isnull &&
!equal(&a->val, &b->val))
break;
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) expr;
+ JsonExpr *jexpr = (JsonExpr *) expr;
JsonCoercion *coercion = jexpr->result_coercion;
if (!coercion)
if (ctor->coercion)
exprSetCollation((Node *) ctor->coercion, collation);
else
- Assert(!OidIsValid(collation)); /* result is always a json[b] type */
+ Assert(!OidIsValid(collation)); /* result is always a
+ * json[b] type */
}
break;
case T_JsonIsPredicate:
break;
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) expr;
+ JsonExpr *jexpr = (JsonExpr *) expr;
JsonCoercion *coercion = jexpr->result_coercion;
if (!coercion)
return walker(((JsonIsPredicate *) node)->expr, context);
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) node;
+ JsonExpr *jexpr = (JsonExpr *) node;
if (walker(jexpr->formatted_expr, context))
return true;
break;
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) node;
- JsonExpr *newnode;
+ JsonExpr *jexpr = (JsonExpr *) node;
+ JsonExpr *newnode;
FLATCOPY(newnode, jexpr, JsonExpr);
MUTATE(newnode->path_spec, jexpr->path_spec, Node *);
break;
case T_JsonTableColumn:
{
- JsonTableColumn *jtc = (JsonTableColumn *) node;
+ JsonTableColumn *jtc = (JsonTableColumn *) node;
if (walker(jtc->typeName, context))
return true;
_outFloat(StringInfo str, const Float *node)
{
/*
- * We assume the value is a valid numeric literal and so does not
- * need quoting.
+ * We assume the value is a valid numeric literal and so does not need
+ * quoting.
*/
appendStringInfoString(str, node->fval);
}
_outString(StringInfo str, const String *node)
{
/*
- * We use outToken to provide escaping of the string's content,
- * but we don't want it to do anything with an empty string.
+ * We use outToken to provide escaping of the string's content, but we
+ * don't want it to do anything with an empty string.
*/
appendStringInfoChar(str, '"');
if (node->sval[0] != '\0')
Integer *
makeInteger(int i)
{
- Integer *v = makeNode(Integer);
+ Integer *v = makeNode(Integer);
v->ival = i;
return v;
Boolean *
makeBoolean(bool val)
{
- Boolean *v = makeNode(Boolean);
+ Boolean *v = makeNode(Boolean);
v->boolval = val;
return v;
}
/*
- * When building a fractional path, determine a cheapest fractional
- * path for each child relation too. Looking at startup and total
- * costs is not enough, because the cheapest fractional path may be
- * dominated by two separate paths (one for startup, one for total).
+ * When building a fractional path, determine a cheapest
+ * fractional path for each child relation too. Looking at startup
+ * and total costs is not enough, because the cheapest fractional
+ * path may be dominated by two separate paths (one for startup,
+ * one for total).
*
* When needed (building fractional path), determine the cheapest
* fractional path too.
*/
if (root->tuple_fraction > 0)
{
- double path_fraction = (1.0 / root->tuple_fraction);
+ double path_fraction = (1.0 / root->tuple_fraction);
cheapest_fractional =
get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
path_fraction);
/*
- * If we found no path with matching pathkeys, use the cheapest
- * total path instead.
+ * If we found no path with matching pathkeys, use the
+ * cheapest total path instead.
*
* XXX We might consider partially sorted paths too (with an
* incremental sort on top). But we'd have to build all the
static double
get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
{
- double width = -1.0; /* fake value */
+ double width = -1.0; /* fake value */
if (IsA(expr, RelabelType))
expr = (Expr *) ((RelabelType *) expr)->arg;
/* Try to find actual stat in corresponding relation */
if (IsA(expr, Var))
{
- Var *var = (Var *) expr;
+ Var *var = (Var *) expr;
if (var->varno > 0 && var->varno < root->simple_rel_array_size)
{
- RelOptInfo *rel = root->simple_rel_array[var->varno];
+ RelOptInfo *rel = root->simple_rel_array[var->varno];
if (rel != NULL &&
var->varattno >= rel->min_attr &&
var->varattno <= rel->max_attr)
{
- int ndx = var->varattno - rel->min_attr;
+ int ndx = var->varattno - rel->min_attr;
if (rel->attr_widths[ndx] > 0)
width = rel->attr_widths[ndx];
/* Didn't find any actual stats, try using type width instead. */
if (width < 0.0)
{
- Node *node = (Node*) expr;
+ Node *node = (Node *) expr;
width = get_typavgwidth(exprType(node), exprTypmod(node));
}
* Values are passed as Datum type, so comparisons can't be cheaper than
* comparing a Datum value.
*
- * FIXME I find this reasoning questionable. We may pass int2, and comparing
- * it is probably a bit cheaper than comparing a bigint.
+ * FIXME I find this reasoning questionable. We may pass int2, and
+ * comparing it is probably a bit cheaper than comparing a bigint.
*/
if (width <= sizeof(Datum))
return 1.0;
/*
* We consider the cost of a comparison not to be directly proportional to
- * width of the argument, because widths of the arguments could be slightly
- * different (we only know the average width for the whole column). So we
- * use log16(width) as an estimate.
+ * width of the argument, because widths of the arguments could be
+ * slightly different (we only know the average width for the whole
+ * column). So we use log16(width) as an estimate.
*/
return 1.0 + 0.125 * LOG2(width / sizeof(Datum));
}
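A worked example of the multiplier returned here, assuming an 8-byte Datum: for an average column width of 64 bytes this gives 1.0 + 0.125 * log2(64 / 8) = 1.0 + 0.125 * 3 = 1.375, i.e. wide sort keys are costed only modestly higher than pass-by-value ones.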
bool heapSort)
{
Cost per_tuple_cost = 0.0;
- ListCell *lc;
- List *pathkeyExprs = NIL;
+ ListCell *lc;
+ List *pathkeyExprs = NIL;
double tuplesPerPrevGroup = tuples;
double totalFuncCost = 1.0;
bool has_fake_var = false;
int i = 0;
Oid prev_datatype = InvalidOid;
- List *cache_varinfos = NIL;
+ List *cache_varinfos = NIL;
/* fallback if pathkeys is unknown */
if (list_length(pathkeys) == 0)
{
/*
- * If we'll use a bounded heap-sort keeping just K tuples in memory, for
- * a total number of tuple comparisons of N log2 K; but the constant
- * factor is a bit higher than for quicksort. Tweak it so that the cost
- * curve is continuous at the crossover point.
+ * If we use a bounded heap-sort keeping just K tuples in memory, the
+ * total number of tuple comparisons is N log2 K; but the constant
+ * factor is a bit higher than for quicksort. Tweak it so that the
+ * cost curve is continuous at the crossover point.
*/
output_tuples = (heapSort) ? 2.0 * output_tuples : tuples;
per_tuple_cost += 2.0 * cpu_operator_cost * LOG2(output_tuples);
}
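A worked example of the bounded heap-sort branch, assuming the default cpu_operator_cost of 0.0025: with output_tuples = 100, the code doubles it to 200 and adds 2.0 * 0.0025 * log2(200), about 0.038, to per_tuple_cost; the quicksort branch would use log2(tuples) instead.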
/*
- * Computing total cost of sorting takes into account:
- * - per column comparison function cost
- * - we try to compute needed number of comparison per column
+ * Computing total cost of sorting takes into account the per-column
+ * comparison function cost. We try to compute the needed number of
+ * comparisons per column.
*/
foreach(lc, pathkeys)
{
- PathKey *pathkey = (PathKey*) lfirst(lc);
- EquivalenceMember *em;
- double nGroups,
- correctedNGroups;
- Cost funcCost = 1.0;
+ PathKey *pathkey = (PathKey *) lfirst(lc);
+ EquivalenceMember *em;
+ double nGroups,
+ correctedNGroups;
+ Cost funcCost = 1.0;
/*
* We believe that equivalence members aren't very different, so, to
pathkeyExprs = lappend(pathkeyExprs, em->em_expr);
/*
- * We need to calculate the number of comparisons for this column, which
- * requires knowing the group size. So we estimate the number of groups
- * by calling estimate_num_groups_incremental(), which estimates the
- * group size for "new" pathkeys.
+ * We need to calculate the number of comparisons for this column,
+ * which requires knowing the group size. So we estimate the number of
+ * groups by calling estimate_num_groups_incremental(), which
+ * estimates the group size for "new" pathkeys.
*
* Note: estimate_num_groups_incremental does not handle fake Vars, so
* use a default estimate otherwise.
&cache_varinfos,
list_length(pathkeyExprs) - 1);
else if (tuples > 4.0)
+
/*
* Use geometric mean as estimation if there are no stats.
*
- * We don't use DEFAULT_NUM_DISTINCT here, because that’s used for
- * a single column, but here we’re dealing with multiple columns.
+ * We don't use DEFAULT_NUM_DISTINCT here, because that's used for
+ * a single column, but here we're dealing with multiple columns.
*/
nGroups = ceil(2.0 + sqrt(tuples) * (i + 1) / list_length(pathkeys));
else
nGroups = tuples;
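A worked example of the no-statistics fallback: with tuples = 10000, three pathkeys and the first column (i = 0), nGroups = ceil(2.0 + sqrt(10000) * 1 / 3) = ceil(35.3) = 36, so the estimate grows with both the row count and the column's position in the key list.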
/*
- * Presorted keys are not considered in the cost above, but we still do
- * have to compare them in the qsort comparator. So make sure to factor
- * in the cost in that case.
+ * Presorted keys are not considered in the cost above, but we still
+ * do have to compare them in the qsort comparator. So make sure to
+ * factor in the cost in that case.
*/
if (i >= nPresortedKeys)
{
if (heapSort)
{
- /* have to keep at least one group, and a multiple of group size */
+ /*
+ * have to keep at least one group, and a multiple of group
+ * size
+ */
correctedNGroups = ceil(output_tuples / tuplesPerPrevGroup);
}
else
i++;
/*
- * Uniform distributions with all groups being of the same size are the
- * best case, with nice smooth behavior. Real-world distributions tend
- * not to be uniform, though, and we don’t have any reliable easy-to-use
- * information. As a basic defense against skewed distributions, we use
- * a 1.5 factor to make the expected group a bit larger, but we need to
- * be careful not to make the group larger than in the preceding step.
+ * Uniform distributions with all groups being of the same size are
+ * the best case, with nice smooth behavior. Real-world distributions
+ * tend not to be uniform, though, and we don't have any reliable
+ * easy-to-use information. As a basic defense against skewed
+ * distributions, we use a 1.5 factor to make the expected group a bit
+ * larger, but we need to be careful not to make the group larger than
+ * in the preceding step.
*/
tuplesPerPrevGroup = Min(tuplesPerPrevGroup,
ceil(1.5 * tuplesPerPrevGroup / nGroups));
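Continuing that example: with tuplesPerPrevGroup = 10000 and nGroups = 36, the clamp yields Min(10000, ceil(1.5 * 10000 / 36)) = 417, i.e. the expected group is assumed to be 1.5 times larger than the uniform estimate, but never larger than the group at the preceding step.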
/*
- * Once we get single-row group, it means tuples in the group are unique
- * and we can skip all remaining columns.
+ * Once we get a single-row group, it means tuples in the group are
+ * unique and we can skip all remaining columns.
*/
if (tuplesPerPrevGroup <= 1.0)
break;
per_tuple_cost *= cpu_operator_cost;
/*
- * Accordingly to "Introduction to algorithms", Thomas H. Cormen, Charles E.
- * Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0, quicksort estimation
- * formula has additional term proportional to number of tuples (See Chapter
- * 8.2 and Theorem 4.1). That affects cases with a low number of tuples,
- * approximately less than 1e4. We could implement it as an additional
- * multiplier under the logarithm, but we use a bit more complex formula
- * which takes into account the number of unique tuples and it’s not clear
- * how to combine the multiplier with the number of groups. Estimate it as
- * 10 in cpu_operator_cost unit.
+ * According to "Introduction to Algorithms" (Thomas H. Cormen, Charles
+ * E. Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0), the quicksort
+ * estimation formula has an additional term proportional to the number
+ * of tuples (see Chapter 8.2 and Theorem 4.1). That affects cases with
+ * a low number of tuples, approximately less than 1e4. We could
+ * implement it as an additional multiplier under the logarithm, but we
+ * use a bit more complex formula which takes into account the number of
+ * unique tuples and it's not clear how to combine the multiplier with
+ * the number of groups. Estimate it as 10 cpu_operator_cost units.
*/
per_tuple_cost += 10 * cpu_operator_cost;
double tuples)
{
return compute_cpu_sort_cost(root, pathkeys, nPresortedKeys,
- 0, tuples, tuples, false);
+ 0, tuples, tuples, false);
}
/*
/*
* Match!
*
- * Copy the sortref if it wasn't set yet. That may happen if the
- * ec was constructed from WHERE clause, i.e. it doesn't have a
- * target reference at all.
+ * Copy the sortref if it wasn't set yet. That may happen if
+ * the ec was constructed from a WHERE clause, i.e. it doesn't
+ * have a target reference at all.
*/
if (cur_ec->ec_sortref == 0 && sortref > 0)
cur_ec->ec_sortref = sortref;
foreach(l, all_pathkeys)
{
- PathKey *front_pathkey = (PathKey *) lfirst(l);
+ PathKey *front_pathkey = (PathKey *) lfirst(l);
List *cur_mergeclauses;
List *outerkeys;
List *innerkeys;
#include "utils/selfuncs.h"
/* Consider reordering of GROUP BY keys? */
-bool enable_group_by_reordering = true;
+bool enable_group_by_reordering = true;
static bool pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys);
static bool matches_boolean_partition_clause(RestrictInfo *rinfo,
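For illustration of the new GUC (hypothetical table and index): with enable_group_by_reordering = on, a query like SELECT count(*) FROM t GROUP BY b, a may have its grouping keys reordered to (a, b) when the input path already provides (a, b) ordering; with the GUC off, only the order written in the query is considered.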
group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
List **group_clauses)
{
- List *new_group_pathkeys= NIL,
+ List *new_group_pathkeys = NIL,
*new_group_clauses = NIL;
ListCell *lc;
int n;
* there's a matching GROUP BY key. If we find one, we append it to the
* list, and do the same for the clauses.
*
- * Once we find the first pathkey without a matching GROUP BY key, the rest
- * of the pathkeys are useless and can't be used to evaluate the grouping,
- * so we abort the loop and ignore the remaining pathkeys.
+ * Once we find the first pathkey without a matching GROUP BY key, the
+ * rest of the pathkeys are useless and can't be used to evaluate the
+ * grouping, so we abort the loop and ignore the remaining pathkeys.
*
* XXX Pathkeys are built in a way to allow simply comparing pointers.
*/
foreach(lc, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(lc);
- SortGroupClause *sgc;
+ PathKey *pathkey = (PathKey *) lfirst(lc);
+ SortGroupClause *sgc;
/* abort on first mismatch */
if (!list_member_ptr(*group_pathkeys, pathkey))
/*
* Used to generate all permutations of a pathkey list.
*/
-typedef struct PathkeyMutatorState {
+typedef struct PathkeyMutatorState
+{
List *elemsList;
ListCell **elemCells;
void **elems;
int *positions;
- int mutatorNColumns;
- int count;
+ int mutatorNColumns;
+ int count;
} PathkeyMutatorState;
static void
PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
{
- int i;
+ int i;
int n = end - start;
- ListCell *lc;
+ ListCell *lc;
memset(state, 0, sizeof(*state));
state->elemsList = list_copy(elems);
- state->elems = palloc(sizeof(void*) * n);
- state->elemCells = palloc(sizeof(ListCell*) * n);
+ state->elems = palloc(sizeof(void *) * n);
+ state->elemCells = palloc(sizeof(ListCell *) * n);
state->positions = palloc(sizeof(int) * n);
i = 0;
static void
PathkeyMutatorSwap(int *a, int i, int j)
{
- int s = a[i];
+ int s = a[i];
- a[i] = a[j];
- a[j] = s;
+ a[i] = a[j];
+ a[j] = s;
}
/*
static bool
PathkeyMutatorNextSet(int *a, int n)
{
- int j, k, l, r;
+ int j,
+ k,
+ l,
+ r;
j = n - 2;
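As a side note, PathkeyMutatorNextSet() appears to implement the classic lexicographic next-permutation step over the positions array. A minimal standalone sketch of that algorithm, with illustrative names and not taken from the patch itself:

#include <stdbool.h>

/* swap two elements of an int array (mirrors PathkeyMutatorSwap above) */
static void
swap_int(int *a, int i, int j)
{
	int			s = a[i];

	a[i] = a[j];
	a[j] = s;
}

/*
 * Advance a[0..n-1] to the next permutation in lexicographic order;
 * return false once the array is in descending order (last permutation).
 */
static bool
next_permutation(int *a, int n)
{
	int			j = n - 2;
	int			k,
				l,
				r;

	/* find the rightmost position with a[j] < a[j + 1] */
	while (j >= 0 && a[j] >= a[j + 1])
		j--;
	if (j < 0)
		return false;

	/* find the rightmost element larger than a[j] and swap the two */
	k = n - 1;
	while (a[k] <= a[j])
		k--;
	swap_int(a, j, k);

	/* reverse the suffix so the tail is the smallest possible */
	for (l = j + 1, r = n - 1; l < r; l++, r--)
		swap_int(a, l, r);
	return true;
}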
static List *
PathkeyMutatorNext(PathkeyMutatorState *state)
{
- int i;
+ int i;
state->count++;
}
/* update the list cells to point to the right elements */
- for(i = 0; i < state->mutatorNColumns; i++)
+ for (i = 0; i < state->mutatorNColumns; i++)
lfirst(state->elemCells[i]) =
- (void *) state->elems[ state->positions[i] - 1 ];
+ (void *) state->elems[state->positions[i] - 1];
return state->elemsList;
}
typedef struct PathkeySortCost
{
Cost cost;
- PathKey *pathkey;
+ PathKey *pathkey;
} PathkeySortCost;
static int
List **group_pathkeys, List **group_clauses,
int n_preordered)
{
- List *new_group_pathkeys = NIL,
- *new_group_clauses = NIL,
- *var_group_pathkeys;
+ List *new_group_pathkeys = NIL,
+ *new_group_clauses = NIL,
+ *var_group_pathkeys;
- ListCell *cell;
- PathkeyMutatorState mstate;
- double cheapest_sort_cost = -1.0;
+ ListCell *cell;
+ PathkeyMutatorState mstate;
+ double cheapest_sort_cost = -1.0;
- int nFreeKeys;
- int nToPermute;
+ int nFreeKeys;
+ int nToPermute;
/* If there are less than 2 unsorted pathkeys, we're done. */
if (list_length(*group_pathkeys) - n_preordered < 2)
return false;
/*
- * We could exhaustively cost all possible orderings of the pathkeys, but for
- * a large number of pathkeys it might be prohibitively expensive. So we try
- * to apply simple cheap heuristics first - we sort the pathkeys by sort cost
- * (as if the pathkey was sorted independently) and then check only the four
- * cheapest pathkeys. The remaining pathkeys are kept ordered by cost.
+ * We could exhaustively cost all possible orderings of the pathkeys, but
+ * for a large number of pathkeys it might be prohibitively expensive. So
+ * we try to apply simple cheap heuristics first - we sort the pathkeys by
+ * sort cost (as if the pathkey was sorted independently) and then check
+ * only the four cheapest pathkeys. The remaining pathkeys are kept
+ * ordered by cost.
*
* XXX This is a very simple heuristics, but likely to work fine for most
- * cases (because the number of GROUP BY clauses tends to be lower than 4).
- * But it ignores how the number of distinct values in each pathkey affects
- * the following steps. It might be better to use "more expensive" pathkey
- * first if it has many distinct values, because it then limits the number
- * of comparisons for the remaining pathkeys. But evaluating that is likely
- * quite the expensive.
+ * cases (because the number of GROUP BY clauses tends to be lower than
+ * 4). But it ignores how the number of distinct values in each pathkey
+ * affects the following steps. It might be better to use a "more
+ * expensive" pathkey first if it has many distinct values, because that
+ * limits the number of comparisons for the remaining pathkeys. But
+ * evaluating that is likely quite expensive.
*/
nFreeKeys = list_length(*group_pathkeys) - n_preordered;
nToPermute = 4;
if (nFreeKeys > nToPermute)
{
- int i;
+ int i;
PathkeySortCost *costs = palloc(sizeof(PathkeySortCost) * nFreeKeys);
/* skip the pre-ordered pathkeys */
/* estimate cost for sorting individual pathkeys */
for (i = 0; cell != NULL; i++, (cell = lnext(*group_pathkeys, cell)))
{
- List *to_cost = list_make1(lfirst(cell));
+ List *to_cost = list_make1(lfirst(cell));
Assert(i < nFreeKeys);
Assert(list_length(new_group_pathkeys) == list_length(*group_pathkeys));
/*
- * Generate pathkey lists with permutations of the first nToPermute pathkeys.
+ * Generate pathkey lists with permutations of the first nToPermute
+ * pathkeys.
*
* XXX We simply calculate sort cost for each individual pathkey list, but
- * there's room for two dynamic programming optimizations here. Firstly, we
- * may pass the current "best" cost to cost_sort_estimate so that it can
- * "abort" if the estimated pathkeys list exceeds it. Secondly, it could pass
- * the return information about the position when it exceeded the cost, and
- * we could skip all permutations with the same prefix.
+ * there's room for two dynamic programming optimizations here. Firstly,
+ * we may pass the current "best" cost to cost_sort_estimate so that it
+ * can "abort" if the estimated pathkeys list exceeds it. Secondly, it
+ * could return information about the position where it exceeded the
+ * cost, and we could skip all permutations with the same prefix.
*
* Imagine we've already found ordering with cost C1, and we're evaluating
* another ordering - cost_sort_estimate() calculates cost by adding the
* pathkeys one by one (more or less), and the cost only grows. If at any
- * point it exceeds C1, it can't possibly be "better" so we can discard it.
- * But we also know that we can discard all ordering with the same prefix,
- * because if we're estimating (a,b,c,d) and we exceed C1 at (a,b) then the
- * same thing will happen for any ordering with this prefix.
+ * point it exceeds C1, it can't possibly be "better" so we can discard
+ * it. But we also know that we can discard all orderings with the same
+ * prefix, because if we're estimating (a,b,c,d) and we exceed C1 at (a,b)
+ * then the same thing will happen for any ordering with this prefix.
*/
PathkeyMutatorInit(&mstate, new_group_pathkeys, n_preordered, n_preordered + nToPermute);
- while((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL)
+ while ((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL)
{
- Cost cost;
+ Cost cost;
cost = cost_sort_estimate(root, var_group_pathkeys, n_preordered, nrows);
/* Reorder the group clauses according to the reordered pathkeys. */
foreach(cell, new_group_pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(cell);
+ PathKey *pathkey = (PathKey *) lfirst(cell);
new_group_clauses = lappend(new_group_clauses,
- get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref,
- *group_clauses));
+ get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref,
+ *group_clauses));
}
/* Just append the rest GROUP BY clauses */
PathKeyInfo *info;
int n_preordered = 0;
- List *pathkeys = group_pathkeys;
- List *clauses = group_clauses;
+ List *pathkeys = group_pathkeys;
+ List *clauses = group_clauses;
/* always return at least the original pathkeys/clauses */
info = makeNode(PathKeyInfo);
infos = lappend(infos, info);
/*
- * Should we try generating alternative orderings of the group keys? If not,
- * we produce only the order specified in the query, i.e. the optimization
- * is effectively disabled.
+ * Should we try generating alternative orderings of the group keys? If
+ * not, we produce only the order specified in the query, i.e. the
+ * optimization is effectively disabled.
*/
if (!enable_group_by_reordering)
return infos;
}
/*
- * If the path is sorted in some way, try reordering the group keys to match
- * as much of the ordering as possible - we get this sort for free (mostly).
+ * If the path is sorted in some way, try reordering the group keys to
+ * match as much of the ordering as possible - we get this sort for free
+ * (mostly).
*
* We must not do this when there are no grouping sets, because those use
* more complex logic to decide the ordering.
static int
pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys)
{
- ListCell *key;
- int n = 0;
+ ListCell *key;
+ int n = 0;
/* no special ordering requested for grouping */
if (root->group_pathkeys == NIL)
/* walk the pathkeys and search for matching group key */
foreach(key, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(key);
+ PathKey *pathkey = (PathKey *) lfirst(key);
/* no matching group key, we're done */
if (!list_member_ptr(root->group_pathkeys, pathkey))
case T_ProjectionPath:
/*
- * If the generated plan node includes a Result node for
- * the projection, we can't execute it asynchronously.
+ * If the generated plan node includes a Result node for the
+ * projection, we can't execute it asynchronously.
*/
if (IsA(plan, Result))
return false;
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
- foreach (lc2, pathkey_orderings)
+ foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
else if (parse->hasAggs)
{
/*
- * We have aggregation, possibly with plain GROUP BY. Make
- * an AggPath.
+ * We have aggregation, possibly with plain GROUP BY.
+ * Make an AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
else if (group_clauses)
{
/*
- * We have GROUP BY without aggregation or grouping sets.
- * Make a GroupPath.
+ * We have GROUP BY without aggregation or grouping
+ * sets. Make a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
/*
* Now we may consider incremental sort on this path, but only
- * when the path is not already sorted and when incremental sort
- * is enabled.
+ * when the path is not already sorted and when incremental
+ * sort is enabled.
*/
if (is_sorted || !enable_incremental_sort)
continue;
continue;
/*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
*/
Assert(list_length(root->group_pathkeys) != 1);
else if (parse->hasAggs)
{
/*
- * We have aggregation, possibly with plain GROUP BY. Make an
- * AggPath.
+ * We have aggregation, possibly with plain GROUP BY. Make
+ * an AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
else if (parse->groupClause)
{
/*
- * We have GROUP BY without aggregation or grouping sets. Make
- * a GroupPath.
+ * We have GROUP BY without aggregation or grouping sets.
+ * Make a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
- foreach (lc2, pathkey_orderings)
+ foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
&presorted_keys);
/*
- * Insert a Sort node, if required. But there's no point in
- * sorting anything but the cheapest path.
+ * Insert a Sort node, if required. But there's no point
+ * in sorting anything but the cheapest path.
*/
if (!is_sorted)
{
dNumGroups));
/*
- * Now we may consider incremental sort on this path, but only
- * when the path is not already sorted and when incremental
- * sort is enabled.
+ * Now we may consider incremental sort on this path, but
+ * only when the path is not already sorted and when
+ * incremental sort is enabled.
*/
if (is_sorted || !enable_incremental_sort)
continue;
- /* Restore the input path (we might have added Sort on top). */
+ /*
+ * Restore the input path (we might have added Sort on
+ * top).
+ */
path = path_original;
- /* no shared prefix, not point in building incremental sort */
+ /*
+ * no shared prefix, no point in building incremental
+ * sort
+ */
if (presorted_keys == 0)
continue;
/*
* We should have already excluded pathkeys of length 1
- * because then presorted_keys > 0 would imply is_sorted was
- * true.
+ * because then presorted_keys > 0 would imply is_sorted
+ * was true.
*/
Assert(list_length(root->group_pathkeys) != 1);
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
- foreach (lc2, pathkey_orderings)
+ foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
Assert(list_length(pathkey_orderings) > 0);
/* process all potentially interesting grouping reorderings */
- foreach (lc2, pathkey_orderings)
+ foreach(lc2, pathkey_orderings)
{
bool is_sorted;
int presorted_keys = 0;
/*
* Now we may consider incremental sort on this path, but only
- * when the path is not already sorted and when incremental sort
- * is enabled.
+ * when the path is not already sorted and when incremental
+ * sort is enabled.
*/
if (is_sorted || !enable_incremental_sort)
continue;
continue;
/*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
*/
Assert(list_length(root->group_pathkeys) != 1);
const JsonConstructorExpr *ctor = (JsonConstructorExpr *) node;
ListCell *lc;
bool is_jsonb =
- ctor->returning->format->format_type == JS_FORMAT_JSONB;
+ ctor->returning->format->format_type == JS_FORMAT_JSONB;
/* Check argument_type => json[b] conversions */
foreach(lc, ctor->args)
/* JsonExpr is parallel-unsafe if subtransactions can be used. */
else if (IsA(node, JsonExpr))
{
- JsonExpr *jsexpr = (JsonExpr *) node;
+ JsonExpr *jsexpr = (JsonExpr *) node;
if (ExecEvalJsonNeedsSubTransaction(jsexpr, NULL))
{
context->case_val = raw;
formatted = eval_const_expressions_mutator((Node *) jve->formatted_expr,
- context);
+ context);
context->case_val = save_case_val;
return false;
if (IsA(node, Param))
{
- Param *param = (Param *)node;
+ Param *param = (Param *) node;
*context = bms_add_member(*context, param->paramid);
return false;
if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind))
{
- table_relation_estimate_size(rel, attr_widths, pages, tuples,
- allvisfrac);
+ table_relation_estimate_size(rel, attr_widths, pages, tuples,
+ allvisfrac);
}
else if (rel->rd_rel->relkind == RELKIND_INDEX)
{
- /*
- * XXX: It'd probably be good to move this into a callback,
- * individual index types e.g. know if they have a metapage.
- */
+ /*
+ * XXX: It'd probably be good to move this into a callback, individual
+ * index types e.g. know if they have a metapage.
+ */
- /* it has storage, ok to call the smgr */
- curpages = RelationGetNumberOfBlocks(rel);
+ /* it has storage, ok to call the smgr */
+ curpages = RelationGetNumberOfBlocks(rel);
- /* report estimated # pages */
- *pages = curpages;
- /* quick exit if rel is clearly empty */
- if (curpages == 0)
- {
- *tuples = 0;
- *allvisfrac = 0;
- return;
- }
+ /* report estimated # pages */
+ *pages = curpages;
+ /* quick exit if rel is clearly empty */
+ if (curpages == 0)
+ {
+ *tuples = 0;
+ *allvisfrac = 0;
+ return;
+ }
- /* coerce values in pg_class to more desirable types */
- relpages = (BlockNumber) rel->rd_rel->relpages;
- reltuples = (double) rel->rd_rel->reltuples;
- relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
+ /* coerce values in pg_class to more desirable types */
+ relpages = (BlockNumber) rel->rd_rel->relpages;
+ reltuples = (double) rel->rd_rel->reltuples;
+ relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
+ /*
+ * Discount the metapage while estimating the number of tuples. This
+ * is a kluge because it assumes more than it ought to about index
+ * structure. Currently it's OK for btree, hash, and GIN indexes but
+ * suspect for GiST indexes.
+ */
+ if (relpages > 0)
+ {
+ curpages--;
+ relpages--;
+ }
+
+ /* estimate number of tuples from previous tuple density */
+ if (reltuples >= 0 && relpages > 0)
+ density = reltuples / (double) relpages;
+ else
+ {
/*
- * Discount the metapage while estimating the number of tuples.
- * This is a kluge because it assumes more than it ought to about
- * index structure. Currently it's OK for btree, hash, and GIN
- * indexes but suspect for GiST indexes.
+ * If we have no data because the relation was never vacuumed,
+ * estimate tuple width from attribute datatypes. We assume here
+ * that the pages are completely full, which is OK for tables
+ * (since they've presumably not been VACUUMed yet) but is
+ * probably an overestimate for indexes. Fortunately
+ * get_relation_info() can clamp the overestimate to the parent
+ * table's size.
+ *
+ * Note: this code intentionally disregards alignment
+ * considerations, because (a) that would be gilding the lily
+ * considering how crude the estimate is, and (b) it creates
+ * platform dependencies in the default plans which are kind of a
+ * headache for regression testing.
+ *
+ * XXX: Should this logic be more index specific?
*/
- if (relpages > 0)
- {
- curpages--;
- relpages--;
- }
-
- /* estimate number of tuples from previous tuple density */
- if (reltuples >= 0 && relpages > 0)
- density = reltuples / (double) relpages;
- else
- {
- /*
- * If we have no data because the relation was never vacuumed,
- * estimate tuple width from attribute datatypes. We assume
- * here that the pages are completely full, which is OK for
- * tables (since they've presumably not been VACUUMed yet) but
- * is probably an overestimate for indexes. Fortunately
- * get_relation_info() can clamp the overestimate to the
- * parent table's size.
- *
- * Note: this code intentionally disregards alignment
- * considerations, because (a) that would be gilding the lily
- * considering how crude the estimate is, and (b) it creates
- * platform dependencies in the default plans which are kind
- * of a headache for regression testing.
- *
- * XXX: Should this logic be more index specific?
- */
- int32 tuple_width;
+ int32 tuple_width;
- tuple_width = get_rel_data_width(rel, attr_widths);
- tuple_width += MAXALIGN(SizeofHeapTupleHeader);
- tuple_width += sizeof(ItemIdData);
- /* note: integer division is intentional here */
- density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
- }
- *tuples = rint(density * (double) curpages);
+ tuple_width = get_rel_data_width(rel, attr_widths);
+ tuple_width += MAXALIGN(SizeofHeapTupleHeader);
+ tuple_width += sizeof(ItemIdData);
+ /* note: integer division is intentional here */
+ density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
+ }
+ *tuples = rint(density * (double) curpages);
- /*
- * We use relallvisible as-is, rather than scaling it up like we
- * do for the pages and tuples counts, on the theory that any
- * pages added since the last VACUUM are most likely not marked
- * all-visible. But costsize.c wants it converted to a fraction.
- */
- if (relallvisible == 0 || curpages <= 0)
- *allvisfrac = 0;
- else if ((double) relallvisible >= curpages)
- *allvisfrac = 1;
- else
- *allvisfrac = (double) relallvisible / curpages;
+ /*
+ * We use relallvisible as-is, rather than scaling it up like we do
+ * for the pages and tuples counts, on the theory that any pages added
+ * since the last VACUUM are most likely not marked all-visible. But
+ * costsize.c wants it converted to a fraction.
+ */
+ if (relallvisible == 0 || curpages <= 0)
+ *allvisfrac = 0;
+ else if ((double) relallvisible >= curpages)
+ *allvisfrac = 1;
+ else
+ *allvisfrac = (double) relallvisible / curpages;
}
else
{
- /*
- * Just use whatever's in pg_class. This covers foreign tables,
- * sequences, and also relkinds without storage (shouldn't get
- * here?); see initializations in AddNewRelationTuple(). Note
- * that FDW must cope if reltuples is -1!
- */
- *pages = rel->rd_rel->relpages;
- *tuples = rel->rd_rel->reltuples;
- *allvisfrac = 0;
+ /*
+ * Just use whatever's in pg_class. This covers foreign tables,
+ * sequences, and also relkinds without storage (shouldn't get here?);
+ * see initializations in AddNewRelationTuple(). Note that FDW must
+ * cope if reltuples is -1!
+ */
+ *pages = rel->rd_rel->relpages;
+ *tuples = rel->rd_rel->reltuples;
+ *allvisfrac = 0;
}
}
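A worked example of the density fallback for never-vacuumed relations, assuming an 8 kB block and typical 64-bit values (MAXALIGN(SizeofHeapTupleHeader) = 24, sizeof(ItemIdData) = 4, SizeOfPageHeaderData = 24): an average data width of 40 bytes gives tuple_width = 68 and density = (8192 - 24) / 68 = 120 by integer division, so 100 current pages are estimated at about 12000 tuples.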
*/
Query *
parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText,
- const Oid *paramTypes, int numParams,
- QueryEnvironment *queryEnv)
+ const Oid *paramTypes, int numParams,
+ QueryEnvironment *queryEnv)
{
ParseState *pstate = make_parsestate(NULL);
Query *query;
ListCell *ltl;
ListCell *rtl;
const char *context;
- bool recursive = (pstate->p_parent_cte &&
- pstate->p_parent_cte->cterecursive);
+ bool recursive = (pstate->p_parent_cte &&
+ pstate->p_parent_cte->cterecursive);
context = (stmt->op == SETOP_UNION ? "UNION" :
(stmt->op == SETOP_INTERSECT ? "INTERSECT" :
setup_parser_errposition_callback(&pcbstate, pstate,
bestlocation);
- /* If it's a recursive union, we need to require hashing support. */
+ /*
+ * If it's a recursive union, we need to require hashing
+ * support.
+ */
op->groupClauses = lappend(op->groupClauses,
makeSortGroupClauseForSetOp(rescoltype, recursive));
}
if (IsA(node, A_Const))
{
- A_Const *aconst = castNode(A_Const, node);
+ A_Const *aconst = castNode(A_Const, node);
int targetlist_pos = 0;
int target_pos;
}
break;
case T_JsonExpr:
- /* Context item and PASSING arguments are already
- * marked with collations in parse_expr.c. */
+
+ /*
+ * Context item and PASSING arguments are already
+ * marked with collations in parse_expr.c.
+ */
break;
default:
if (exprtype == JSONOID || exprtype == JSONBOID)
{
- format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
+ format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
ereport(WARNING,
(errmsg("FORMAT JSON has no effect for json and jsonb types"),
parser_errposition(pstate, ve->format->location)));
format = default_format;
}
else if (exprtype == JSONOID || exprtype == JSONBOID)
- format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
+ format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
else
format = default_format;
FuncExpr *fexpr;
Oid fnoid;
- if (cast_is_needed) /* only CAST is allowed */
+ if (cast_is_needed) /* only CAST is allowed */
ereport(ERROR,
(errcode(ERRCODE_CANNOT_COERCE),
errmsg("cannot cast type %s to %s",
format_type_be(exprtype),
format_type_be(targettype)),
- parser_errposition(pstate, location)));
+ parser_errposition(pstate, location)));
fnoid = targettype == JSONOID ? F_TO_JSON : F_TO_JSONB;
fexpr = makeFuncExpr(fnoid, targettype, list_make1(expr),
if (format->format_type == JS_FORMAT_JSON)
{
JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ?
- format->encoding : JS_ENC_UTF8;
+ format->encoding : JS_ENC_UTF8;
if (targettype != BYTEAOID &&
format->encoding != JS_ENC_DEFAULT)
list_make2(texpr, enc),
InvalidOid, InvalidOid,
COERCE_EXPLICIT_CALL);
+
fexpr->location = location;
return (Node *) fexpr;
/* try to coerce expression to the output type */
res = coerce_to_target_type(pstate, expr, exprtype,
returning->typid, returning->typmod,
- /* XXX throwing errors when casting to char(N) */
+ /* XXX throwing errors when casting to char(N) */
COERCION_EXPLICIT,
COERCE_EXPLICIT_CAST,
location);
Node *placeholder;
Node *coercion;
Oid intermediate_typid =
- returning->format->format_type == JS_FORMAT_JSONB ? JSONBOID : JSONOID;
+ returning->format->format_type == JS_FORMAT_JSONB ? JSONBOID : JSONOID;
jsctor->args = args;
jsctor->func = fexpr;
transformJsonArrayQueryConstructor(ParseState *pstate,
JsonArrayQueryConstructor *ctor)
{
- SubLink *sublink = makeNode(SubLink);
+ SubLink *sublink = makeNode(SubLink);
SelectStmt *select = makeNode(SelectStmt);
RangeSubselect *range = makeNode(RangeSubselect);
Alias *alias = makeNode(Alias);
Oid aggfnoid;
Node *node;
Expr *aggfilter = agg_ctor->agg_filter ? (Expr *)
- transformWhereClause(pstate, agg_ctor->agg_filter,
- EXPR_KIND_FILTER, "FILTER") : NULL;
+ transformWhereClause(pstate, agg_ctor->agg_filter,
+ EXPR_KIND_FILTER, "FILTER") : NULL;
aggfnoid = DatumGetInt32(DirectFunctionCall1(regprocin,
CStringGetDatum(aggfn)));
aggref->aggtype = aggtype;
/* aggcollid and inputcollid will be set by parse_collate.c */
- aggref->aggtranstype = InvalidOid; /* will be set by planner */
+ aggref->aggtranstype = InvalidOid; /* will be set by planner */
/* aggargtypes will be set by transformAggregateCall */
/* aggdirectargs and args will be set by transformAggregateCall */
/* aggorder and aggdistinct will be set by transformAggregateCall */
aggref->aggvariadic = false;
aggref->aggkind = AGGKIND_NORMAL;
/* agglevelsup will be set by transformAggregateCall */
- aggref->aggsplit = AGGSPLIT_SIMPLE; /* planner might change this */
+ aggref->aggsplit = AGGSPLIT_SIMPLE; /* planner might change this */
aggref->location = agg_ctor->location;
transformAggregateCall(pstate, aggref, args, agg_ctor->agg_order, false);
{
if (agg->absent_on_null)
if (agg->unique)
- aggfnname = "pg_catalog.jsonb_object_agg_unique_strict"; /* F_JSONB_OBJECT_AGG_UNIQUE_STRICT */
+ aggfnname = "pg_catalog.jsonb_object_agg_unique_strict"; /* F_JSONB_OBJECT_AGG_UNIQUE_STRICT */
else
- aggfnname = "pg_catalog.jsonb_object_agg_strict"; /* F_JSONB_OBJECT_AGG_STRICT */
+ aggfnname = "pg_catalog.jsonb_object_agg_strict"; /* F_JSONB_OBJECT_AGG_STRICT */
+ else if (agg->unique)
+ aggfnname = "pg_catalog.jsonb_object_agg_unique"; /* F_JSONB_OBJECT_AGG_UNIQUE */
else
- if (agg->unique)
- aggfnname = "pg_catalog.jsonb_object_agg_unique"; /* F_JSONB_OBJECT_AGG_UNIQUE */
- else
- aggfnname = "pg_catalog.jsonb_object_agg"; /* F_JSONB_OBJECT_AGG */
+ aggfnname = "pg_catalog.jsonb_object_agg"; /* F_JSONB_OBJECT_AGG */
aggtype = JSONBOID;
}
if (agg->unique)
aggfnname = "pg_catalog.json_object_agg_unique_strict"; /* F_JSON_OBJECT_AGG_UNIQUE_STRICT */
else
- aggfnname = "pg_catalog.json_object_agg_strict"; /* F_JSON_OBJECT_AGG_STRICT */
+ aggfnname = "pg_catalog.json_object_agg_strict"; /* F_JSON_OBJECT_AGG_STRICT */
+ else if (agg->unique)
+ aggfnname = "pg_catalog.json_object_agg_unique"; /* F_JSON_OBJECT_AGG_UNIQUE */
else
- if (agg->unique)
- aggfnname = "pg_catalog.json_object_agg_unique"; /* F_JSON_OBJECT_AGG_UNIQUE */
- else
- aggfnname = "pg_catalog.json_object_agg"; /* F_JSON_OBJECT_AGG */
+ aggfnname = "pg_catalog.json_object_agg"; /* F_JSON_OBJECT_AGG */
aggtype = JSONOID;
}
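For illustration of the branches above: JSON_OBJECTAGG with both ABSENT ON NULL and WITH UNIQUE KEYS and a jsonb RETURNING type resolves to pg_catalog.jsonb_object_agg_unique_strict, while the json variant with neither modifier falls through to pg_catalog.json_object_agg.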
* Transform a JSON output clause of JSON_VALUE and JSON_QUERY.
*/
static void
-transformJsonFuncExprOutput(ParseState *pstate, JsonFuncExpr *func,
+transformJsonFuncExprOutput(ParseState *pstate, JsonFuncExpr *func,
JsonExpr *jsexpr)
{
Node *expr = jsexpr->formatted_expr;
Oid typid;
} *p,
coercionTypids[] =
- {
- { &coercions->null, UNKNOWNOID },
- { &coercions->string, TEXTOID },
- { &coercions->numeric, NUMERICOID },
- { &coercions->boolean, BOOLOID },
- { &coercions->date, DATEOID },
- { &coercions->time, TIMEOID },
- { &coercions->timetz, TIMETZOID },
- { &coercions->timestamp, TIMESTAMPOID },
- { &coercions->timestamptz, TIMESTAMPTZOID },
- { &coercions->composite, contextItemTypeId },
- { NULL, InvalidOid }
- };
+ {
+ {&coercions->null, UNKNOWNOID},
+ {&coercions->string, TEXTOID},
+ {&coercions->numeric, NUMERICOID},
+ {&coercions->boolean, BOOLOID},
+ {&coercions->date, DATEOID},
+ {&coercions->time, TIMEOID},
+ {&coercions->timetz, TIMETZOID},
+ {&coercions->timestamp, TIMESTAMPOID},
+ {&coercions->timestamptz, TIMESTAMPTZOID},
+ {&coercions->composite, contextItemTypeId},
+ {NULL, InvalidOid}
+ };
for (p = coercionTypids; p->coercion; p++)
*p->coercion = initJsonItemCoercion(pstate, p->typid, returning);
transformJsonParseExpr(ParseState *pstate, JsonParseExpr *jsexpr)
{
JsonReturning *returning = transformJsonConstructorRet(pstate, jsexpr->output,
- "JSON()");
+ "JSON()");
Node *arg;
if (jsexpr->unique_keys)
}
return makeJsonConstructorExpr(pstate, JSCTOR_JSON_PARSE, list_make1(arg), NULL,
- returning, jsexpr->unique_keys, false,
- jsexpr->location);
+ returning, jsexpr->unique_keys, false,
+ jsexpr->location);
}
/*
{
Node *arg = transformExprRecurse(pstate, (Node *) jsexpr->expr);
JsonReturning *returning = transformJsonConstructorRet(pstate, jsexpr->output,
- "JSON_SCALAR()");
+ "JSON_SCALAR()");
if (exprType(arg) == UNKNOWNOID)
arg = coerce_to_specific_type(pstate, arg, TEXTOID, "JSON_SCALAR");
return makeJsonConstructorExpr(pstate, JSCTOR_JSON_SCALAR, list_make1(arg), NULL,
- returning, false, false, jsexpr->location);
+ returning, false, false, jsexpr->location);
}
/*
}
return makeJsonConstructorExpr(pstate, JSCTOR_JSON_SERIALIZE, list_make1(arg),
- NULL, returning, false, false, expr->location);
+ NULL, returning, false, false, expr->location);
}
/* Context for JSON_TABLE transformation */
typedef struct JsonTableContext
{
- ParseState *pstate; /* parsing state */
- JsonTable *table; /* untransformed node */
- TableFunc *tablefunc; /* transformed node */
- List *pathNames; /* list of all path and columns names */
- int pathNameId; /* path name id counter */
+ ParseState *pstate; /* parsing state */
+ JsonTable *table; /* untransformed node */
+ TableFunc *tablefunc; /* transformed node */
+ List *pathNames; /* list of all path and columns names */
+ int pathNameId; /* path name id counter */
Oid contextItemTypid; /* type oid of context item (json/jsonb) */
} JsonTableContext;
-static JsonTableParent * transformJsonTableColumns(JsonTableContext *cxt,
- JsonTablePlan *plan,
- List *columns,
- char *pathSpec,
- char **pathName,
- int location);
+static JsonTableParent *transformJsonTableColumns(JsonTableContext *cxt,
+ JsonTablePlan *plan,
+ List *columns,
+ char *pathSpec,
+ char **pathName,
+ int location);
static Node *
makeStringConst(char *str, int location)
{
- A_Const *n = makeNode(A_Const);
+ A_Const *n = makeNode(A_Const);
n->val.node.type = T_String;
n->val.sval.sval = str;
n->location = location;
- return (Node *)n;
+ return (Node *) n;
}
/*
static bool
isJsonTablePathNameDuplicate(JsonTableContext *cxt, const char *pathname)
{
- ListCell *lc;
+ ListCell *lc;
foreach(lc, cxt->pathNames)
{
foreach(lc, columns)
{
JsonTableColumn *jtc = castNode(JsonTableColumn, lfirst(lc));
- Node *node;
+ Node *node;
if (jtc->coltype != JTC_NESTED)
continue;
}
else
{
- Node *node1 =
- transformJsonTableChildPlan(cxt, plan->plan1, columns);
- Node *node2 =
- transformJsonTableChildPlan(cxt, plan->plan2, columns);
+ Node *node1 = transformJsonTableChildPlan(cxt, plan->plan1,
+ columns);
+ Node *node2 = transformJsonTableChildPlan(cxt, plan->plan2,
+ columns);
return makeJsonTableSiblingJoin(plan->join_type == JSTPJ_CROSS,
node1, node2);
static bool
typeIsComposite(Oid typid)
{
- char typtype;
+ char typtype;
if (typid == JSONOID ||
typid == JSONBOID ||
typtype = get_typtype(typid);
- if (typtype == TYPTYPE_COMPOSITE)
+ if (typtype == TYPTYPE_COMPOSITE)
return true;
if (typtype == TYPTYPE_DOMAIN)
JsonTable *jt = cxt->table;
TableFunc *tf = cxt->tablefunc;
bool errorOnError = jt->on_error &&
- jt->on_error->btype == JSON_BEHAVIOR_ERROR;
+ jt->on_error->btype == JSON_BEHAVIOR_ERROR;
foreach(col, columns)
{
if (rawc->name)
{
/* make sure column names are unique */
- ListCell *colname;
+ ListCell *colname;
foreach(colname, tf->colnames)
if (!strcmp((const char *) colname, rawc->name))
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("column name \"%s\" is not unique",
- rawc->name),
- parser_errposition(pstate, rawc->location)));
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("column name \"%s\" is not unique",
+ rawc->name),
+ parser_errposition(pstate, rawc->location)));
tf->colnames = lappend(tf->colnames,
makeString(pstrdup(rawc->name)));
}
/*
- * Determine the type and typmod for the new column. FOR
- * ORDINALITY columns are INTEGER by standard; the others are
- * user-specified.
+ * Determine the type and typmod for the new column. FOR ORDINALITY
+ * columns are INTEGER by standard; the others are user-specified.
*/
switch (rawc->coltype)
{
tf->coltypmods = lappend_int(tf->coltypmods, typmod);
tf->colcollations = lappend_oid(tf->colcollations,
type_is_collatable(typid)
- ? DEFAULT_COLLATION_OID
- : InvalidOid);
+ ? DEFAULT_COLLATION_OID
+ : InvalidOid);
tf->colvalexprs = lappend(tf->colvalexprs, colexpr);
}
}
errdetail("JSON_TABLE columns must contain "
"explicit AS pathname specification if "
"explicit PLAN clause is used"),
- parser_errposition(cxt->pstate, location)));
+ parser_errposition(cxt->pstate, location)));
*pathName = generateJsonTablePathName(cxt);
}
registerAllJsonTableColumns(&cxt, jt->columns);
-#if 0 /* XXX it' unclear from the standard whether root path name is mandatory or not */
+#if 0 /* XXX it's unclear from the standard whether
+ * root path name is mandatory or not */

if (plan && plan->plan_type != JSTP_DEFAULT && !rootPathName)
{
/* Assign root path name and create corresponding plan node */
JsonTablePlan *rootNode = makeNode(JsonTablePlan);
JsonTablePlan *rootPlan = (JsonTablePlan *)
- makeJsonTableJoinedPlan(JSTPJ_OUTER, (Node *) rootNode,
- (Node *) plan, jt->location);
+ makeJsonTableJoinedPlan(JSTPJ_OUTER, (Node *) rootNode,
+ (Node *) plan, jt->location);
rootPathName = generateJsonTablePathName(&cxt);
break;
case T_Float:
- {
- /* could be an oversize integer as well as a float ... */
-
- int64 val64;
- char *endptr;
-
- errno = 0;
- val64 = strtoi64(aconst->val.fval.fval, &endptr, 10);
- if (errno == 0 && *endptr == '\0')
{
- /*
- * It might actually fit in int32. Probably only INT_MIN can
- * occur, but we'll code the test generally just to be sure.
- */
- int32 val32 = (int32) val64;
+ /* could be an oversize integer as well as a float ... */
- if (val64 == (int64) val32)
- {
- val = Int32GetDatum(val32);
+ int64 val64;
+ char *endptr;
- typeid = INT4OID;
- typelen = sizeof(int32);
- typebyval = true;
+ errno = 0;
+ val64 = strtoi64(aconst->val.fval.fval, &endptr, 10);
+ if (errno == 0 && *endptr == '\0')
+ {
+ /*
+ * It might actually fit in int32. Probably only INT_MIN
+ * can occur, but we'll code the test generally just to be
+ * sure.
+ */
+ int32 val32 = (int32) val64;
+
+ if (val64 == (int64) val32)
+ {
+ val = Int32GetDatum(val32);
+
+ typeid = INT4OID;
+ typelen = sizeof(int32);
+ typebyval = true;
+ }
+ else
+ {
+ val = Int64GetDatum(val64);
+
+ typeid = INT8OID;
+ typelen = sizeof(int64);
+ typebyval = FLOAT8PASSBYVAL; /* int8 and float8 alike */
+ }
}
else
{
- val = Int64GetDatum(val64);
-
- typeid = INT8OID;
- typelen = sizeof(int64);
- typebyval = FLOAT8PASSBYVAL; /* int8 and float8 alike */
+ /* arrange to report location if numeric_in() fails */
+ setup_parser_errposition_callback(&pcbstate, pstate, aconst->location);
+ val = DirectFunctionCall3(numeric_in,
+ CStringGetDatum(aconst->val.fval.fval),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(-1));
+ cancel_parser_errposition_callback(&pcbstate);
+
+ typeid = NUMERICOID;
+ typelen = -1; /* variable len */
+ typebyval = false;
}
+ break;
}
- else
- {
- /* arrange to report location if numeric_in() fails */
- setup_parser_errposition_callback(&pcbstate, pstate, aconst->location);
- val = DirectFunctionCall3(numeric_in,
- CStringGetDatum(aconst->val.fval.fval),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(-1));
- cancel_parser_errposition_callback(&pcbstate);
-
- typeid = NUMERICOID;
- typelen = -1; /* variable len */
- typebyval = false;
- }
- break;
- }
case T_Boolean:
val = BoolGetDatum(boolVal(&aconst->val));
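
The restructured T_Float branch above implements a "smallest type that fits" rule for numeric literals: try a 64-bit integer parse, demote to int4 when the value fits, otherwise keep int8, and fall back to numeric when the string is not a plain integer at all. A hedged standalone sketch of that classification, using plain strtoll() in place of strtoi64(); classify_literal() and its return strings are invented for the example:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

static const char *
classify_literal(const char *s)
{
    char       *endptr;
    long long   val64;

    errno = 0;
    val64 = strtoll(s, &endptr, 10);
    if (errno == 0 && *endptr == '\0')
    {
        int32_t     val32 = (int32_t) val64;

        if (val64 == (long long) val32)
            return "int4";
        return "int8";
    }
    return "numeric";           /* overflow, or not a plain integer */
}

int
main(void)
{
    printf("%s\n", classify_literal("42"));            /* int4 */
    printf("%s\n", classify_literal("2147483648"));    /* int8 */
    printf("%s\n", classify_literal("1.5"));           /* numeric */
    return 0;
}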
*/
void
setup_parse_fixed_parameters(ParseState *pstate,
- const Oid *paramTypes, int numParams)
+ const Oid *paramTypes, int numParams)
{
FixedParamState *parstate = palloc(sizeof(FixedParamState));
*/
void
setup_parse_variable_parameters(ParseState *pstate,
- Oid **paramTypes, int *numParams)
+ Oid **paramTypes, int *numParams)
{
VarParamState *parstate = palloc(sizeof(VarParamState));
{
RangeTblEntry *rte = makeNode(RangeTblEntry);
char *refname = alias ? alias->aliasname :
- pstrdup(tf->functype == TFT_XMLTABLE ? "xmltable" : "json_table");
+ pstrdup(tf->functype == TFT_XMLTABLE ? "xmltable" : "json_table");
Alias *eref;
int numaliases;
* cached descriptor too. We determine that based on the pg_inherits.xmin
* that was saved alongside that descriptor: if the xmin that was not in
* progress for that active snapshot is also not in progress for the
- * current active snapshot, then we can use it. Otherwise build one
- * from scratch.
+ * current active snapshot, then we can use it. Otherwise build one from
+ * scratch.
*/
if (omit_detached &&
rel->rd_partdesc_nodetached &&
hctl.keysize = sizeof(Oid);
hctl.entrysize = sizeof(avl_dbase);
hctl.hcxt = tmpcxt;
- dbhash = hash_create("autovacuum db hash", 20, &hctl, /* magic number here FIXME */
+ dbhash = hash_create("autovacuum db hash", 20, &hctl, /* magic number here
+ * FIXME */
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
/* start by inserting the new database */
char dbname[NAMEDATALEN];
/*
- * Report autovac startup to the cumulative stats system. We deliberately do
- * this before InitPostgres, so that the last_autovac_time will get
- * updated even if the connection attempt fails. This is to prevent
- * autovac from getting "stuck" repeatedly selecting an unopenable
- * database, rather than making any progress on stuff it can connect
- * to.
+ * Report autovac startup to the cumulative stats system. We
+ * deliberately do this before InitPostgres, so that the
+ * last_autovac_time will get updated even if the connection attempt
+ * fails. This is to prevent autovac from getting "stuck" repeatedly
+ * selecting an unopenable database, rather than making any progress
+ * on stuff it can connect to.
*/
pgstat_report_autovac(dbid);
/*
* Create a per-backend PGPROC struct in shared memory, except in the
- * EXEC_BACKEND case where this was done in SubPostmasterMain. We must
- * do this before we can use LWLocks (and in the EXEC_BACKEND case we
- * already had to do some stuff with LWLocks).
+ * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
+ * this before we can use LWLocks (and in the EXEC_BACKEND case we already
+ * had to do some stuff with LWLocks).
*/
#ifndef EXEC_BACKEND
InitProcess();
int pgprocno; /* pgprocno of archiver process */
/*
- * Forces a directory scan in pgarch_readyXlog(). Protected by
- * arch_lck.
+ * Forces a directory scan in pgarch_readyXlog(). Protected by arch_lck.
*/
bool force_dir_scan;
slock_t arch_lck;
} PgArchData;
-char *XLogArchiveLibrary = "";
+char *XLogArchiveLibrary = "";
/* ----------
static void pgarch_archiveDone(char *xlog);
static void pgarch_die(int code, Datum arg);
static void HandlePgArchInterrupts(void);
-static int ready_file_comparator(Datum a, Datum b, void *arg);
+static int ready_file_comparator(Datum a, Datum b, void *arg);
static void LoadArchiveLibrary(void);
static void call_archive_module_shutdown_callback(int code, Datum arg);
/*
* If we still have stored file names from the previous directory scan,
- * try to return one of those. We check to make sure the status file
- * is still present, as the archive_command for a previous file may
- * have already marked it done.
+ * try to return one of those. We check to make sure the status file is
+ * still present, as the archive_command for a previous file may have
+ * already marked it done.
*/
while (arch_files->arch_files_size > 0)
{
- struct stat st;
+ struct stat st;
char status_file[MAXPGPATH];
char *arch_file;
CStringGetDatum(basename), NULL) > 0)
{
/*
- * Remove the lowest priority file and add the current one to
- * the heap.
+ * Remove the lowest priority file and add the current one to the
+ * heap.
*/
arch_file = DatumGetCString(binaryheap_remove_first(arch_files->arch_heap));
strcpy(arch_file, basename);
binaryheap_build(arch_files->arch_heap);
/*
- * Fill arch_files array with the files to archive in ascending order
- * of priority.
+ * Fill arch_files array with the files to archive in ascending order of
+ * priority.
*/
arch_files->arch_files_size = arch_files->arch_heap->bh_size;
for (int i = 0; i < arch_files->arch_files_size; i++)
static int
ready_file_comparator(Datum a, Datum b, void *arg)
{
- char *a_str = DatumGetCString(a);
- char *b_str = DatumGetCString(b);
- bool a_history = IsTLHistoryFileName(a_str);
- bool b_history = IsTLHistoryFileName(b_str);
+ char *a_str = DatumGetCString(a);
+ char *b_str = DatumGetCString(b);
+ bool a_history = IsTLHistoryFileName(a_str);
+ bool b_history = IsTLHistoryFileName(b_str);
/* Timeline history files always have the highest priority. */
if (a_history != b_history)
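
The reindented ready_file_comparator() above encodes a single ordering rule: timeline history files always sort ahead of ordinary WAL status files, with plain name order as the tiebreak. A standalone qsort()-style sketch of that rule; the suffix test stands in for IsTLHistoryFileName() and is an assumption made only for this example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

static bool
is_history_name(const char *name)
{
    size_t      len = strlen(name);

    return len > 8 && strcmp(name + len - 8, ".history") == 0;
}

static int
ready_name_cmp(const void *a, const void *b)
{
    const char *a_str = *(const char *const *) a;
    const char *b_str = *(const char *const *) b;
    bool        a_hist = is_history_name(a_str);
    bool        b_hist = is_history_name(b_str);

    if (a_hist != b_hist)
        return a_hist ? -1 : 1;     /* history file wins */
    return strcmp(a_str, b_str);
}

int
main(void)
{
    const char *files[] = {
        "000000010000000000000003",
        "00000002.history",
        "000000010000000000000002",
    };

    qsort(files, 3, sizeof(files[0]), ready_name_cmp);
    for (int i = 0; i < 3; i++)
        printf("%s\n", files[i]);   /* history file prints first */
    return 0;
}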
if (archiveLibChanged)
{
/*
- * Call the currently loaded archive module's shutdown callback, if
- * one is defined.
+ * Call the currently loaded archive module's shutdown callback,
+ * if one is defined.
*/
call_archive_module_shutdown_callback(0, 0);
* load the new one, but there is presently no mechanism for
* unloading a library (see the comment above
* internal_load_library()). To deal with this, we simply restart
- * the archiver. The new archive module will be loaded when the new
- * archiver process starts up.
+ * the archiver. The new archive module will be loaded when the
+ * new archiver process starts up.
*/
ereport(LOG,
(errmsg("restarting archiver process because value of "
memset(&ArchiveContext, 0, sizeof(ArchiveModuleCallbacks));
/*
- * If shell archiving is enabled, use our special initialization
- * function. Otherwise, load the library and call its
- * _PG_archive_module_init().
+ * If shell archiving is enabled, use our special initialization function.
+ * Otherwise, load the library and call its _PG_archive_module_init().
*/
if (XLogArchiveLibrary[0] == '\0')
archive_init = shell_archive_init;
/*
* If we reached normal running, we go straight to waiting for
- * client backends to exit. If already in PM_STOP_BACKENDS or
- * a later state, do not change it.
+ * client backends to exit. If already in PM_STOP_BACKENDS or a
+ * later state, do not change it.
*/
if (pmState == PM_RUN || pmState == PM_HOT_STANDBY)
connsAllowed = false;
/*
* Time between progress updates for long-running startup operations.
*/
-int log_startup_progress_interval = 10000; /* 10 sec */
+int log_startup_progress_interval = 10000; /* 10 sec */
/* Signal handlers */
static void StartupProcTriggerHandler(SIGNAL_ARGS);
/*
* Force reporting remaining WAL statistics at process exit.
*
- * Since pgstat_report_wal is invoked with 'force' is false in main loop
- * to avoid overloading the cumulative stats system, there may exist
- * unreported stats counters for the WAL writer.
+ * Since pgstat_report_wal is invoked with 'force' set to false in the
+ * main loop to avoid overloading the cumulative stats system, there
+ * may exist unreported stats counters for the WAL writer.
*/
pgstat_report_wal(true);
if (!OidIsValid(collation))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
else
{
/*
- * NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T;
- * the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not
- * have to be considered below.
+ * NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T; the
+ * case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not have to
+ * be considered below.
*/
pg_regex_locale = pg_newlocale_from_collation(collation);
* Finalize the backup manifest, and send it to the client.
*/
void
-SendBackupManifest(backup_manifest_info *manifest, bbsink * sink)
+SendBackupManifest(backup_manifest_info *manifest, bbsink *sink)
{
uint8 checksumbuf[PG_SHA256_DIGEST_LENGTH];
char checksumstringbuf[PG_SHA256_DIGEST_STRING_LENGTH];
{
bbsink_copystream *mysink = (bbsink_copystream *) sink;
bbsink_state *state = sink->bbs_state;
- char *buf;
+ char *buf;
/*
* Initialize buffer. We ultimately want to send the archive and manifest
* data by means of CopyData messages where the payload portion of each
* message begins with a type byte. However, basebackup.c expects the
* buffer to be aligned, so we can't just allocate one extra byte for the
- * type byte. Instead, allocate enough extra bytes that the portion of
- * the buffer we reveal to our callers can be aligned, while leaving room
- * to slip the type byte in just beforehand. That will allow us to ship
- * the data with a single call to pq_putmessage and without needing any
- * extra copying.
+ * type byte. Instead, allocate enough extra bytes that the portion of the
+ * buffer we reveal to our callers can be aligned, while leaving room to
+ * slip the type byte in just beforehand. That will allow us to ship the
+ * data with a single call to pq_putmessage and without needing any extra
+ * copying.
*/
buf = palloc(mysink->base.bbs_buffer_length + MAXIMUM_ALIGNOF);
mysink->msgbuffer = buf + (MAXIMUM_ALIGNOF - 1);
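
The comment above describes an over-allocation trick: reserve MAXIMUM_ALIGNOF extra bytes so the buffer handed to callers stays maximally aligned while one byte of headroom remains just before it for the CopyData type byte. A minimal standalone sketch of the pointer arithmetic; ALIGNOF and the variable names are stand-ins, and malloc() is assumed to return memory aligned to at least ALIGNOF:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define ALIGNOF 8               /* stand-in for MAXIMUM_ALIGNOF */

int
main(void)
{
    size_t      payload_len = 32;
    char       *raw = malloc(payload_len + ALIGNOF);
    char       *msg = raw + (ALIGNOF - 1);  /* type byte lives here */
    char       *payload = raw + ALIGNOF;    /* aligned buffer shown to callers */

    msg[0] = 'd';               /* e.g. a CopyData-style type byte */
    printf("payload aligned: %s\n",
           ((uintptr_t) payload % ALIGNOF) == 0 ? "yes" : "no");
    free(raw);
    return 0;
}

The message can then be shipped starting at msg with length payload_len + 1, so no second copy is needed to prepend the type byte.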
return NULL; /* keep compiler quiet */
#else
bbsink_gzip *sink;
- int compresslevel;
+ int compresslevel;
Assert(next != NULL);
bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name)
{
bbsink_gzip *mysink = (bbsink_gzip *) sink;
- char *gz_archive_name;
- z_stream *zs = &mysink->zstream;
+ char *gz_archive_name;
+ z_stream *zs = &mysink->zstream;
/* Initialize compressor object. */
memset(zs, 0, sizeof(z_stream));
zs->avail_out = sink->bbs_next->bbs_buffer_length;
/*
- * We need to use deflateInit2() rather than deflateInit() here so that
- * we can request a gzip header rather than a zlib header. Otherwise, we
- * want to supply the same values that would have been used by default
- * if we had just called deflateInit().
+ * We need to use deflateInit2() rather than deflateInit() here so that we
+ * can request a gzip header rather than a zlib header. Otherwise, we want
+ * to supply the same values that would have been used by default if we
+ * had just called deflateInit().
*
* Per the documentation for deflateInit2, the third argument must be
* Z_DEFLATED; the fourth argument is the number of "window bits", by
errmsg("could not initialize compression library"));
/*
- * Add ".gz" to the archive name. Note that the pg_basebackup -z
- * produces archives named ".tar.gz" rather than ".tgz", so we match
- * that here.
+ * Add ".gz" to the archive name. Note that the pg_basebackup -z produces
+ * archives named ".tar.gz" rather than ".tgz", so we match that here.
*/
gz_archive_name = psprintf("%s.gz", archive_name);
Assert(sink->bbs_next != NULL);
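
For readers unfamiliar with the zlib detail referenced above: a gzip header (rather than a zlib header) is requested by calling deflateInit2() with windowBits of 15 + 16, while keeping deflateInit()'s defaults for the remaining parameters. A minimal sketch, not the bbsink code; build with -lz:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int
main(void)
{
    z_stream    zs;
    unsigned char in[] = "hello, archive";
    unsigned char out[128];

    memset(&zs, 0, sizeof(zs));
    if (deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                     15 + 16,          /* 15 window bits, +16 => gzip header */
                     8,                /* default memLevel */
                     Z_DEFAULT_STRATEGY) != Z_OK)
        return 1;

    zs.next_in = in;
    zs.avail_in = sizeof(in) - 1;
    zs.next_out = out;
    zs.avail_out = sizeof(out);
    if (deflate(&zs, Z_FINISH) != Z_STREAM_END)
        return 1;
    printf("compressed %u bytes into %lu\n",
           (unsigned) (sizeof(in) - 1), (unsigned long) zs.total_out);
    deflateEnd(&zs);
    return 0;
}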
bbsink_gzip_archive_contents(bbsink *sink, size_t len)
{
bbsink_gzip *mysink = (bbsink_gzip *) sink;
- z_stream *zs = &mysink->zstream;
+ z_stream *zs = &mysink->zstream;
/* Compress data from input buffer. */
zs->next_in = (uint8 *) mysink->base.bbs_buffer;
while (zs->avail_in > 0)
{
- int res;
+ int res;
/* Write output data into unused portion of output buffer. */
Assert(mysink->bytes_written < mysink->base.bbs_next->bbs_buffer_length);
bbsink_gzip_end_archive(bbsink *sink)
{
bbsink_gzip *mysink = (bbsink_gzip *) sink;
- z_stream *zs = &mysink->zstream;
+ z_stream *zs = &mysink->zstream;
/* There is no more data available. */
zs->next_in = (uint8 *) mysink->base.bbs_buffer;
while (1)
{
- int res;
+ int res;
/* Write output data into unused portion of output buffer. */
Assert(mysink->bytes_written < mysink->base.bbs_next->bbs_buffer_length);
mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written;
/*
- * As bbsink_gzip_archive_contents, but pass Z_FINISH since there
- * is no more input.
+ * As bbsink_gzip_archive_contents, but pass Z_FINISH since there is
+ * no more input.
*/
res = deflate(zs, Z_FINISH);
if (res == Z_STREAM_ERROR)
mysink->base.bbs_next->bbs_buffer_length - zs->avail_out;
/*
- * Apparently we had no data in the output buffer and deflate()
- * was not able to add any. We must be done.
+ * Apparently we had no data in the output buffer and deflate() was
+ * not able to add any. We must be done.
*/
if (mysink->bytes_written == 0)
break;
return NULL; /* keep compiler quiet */
#else
bbsink_lz4 *sink;
- int compresslevel;
+ int compresslevel;
Assert(next != NULL);
/*
* It's not a good idea to store your backups in the same directory that
- * you're backing up. If we allowed a relative path here, that could easily
- * happen accidentally, so we don't. The user could still accomplish the
- * same thing by including the absolute path to $PGDATA in the pathname,
- * but that's likely an intentional bad decision rather than an accident.
+ * you're backing up. If we allowed a relative path here, that could
+ * easily happen accidentally, so we don't. The user could still
+ * accomplish the same thing by including the absolute path to $PGDATA in
+ * the pathname, but that's likely an intentional bad decision rather than
+ * an accident.
*/
if (!is_absolute_path(pathname))
ereport(ERROR,
switch (pg_check_dir(pathname))
{
case 0:
+
/*
- * Does not exist, so create it using the same permissions we'd use
- * for a new subdirectory of the data directory itself.
+ * Does not exist, so create it using the same permissions we'd
+ * use for a new subdirectory of the data directory itself.
*/
if (MakePGDirectory(pathname) < 0)
ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m", pathname)));
+ (errcode_for_file_access(),
+ errmsg("could not create directory \"%s\": %m", pathname)));
break;
case 1:
/*
* We found one, so update it.
*
- * It is probably not a great idea to call BaseBackupAddTarget
- * for the same name multiple times, but if it happens, this
- * seems like the sanest behavior.
+ * It is probably not a great idea to call BaseBackupAddTarget for
+ * the same name multiple times, but if it happens, this seems
+ * like the sanest behavior.
*/
ttype->check_detail = check_detail;
ttype->get_sink = get_sink;
}
/*
- * We use TopMemoryContext for allocations here to make sure that the
- * data we need doesn't vanish under us; that's also why we copy the
- * target name into a newly-allocated chunk of memory.
+ * We use TopMemoryContext for allocations here to make sure that the data
+ * we need doesn't vanish under us; that's also why we copy the target
+ * name into a newly-allocated chunk of memory.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
ttype = palloc(sizeof(BaseBackupTargetType));
if ((compress->options & PG_COMPRESSION_OPTION_WORKERS) != 0)
{
/*
- * On older versions of libzstd, this option does not exist, and trying
- * to set it will fail. Similarly for newer versions if they are
- * compiled without threading support.
+ * On older versions of libzstd, this option does not exist, and
+ * trying to set it will fail. Similarly for newer versions if they
+ * are compiled without threading support.
*/
ret = ZSTD_CCtx_setParameter(mysink->cctx, ZSTD_c_nbWorkers,
compress->workers);
{
XLogRecordBuffer buf;
TransactionId txid;
- RmgrData rmgr;
+ RmgrData rmgr;
buf.origptr = ctx->reader->ReadRecPtr;
buf.endptr = ctx->reader->EndRecPtr;
}
/*
- * We don't allow to invoke more sync workers once we have reached the sync
- * worker limit per subscription. So, just return silently as we might get
- * here because of an otherwise harmless race condition.
+ * We don't allow invoking more sync workers once we have reached the
+ * sync worker limit per subscription. So, just return silently as we
+ * might get here because of an otherwise harmless race condition.
*/
if (OidIsValid(relid) && nsyncworkers >= max_sync_workers_per_subscription)
{
}
/*
- * If the cache wasn't hit or it yielded a "does-not-exist" and we want
- * to create an entry.
+ * If the cache wasn't hit or it yielded a "does-not-exist" and we want to
+ * create an entry.
*/
/* search the lookup table */
/*
* Fetch info about column lists for the relation (from all the
- * publications). We unnest the int2vector values, because that
- * makes it easier to combine lists by simply adding the attnums
- * to a new bitmap (without having to parse the int2vector data).
- * This preserves NULL values, so that if one of the publications
- * has no column list, we'll know that.
+ * publications). We unnest the int2vector values, because that makes
+ * it easier to combine lists by simply adding the attnums to a new
+ * bitmap (without having to parse the int2vector data). This
+ * preserves NULL values, so that if one of the publications has no
+ * column list, we'll know that.
*/
resetStringInfo(&cmd);
appendStringInfo(&cmd,
nspname, relname, pubres->err)));
/*
- * Merge the column lists (from different publications) by creating
- * a single bitmap with all the attnums. If we find a NULL value,
- * that means one of the publications has no column list for the
- * table we're syncing.
+ * Merge the column lists (from different publications) by creating a
+ * single bitmap with all the attnums. If we find a NULL value, that
+ * means one of the publications has no column list for the table
+ * we're syncing.
*/
slot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
while (tuplestore_gettupleslot(pubres->tuplestore, true, false, slot))
{
- Datum cfval = slot_getattr(slot, 1, &isnull);
+ Datum cfval = slot_getattr(slot, 1, &isnull);
/* NULL means empty column list, so we're done. */
if (isnull)
}
included_cols = bms_add_member(included_cols,
- DatumGetInt16(cfval));
+ DatumGetInt16(cfval));
ExecClearTuple(slot);
}
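
The merging rule described above amounts to taking the union of attnums across publications, while treating a NULL column list as "replicate all columns". A rough standalone sketch with a 64-bit mask standing in for the Bitmapset; the arrays and names here are invented, and real attribute numbers are of course not bounded by 64:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

int
main(void)
{
    /* per-publication column lists; a NULL list means "no column list" */
    const int   pub1[] = {1, 2, 4};
    const int   pub2[] = {2, 3};
    const int  *lists[] = {pub1, pub2};
    const int   lens[] = {3, 2};
    const int   npubs = 2;

    uint64_t    merged = 0;     /* stands in for the Bitmapset */
    bool        all_columns = false;

    for (int p = 0; p < npubs; p++)
    {
        if (lists[p] == NULL)
        {
            /* one publication has no column list: replicate everything */
            all_columns = true;
            break;
        }
        for (int i = 0; i < lens[p]; i++)
            merged |= UINT64_C(1) << lists[p][i];
    }

    if (all_columns)
        printf("replicate all columns\n");
    else
        printf("merged attnums mask: 0x%llx\n", (unsigned long long) merged);
    return 0;
}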
quote_qualified_identifier(lrel.nspname, lrel.relname));
/*
- * XXX Do we need to list the columns in all cases? Maybe we're replicating
- * all columns?
+ * XXX Do we need to list the columns in all cases? Maybe we're
+ * replicating all columns?
*/
for (int i = 0; i < lrel.natts; i++)
{
/*
* COPY FROM does not honor RLS policies. That is not a problem for
- * subscriptions owned by roles with BYPASSRLS privilege (or superuser, who
- * has it implicitly), but other roles should not be able to circumvent
- * RLS. Disallow logical replication into RLS enabled relations for such
- * roles.
+ * subscriptions owned by roles with BYPASSRLS privilege (or superuser,
+ * who has it implicitly), but other roles should not be able to
+ * circumvent RLS. Disallow logical replication into RLS enabled
+ * relations for such roles.
*/
if (check_enable_rls(RelationGetRelid(rel), InvalidOid, false) == RLS_ENABLED)
ereport(ERROR,
static void
TargetPrivilegesCheck(Relation rel, AclMode mode)
{
- Oid relid;
- AclResult aclresult;
+ Oid relid;
+ AclResult aclresult;
relid = RelationGetRelid(rel);
aclresult = pg_class_aclcheck(relid, GetUserId(), mode);
Bitmapset *columns;
/*
- * Private context to store additional data for this entry - state for
- * the row filter expressions, column list, etc.
+ * Private context to store additional data for this entry - state for the
+ * row filter expressions, column list, etc.
*/
MemoryContext entry_cxt;
} RelationSyncEntry;
*/
typedef struct PGOutputTxnData
{
- bool sent_begin_txn; /* flag indicating whether BEGIN has
- * been sent */
-} PGOutputTxnData;
+ bool sent_begin_txn; /* flag indicating whether BEGIN has been sent */
+} PGOutputTxnData;
/* Map used to remember which relation schemas we sent. */
static HTAB *RelationSyncCache = NULL;
* using bandwidth on something with little/no use for logical replication.
*/
static void
-pgoutput_begin_txn(LogicalDecodingContext * ctx, ReorderBufferTXN * txn)
+pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
- PGOutputTxnData *txndata = MemoryContextAllocZero(ctx->context,
+ PGOutputTxnData *txndata = MemoryContextAllocZero(ctx->context,
sizeof(PGOutputTxnData));
txn->output_plugin_private = txndata;
*
* All the given publication-table mappings must be checked.
*
- * Multiple publications might have multiple column lists for this relation.
+ * Multiple publications might have multiple column lists for this
+ * relation.
*
* FOR ALL TABLES and FOR ALL TABLES IN SCHEMA implies "don't use column
* list" so it takes precedence.
bool pub_no_list = true;
/*
- * If the publication is FOR ALL TABLES then it is treated the same as if
- * there are no column lists (even if other publications have a list).
+ * If the publication is FOR ALL TABLES then it is treated the same as
+ * if there are no column lists (even if other publications have a
+ * list).
*/
if (!pub->alltables)
{
* Check for the presence of a column list in this publication.
*
* Note: If we find no pg_publication_rel row, it's a publication
- * defined for a whole schema, so it can't have a column list, just
- * like a FOR ALL TABLES publication.
+ * defined for a whole schema, so it can't have a column list,
+ * just like a FOR ALL TABLES publication.
*/
cftuple = SearchSysCache2(PUBLICATIONRELMAP,
ObjectIdGetDatum(entry->publish_as_relid),
* For updates, we can have only a new tuple when none of the replica
* identity columns changed and none of those columns have external data
* but we still need to evaluate the row filter for the new tuple as the
- * existing values of those columns might not match the filter. Also, users
- * can use constant expressions in the row filter, so we anyway need to
- * evaluate it for the new tuple.
+ * existing values of those columns might not match the filter. Also,
+ * users can use constant expressions in the row filter, so we anyway need
+ * to evaluate it for the new tuple.
*
* For deletes, we only have the old tuple.
*/
xid = txn->xid;
/*
- * Output BEGIN if we haven't yet. Avoid for non-transactional
- * messages.
+ * Output BEGIN if we haven't yet. Avoid for non-transactional messages.
*/
if (transactional)
{
/*
* Under what relid should we publish changes in this publication?
- * We'll use the top-most relid across all publications. Also track
- * the ancestor level for this publication.
+ * We'll use the top-most relid across all publications. Also
+ * track the ancestor level for this publication.
*/
- Oid pub_relid = relid;
- int ancestor_level = 0;
+ Oid pub_relid = relid;
+ int ancestor_level = 0;
/*
- * If this is a FOR ALL TABLES publication, pick the partition root
- * and set the ancestor level accordingly.
+ * If this is a FOR ALL TABLES publication, pick the partition
+ * root and set the ancestor level accordingly.
*/
if (pub->alltables)
{
/*
* We want to publish the changes as the top-most ancestor
- * across all publications. So we need to check if the
- * already calculated level is higher than the new one. If
- * yes, we can ignore the new value (as it's a child).
- * Otherwise the new value is an ancestor, so we keep it.
+ * across all publications. So we need to check if the already
+ * calculated level is higher than the new one. If yes, we can
+ * ignore the new value (as it's a child). Otherwise the new
+ * value is an ancestor, so we keep it.
*/
if (publish_ancestor_level > ancestor_level)
continue;
/*
- * If we found an ancestor higher up in the tree, discard
- * the list of publications through which we replicate it,
- * and use the new ancestor.
+ * If we found an ancestor higher up in the tree, discard the
+ * list of publications through which we replicate it, and use
+ * the new ancestor.
*/
if (publish_ancestor_level < ancestor_level)
{
MyReplicationSlot = s;
/*
- * The call to pgstat_acquire_replslot() protects against stats for
- * a different slot, from before a restart or such, being present during
+ * The call to pgstat_acquire_replslot() protects against stats for a
+ * different slot, from before a restart or such, being present during
* pgstat_report_replslot().
*/
if (SlotIsLogical(s))
if (!has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS))
{
/*
- * Only superusers and roles with privileges of pg_read_all_stats
- * can see details. Other users only get the pid value to know whether
- * it is a WAL receiver, but no details.
+ * Only superusers and roles with privileges of pg_read_all_stats can
+ * see details. Other users only get the pid value to know whether it
+ * is a WAL receiver, but no details.
*/
MemSet(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1));
}
* When skipping empty transactions in synchronous replication, we send a
* keepalive message to avoid delaying such transactions.
*
- * It is okay to check sync_standbys_defined flag without lock here as
- * in the worst case we will just send an extra keepalive message when it
- * is really not required.
+ * It is okay to check sync_standbys_defined flag without lock here as in
+ * the worst case we will just send an extra keepalive message when it is
+ * really not required.
*/
if (skipped_xact &&
SyncRepRequested() &&
/* result */
MVDependencies *dependencies = NULL;
- MemoryContext cxt;
+ MemoryContext cxt;
Assert(data->nattnums >= 2);
{
/*
* It's now safe to pin the buffer. We can't pin first and ask
- * questions later, because it might confuse code paths
- * like InvalidateBuffer() if we pinned a random non-matching
- * buffer.
+ * questions later, because it might confuse code paths like
+ * InvalidateBuffer() if we pinned a random non-matching buffer.
*/
if (have_private_ref)
PinBuffer(bufHdr, NULL); /* bump pin count */
if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
{
/*
- * Not every table AM uses BLCKSZ wide fixed size blocks.
- * Therefore tableam returns the size in bytes - but for the
- * purpose of this routine, we want the number of blocks.
- * Therefore divide, rounding up.
+ * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
+ * tableam returns the size in bytes - but for the purpose of this
+ * routine, we want the number of blocks. Therefore divide, rounding
+ * up.
*/
uint64 szbytes;
}
else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
{
- return smgrnblocks(RelationGetSmgr(relation), forkNum);
+ return smgrnblocks(RelationGetSmgr(relation), forkNum);
}
else
Assert(false);
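
The comment above boils down to a round-up integer division from bytes to BLCKSZ-sized blocks, so a partial trailing block still counts as one block. A tiny sketch of that conversion, assuming the usual default BLCKSZ of 8192:

#include <stdio.h>
#include <stdint.h>

#define BLCKSZ 8192

int
main(void)
{
    uint64_t    szbytes = 8193;     /* one full block plus one byte */
    uint64_t    nblocks = (szbytes + (BLCKSZ - 1)) / BLCKSZ;

    printf("%llu bytes -> %llu blocks\n",
           (unsigned long long) szbytes, (unsigned long long) nblocks);
    return 0;
}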
BufferAccessStrategy bstrategy_dst;
/*
- * In general, we want to write WAL whenever wal_level > 'minimal', but
- * we can skip it when copying any fork of an unlogged relation other
- * than the init fork.
+ * In general, we want to write WAL whenever wal_level > 'minimal', but we
+ * can skip it when copying any fork of an unlogged relation other than
+ * the init fork.
*/
use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
bool permanent)
{
- Relation src_rel;
- Relation dst_rel;
- char relpersistence;
+ Relation src_rel;
+ Relation dst_rel;
+ char relpersistence;
/* Set the relpersistence. */
relpersistence = permanent ?
/*
* We can't use a real relcache entry for a relation in some other
- * database, but since we're only going to access the fields related
- * to physical storage, a fake one is good enough. If we didn't do this
- * and used the smgr layer directly, we would have to worry about
+ * database, but since we're only going to access the fields related to
+ * physical storage, a fake one is good enough. If we didn't do this and
+ * used the smgr layer directly, we would have to worry about
* invalidations.
*/
src_rel = CreateFakeRelcacheEntry(src_rnode);
*
* We have to sort them logically, because in KnownAssignedXidsAdd we
* call TransactionIdFollowsOrEquals and so on. But we know these XIDs
- * come from RUNNING_XACTS, which means there are only normal XIDs from
- * the same epoch, so this is safe.
+ * come from RUNNING_XACTS, which means there are only normal XIDs
+ * from the same epoch, so this is safe.
*/
qsort(xids, nxids, sizeof(TransactionId), xidLogicalComparator);
}
/*
- * If the caller has requested force flush or we have written more than 1/4
- * of the ring size, mark it as written in shared memory and notify the
- * receiver.
+ * If the caller has requested force flush or we have written more than
+ * 1/4 of the ring size, mark it as written in shared memory and notify
+ * the receiver.
*/
if (force_flush || mqh->mqh_send_pending > (mq->mq_ring_size >> 2))
{
/*
* In Hot Standby mode, the startup process requests a procState array
- * slot using InitRecoveryTransactionEnvironment(). Even though MaxBackends
- * doesn't account for the startup process, it is guaranteed to get a
- * free slot. This is because the autovacuum launcher and worker processes,
- * which are included in MaxBackends, are not started in Hot Standby mode.
+ * slot using InitRecoveryTransactionEnvironment(). Even though
+ * MaxBackends doesn't account for the startup process, it is guaranteed
+ * to get a free slot. This is because the autovacuum launcher and worker
+ * processes, which are included in MaxBackends, are not started in Hot
+ * Standby mode.
*/
size = add_size(size, mul_size(sizeof(ProcState), MaxBackends));
if (finalusedlp != nline)
{
/* The last line pointer is not the last used line pointer */
- int nunusedend = nline - finalusedlp;
+ int nunusedend = nline - finalusedlp;
Assert(nunused >= nunusedend && nunusedend > 0);
ResetUsage();
query = parse_analyze_fixedparams(parsetree, query_string, paramTypes, numParams,
- queryEnv);
+ queryEnv);
if (log_parser_stats)
ShowUsage("PARSE ANALYSIS STATISTICS");
ResetUsage();
query = parse_analyze_varparams(parsetree, query_string, paramTypes, numParams,
- queryEnv);
+ queryEnv);
/*
* Check all parameter types got determined.
oldcontext = MemoryContextSwitchTo(MessageContext);
querytree_list = pg_analyze_and_rewrite_fixedparams(parsetree, query_string,
- NULL, 0, NULL);
+ NULL, 0, NULL);
plantree_list = pg_plan_queries(querytree_list, query_string,
CURSOR_OPT_PARALLEL_OK, NULL);
* Note: this includes fflush()'ing the last of the prior output.
*
* This is also a good time to flush out collected statistics to the
- * cumulative stats system, and to update the PS stats display. We avoid doing
- * those every time through the message loop because it'd slow down
- * processing of batched messages, and because we don't want to report
- * uncommitted updates (that confuses autovacuum). The notification
- * processor wants a call too, if we are not in a transaction block.
+ * cumulative stats system, and to update the PS stats display. We
+ * avoid doing those every time through the message loop because it'd
+ * slow down processing of batched messages, and because we don't want
+ * to report uncommitted updates (that confuses autovacuum). The
+ * notification processor wants a call too, if we are not in a
+ * transaction block.
*
* Also, if an idle timeout is enabled, start the timer for that.
*/
}
else
{
- long stats_timeout;
+ long stats_timeout;
/*
* Process incoming notifies (including self-notifies), if
/*
* (4) turn off the idle-in-transaction, idle-session and
- * idle-stats-update timeouts if active. We do this before step (5) so
- * that any last-moment timeout is certain to be detected in step (5).
+ * idle-stats-update timeouts if active. We do this before step (5)
+ * so that any last-moment timeout is certain to be detected in step
+ * (5).
*
* At most one of these timeouts will be active, so there's no need to
* worry about combining the timeout.c calls into one.
/*
* Make fake type cache entry structure. Note that we can't just
- * modify typentry, since that points directly into the type cache.
+ * modify typentry, since that points directly into the type
+ * cache.
*/
record_typentry = palloc0(sizeof(*record_typentry));
record_typentry->type_id = element_type;
AclResult aclresult;
/*
- * User must have connect privilege for target database or have privileges of
- * pg_read_all_stats
+ * User must have connect privilege for target database or have privileges
+ * of pg_read_all_stats
*/
aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT);
if (aclresult != ACLCHECK_OK &&
AclResult aclresult;
/*
- * User must have privileges of pg_read_all_stats or have CREATE privilege for
- * target tablespace, either explicitly granted or implicitly because it
- * is default for current database.
+ * User must have privileges of pg_read_all_stats or have CREATE privilege
+ * for target tablespace, either explicitly granted or implicitly because
+ * it is default for current database.
*/
if (tblspcOid != MyDatabaseTableSpace &&
!has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS))
{"month", 5, DCH_month, false, FROM_CHAR_DATE_GREGORIAN},
{"mon", 3, DCH_mon, false, FROM_CHAR_DATE_GREGORIAN},
{"ms", 2, DCH_MS, true, FROM_CHAR_DATE_NONE},
- {"of", 2, DCH_OF, false, FROM_CHAR_DATE_NONE}, /* o */
+ {"of", 2, DCH_OF, false, FROM_CHAR_DATE_NONE}, /* o */
{"p.m.", 4, DCH_p_m, false, FROM_CHAR_DATE_NONE}, /* p */
{"pm", 2, DCH_pm, false, FROM_CHAR_DATE_NONE},
{"q", 1, DCH_Q, true, FROM_CHAR_DATE_NONE}, /* q */
{"sssss", 5, DCH_SSSS, true, FROM_CHAR_DATE_NONE}, /* s */
{"ssss", 4, DCH_SSSS, true, FROM_CHAR_DATE_NONE},
{"ss", 2, DCH_SS, true, FROM_CHAR_DATE_NONE},
- {"tzh", 3, DCH_TZH, false, FROM_CHAR_DATE_NONE}, /* t */
+ {"tzh", 3, DCH_TZH, false, FROM_CHAR_DATE_NONE}, /* t */
{"tzm", 3, DCH_TZM, true, FROM_CHAR_DATE_NONE},
{"tz", 2, DCH_tz, false, FROM_CHAR_DATE_NONE},
{"us", 2, DCH_US, true, FROM_CHAR_DATE_NONE}, /* u */
if (!OidIsValid(collid))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
if (!OidIsValid(collid))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
if (!OidIsValid(collid))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
/* Context for key uniqueness check in builder functions */
typedef struct JsonUniqueBuilderState
{
- JsonUniqueCheckState check; /* unique check */
+ JsonUniqueCheckState check; /* unique check */
StringInfoData skipped_keys; /* skipped keys with NULL values */
- MemoryContext mcxt; /* context for saving skipped keys */
+ MemoryContext mcxt; /* context for saving skipped keys */
} JsonUniqueBuilderState;
/* Element of object stack for key uniqueness check during json parsing */
return false;
case JSONTYPE_ARRAY:
- return false; /* TODO recurse into elements */
+ return false; /* TODO recurse into elements */
case JSONTYPE_COMPOSITE:
- return false; /* TODO recurse into fields */
+ return false; /* TODO recurse into fields */
case JSONTYPE_NUMERIC:
case JSONTYPE_CAST:
json_unique_hash(const void *key, Size keysize)
{
const JsonUniqueHashEntry *entry = (JsonUniqueHashEntry *) key;
- uint32 hash = hash_bytes_uint32(entry->object_id);
+ uint32 hash = hash_bytes_uint32(entry->object_id);
hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len);
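
json_unique_hash() above combines a hash of the containing object's id with a hash of the key bytes by XOR, so equal keys in different objects land in different buckets. A standalone sketch of that combination, with FNV-1a standing in for the real hash_bytes()/hash_bytes_uint32() helpers; the constants and helper below are not PostgreSQL's:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t
fnv1a(const unsigned char *data, size_t len)
{
    uint32_t    h = 2166136261u;

    for (size_t i = 0; i < len; i++)
    {
        h ^= data[i];
        h *= 16777619u;
    }
    return h;
}

int
main(void)
{
    uint32_t    object_id = 42;
    const char *key = "name";
    uint32_t    hash;

    hash = fnv1a((const unsigned char *) &object_id, sizeof(object_id));
    hash ^= fnv1a((const unsigned char *) key, strlen(key));
    printf("combined hash: %u\n", hash);
    return 0;
}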
if (!out->data)
{
MemoryContext oldcxt = MemoryContextSwitchTo(cxt->mcxt);
+
initStringInfo(out);
MemoryContextSwitchTo(oldcxt);
}
out = state->str;
/*
- * Append comma delimiter only if we have already outputted some fields
- * after the initial string "{ ".
+ * Append comma delimiter only if we have already outputted some
+ * fields after the initial string "{ ".
*/
if (out->len > 2)
appendStringInfoString(out, ", ");
if (nulls[i])
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument %d cannot be null", i + 1),
+ errmsg("argument %d cannot be null", i + 1),
errhint("Object keys should be text.")));
/* save key offset before key appending */
Datum *args;
bool *nulls;
Oid *types;
+
/* build argument values to build the object */
int nargs = extract_variadic_args(fcinfo, 0, true,
&args, &types, &nulls);
Datum *args;
bool *nulls;
Oid *types;
+
/* build argument values to build the object */
int nargs = extract_variadic_args(fcinfo, 0, true,
&args, &types, &nulls);
if (throw_error)
json_ereport_error(result, lex);
- return false; /* invalid json */
+ return false; /* invalid json */
}
if (check_unique_keys && !state.unique)
(errcode(ERRCODE_DUPLICATE_JSON_OBJECT_KEY_VALUE),
errmsg("duplicate JSON object key value")));
- return false; /* not unique keys */
+ return false; /* not unique keys */
}
- return true; /* ok */
+ return true; /* ok */
}
/*
return false;
case JSONBTYPE_ARRAY:
- return false; /* TODO recurse into elements */
+ return false; /* TODO recurse into elements */
case JSONBTYPE_COMPOSITE:
- return false; /* TODO recurse into fields */
+ return false; /* TODO recurse into fields */
case JSONBTYPE_NUMERIC:
case JSONBTYPE_JSONCAST:
Datum *args;
bool *nulls;
Oid *types;
+
/* build argument values to build the object */
int nargs = extract_variadic_args(fcinfo, 0, true,
&args, &types, &nulls);
Datum *args;
bool *nulls;
Oid *types;
+
/* build argument values to build the object */
int nargs = extract_variadic_args(fcinfo, 0, true,
&args, &types, &nulls);
Jsonb *
JsonbMakeEmptyArray(void)
{
- JsonbValue jbv;
+ JsonbValue jbv;
jbv.type = jbvArray;
jbv.val.array.elems = NULL;
Jsonb *
JsonbMakeEmptyObject(void)
{
- JsonbValue jbv;
+ JsonbValue jbv;
jbv.type = jbvObject;
jbv.val.object.pairs = NULL;
return pstrdup(v.val.boolean ? "true" : "false");
else if (v.type == jbvNumeric)
return DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(v.val.numeric)));
+ PointerGetDatum(v.val.numeric)));
else if (v.type == jbvNull)
return pstrdup("null");
else
if (hasNonUniq || skip_nulls)
{
- JsonbPair *ptr, *res;
+ JsonbPair *ptr,
+ *res;
while (skip_nulls && object->val.object.nPairs > 0 &&
object->val.object.pairs->value.type == jbvNull)
json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod,
void **cache, MemoryContext mcxt, bool *isnull)
{
- JsValue jsv = { 0 };
+ JsValue jsv = {0};
JsonbValue jbv;
jsv.is_json = json_type == JSONOID;
jsv.val.json.str = VARDATA_ANY(json);
jsv.val.json.len = VARSIZE_ANY_EXHDR(json);
- jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */
+ jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in
+ * populate_composite() */
}
else
{
if (!*cache)
*cache = MemoryContextAllocZero(mcxt, sizeof(ColumnIOData));
- return populate_record_field(*cache , typid, typmod, NULL, mcxt,
+ return populate_record_field(*cache, typid, typmod, NULL, mcxt,
PointerGetDatum(NULL), &jsv, isnull);
}
{
List *varnames; /* list of variable names */
List *varexprs; /* list of variable expressions */
- JsonPathDatatypeStatus current; /* status of @ item */
+ JsonPathDatatypeStatus current; /* status of @ item */
bool lax; /* jsonpath is lax or strict */
bool mutable; /* resulting mutability status */
} JsonPathMutableContext;
jspIsMutableWalker(&arg, cxt);
break;
- /* literals */
+ /* literals */
case jpiNull:
case jpiString:
case jpiNumeric:
case jpiBool:
- /* accessors */
+ /* accessors */
case jpiKey:
case jpiAnyKey:
- /* special items */
+ /* special items */
case jpiSubscript:
case jpiLast:
- /* item methods */
+ /* item methods */
case jpiType:
case jpiSize:
case jpiAbs:
JsonbValue *value);
static void getJsonPathVariable(JsonPathExecContext *cxt,
JsonPathItem *variable, JsonbValue *value);
-static int getJsonPathVariableFromJsonb(void *varsJsonb, char *varName,
- int varNameLen, JsonbValue *val,
- JsonbValue *baseObject);
+static int getJsonPathVariableFromJsonb(void *varsJsonb, char *varName,
+ int varNameLen, JsonbValue *val,
+ JsonbValue *baseObject);
static int JsonbArraySize(JsonbValue *jb);
static JsonPathBool executeComparison(JsonPathItem *cmp, JsonbValue *lv,
JsonbValue *rv, void *p);
static JsonTableJoinState *JsonTableInitPlanState(JsonTableContext *cxt,
- Node *plan, JsonTableScanState *parent);
+ Node *plan, JsonTableScanState *parent);
static bool JsonTableNextRow(JsonTableScanState *scan);
compareDatetime(Datum val1, Oid typid1, Datum val2, Oid typid2,
bool useTz, bool *cast_error)
{
- PGFunction cmpfunc;
+ PGFunction cmpfunc;
*cast_error = false;
JsonbValue *
JsonPathValue(Datum jb, JsonPath *jp, bool *empty, bool *error, List *vars)
{
- JsonbValue *res;
- JsonValueList found = { 0 };
+ JsonbValue *res;
+ JsonValueList found = {0};
JsonPathExecResult jper PG_USED_FOR_ASSERTS_ONLY;
int count;
text *txt = DatumGetTextP(val);
char *str = text_to_cstring(txt);
Jsonb *jb =
- DatumGetJsonbP(DirectFunctionCall1(jsonb_in,
- CStringGetDatum(str)));
+ DatumGetJsonbP(DirectFunctionCall1(jsonb_in,
+ CStringGetDatum(str)));
pfree(str);
{
JsonTableContext *cxt;
PlanState *ps = &state->ss.ps;
- TableFuncScan *tfs = castNode(TableFuncScan, ps->plan);
+ TableFuncScan *tfs = castNode(TableFuncScan, ps->plan);
TableFunc *tf = tfs->tablefunc;
JsonExpr *ci = castNode(JsonExpr, tf->docexpr);
JsonTableParent *root = castNode(JsonTableParent, tf->plan);
{
MemoryContext oldcxt;
JsonPathExecResult res;
- Jsonb *js = (Jsonb *) DatumGetJsonbP(item);
+ Jsonb *js = (Jsonb *) DatumGetJsonbP(item);
JsonValueListClear(&scan->found);
oldcxt = MemoryContextSwitchTo(scan->mcxt);
res = executeJsonPath(scan->path, scan->args, EvalJsonPathVar, js,
- scan->errorOnError, &scan->found, false /* FIXME */);
+ scan->errorOnError, &scan->found, false /* FIXME */ );
MemoryContextSwitchTo(oldcxt);
/* inner rows are exhausted */
if (state->u.join.cross)
- state->u.join.advanceRight = false; /* next outer row */
+ state->u.join.advanceRight = false; /* next outer row */
else
- return false; /* end of scan */
+ return false; /* end of scan */
}
while (!state->u.join.advanceRight)
JsonTableRescanRecursive(state->u.join.right);
if (!JsonTableNextJoinRow(state->u.join.right))
- continue; /* next outer row */
+ continue; /* next outer row */
state->u.join.advanceRight = true; /* next inner row */
}
{
scan->current = PointerGetDatum(NULL);
scan->currentIsNull = true;
- return false; /* end of scan */
+ return false; /* end of scan */
}
/* set current row item */
JsonTableScanState *scan = cxt->colexprs[colnum].scan;
Datum result;
- if (scan->currentIsNull) /* NULL from outer/union join */
+ if (scan->currentIsNull) /* NULL from outer/union join */
{
result = (Datum) 0;
*isnull = true;
}
- else if (estate) /* regular column */
+ else if (estate) /* regular column */
{
result = ExecEvalExpr(estate, econtext, isnull);
}
if (!OidIsValid(collation))
{
/*
- * This typically means that the parser could not resolve a
- * conflict of implicit collations, so report it that way.
+ * This typically means that the parser could not resolve a conflict
+ * of implicit collations, so report it that way.
*/
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
if (range_count == 0)
{
/*
- * Add an empty range so we get an empty result (not a null result).
+ * Add an empty range so we get an empty result (not a null
+ * result).
*/
accumArrayResult(state,
RangeTypePGetDatum(make_empty_range(rngtypcache)),
alloc_var(result, res_ndigits);
res_digits = result->digits;
+ /*
+ * The full multiple-place algorithm is taken from Knuth volume 2,
+ * Algorithm 4.3.1D.
+ *
+ * We need the first divisor digit to be >= NBASE/2. If it isn't, make it
+ * so by scaling up both the divisor and dividend by the factor "d". (The
+ * reason for allocating dividend[0] above is to leave room for possible
+ * carry here.)
+ */
+ if (divisor[1] < HALF_NBASE)
+ {
+ int d = NBASE / (divisor[1] + 1);
+
+ carry = 0;
+ for (i = var2ndigits; i > 0; i--)
+ {
+ carry += divisor[i] * d;
+ divisor[i] = carry % NBASE;
+ carry = carry / NBASE;
+ }
+ Assert(carry == 0);
+ carry = 0;
+ /* at this point only var1ndigits of dividend can be nonzero */
+ for (i = var1ndigits; i >= 0; i--)
+ {
+ carry += dividend[i] * d;
+ dividend[i] = carry % NBASE;
+ carry = carry / NBASE;
+ }
+ Assert(carry == 0);
+ Assert(divisor[1] >= HALF_NBASE);
+ }
+ /* First 2 divisor digits are used repeatedly in main loop */
+ divisor1 = divisor[1];
+ divisor2 = divisor[2];
+
+ /*
+ * Begin the main loop. Each iteration of this loop produces the j'th
+ * quotient digit by dividing dividend[j .. j + var2ndigits] by the
+ * divisor; this is essentially the same as the common manual procedure
+ * for long division.
+ */
+ for (j = 0; j < res_ndigits; j++)
+ {
+ /* Estimate quotient digit from the first two dividend digits */
+ int next2digits = dividend[j] * NBASE + dividend[j + 1];
+ int qhat;
+
/*
- * The full multiple-place algorithm is taken from Knuth volume 2,
- * Algorithm 4.3.1D.
- *
- * We need the first divisor digit to be >= NBASE/2. If it isn't,
- * make it so by scaling up both the divisor and dividend by the
- * factor "d". (The reason for allocating dividend[0] above is to
- * leave room for possible carry here.)
+ * If next2digits are 0, then quotient digit must be 0 and there's no
+ * need to adjust the working dividend. It's worth testing here to
+ * fall out ASAP when processing trailing zeroes in a dividend.
*/
- if (divisor[1] < HALF_NBASE)
+ if (next2digits == 0)
{
- int d = NBASE / (divisor[1] + 1);
-
- carry = 0;
- for (i = var2ndigits; i > 0; i--)
- {
- carry += divisor[i] * d;
- divisor[i] = carry % NBASE;
- carry = carry / NBASE;
- }
- Assert(carry == 0);
- carry = 0;
- /* at this point only var1ndigits of dividend can be nonzero */
- for (i = var1ndigits; i >= 0; i--)
- {
- carry += dividend[i] * d;
- dividend[i] = carry % NBASE;
- carry = carry / NBASE;
- }
- Assert(carry == 0);
- Assert(divisor[1] >= HALF_NBASE);
+ res_digits[j] = 0;
+ continue;
}
- /* First 2 divisor digits are used repeatedly in main loop */
- divisor1 = divisor[1];
- divisor2 = divisor[2];
+
+ if (dividend[j] == divisor1)
+ qhat = NBASE - 1;
+ else
+ qhat = next2digits / divisor1;
/*
- * Begin the main loop. Each iteration of this loop produces the j'th
- * quotient digit by dividing dividend[j .. j + var2ndigits] by the
- * divisor; this is essentially the same as the common manual
- * procedure for long division.
+ * Adjust quotient digit if it's too large. Knuth proves that after
+ * this step, the quotient digit will be either correct or just one
+ * too large. (Note: it's OK to use dividend[j+2] here because we
+ * know the divisor length is at least 2.)
*/
- for (j = 0; j < res_ndigits; j++)
+ while (divisor2 * qhat >
+ (next2digits - qhat * divisor1) * NBASE + dividend[j + 2])
+ qhat--;
+
+ /* As above, need do nothing more when quotient digit is 0 */
+ if (qhat > 0)
{
- /* Estimate quotient digit from the first two dividend digits */
- int next2digits = dividend[j] * NBASE + dividend[j + 1];
- int qhat;
+ NumericDigit *dividend_j = ÷nd[j];
/*
- * If next2digits are 0, then quotient digit must be 0 and there's
- * no need to adjust the working dividend. It's worth testing
- * here to fall out ASAP when processing trailing zeroes in a
- * dividend.
+ * Multiply the divisor by qhat, and subtract that from the
+ * working dividend. The multiplication and subtraction are
+ * folded together here, noting that qhat <= NBASE (since it might
+ * be one too large), and so the intermediate result "tmp_result"
+ * is in the range [-NBASE^2, NBASE - 1], and "borrow" is in the
+ * range [0, NBASE].
*/
- if (next2digits == 0)
+ borrow = 0;
+ for (i = var2ndigits; i >= 0; i--)
{
- res_digits[j] = 0;
- continue;
- }
+ int tmp_result;
- if (dividend[j] == divisor1)
- qhat = NBASE - 1;
- else
- qhat = next2digits / divisor1;
+ tmp_result = dividend_j[i] - borrow - divisor[i] * qhat;
+ borrow = (NBASE - 1 - tmp_result) / NBASE;
+ dividend_j[i] = tmp_result + borrow * NBASE;
+ }
/*
- * Adjust quotient digit if it's too large. Knuth proves that
- * after this step, the quotient digit will be either correct or
- * just one too large. (Note: it's OK to use dividend[j+2] here
- * because we know the divisor length is at least 2.)
+ * If we got a borrow out of the top dividend digit, then indeed
+ * qhat was one too large. Fix it, and add back the divisor to
+ * correct the working dividend. (Knuth proves that this will
+ * occur only about 3/NBASE of the time; hence, it's a good idea
+ * to test this code with small NBASE to be sure this section gets
+ * exercised.)
*/
- while (divisor2 * qhat >
- (next2digits - qhat * divisor1) * NBASE + dividend[j + 2])
- qhat--;
-
- /* As above, need do nothing more when quotient digit is 0 */
- if (qhat > 0)
+ if (borrow)
{
- NumericDigit *dividend_j = &dividend[j];
-
- /*
- * Multiply the divisor by qhat, and subtract that from the
- * working dividend. The multiplication and subtraction are
- * folded together here, noting that qhat <= NBASE (since it
- * might be one too large), and so the intermediate result
- * "tmp_result" is in the range [-NBASE^2, NBASE - 1], and
- * "borrow" is in the range [0, NBASE].
- */
- borrow = 0;
+ qhat--;
+ carry = 0;
for (i = var2ndigits; i >= 0; i--)
{
- int tmp_result;
-
- tmp_result = dividend_j[i] - borrow - divisor[i] * qhat;
- borrow = (NBASE - 1 - tmp_result) / NBASE;
- dividend_j[i] = tmp_result + borrow * NBASE;
- }
-
- /*
- * If we got a borrow out of the top dividend digit, then
- * indeed qhat was one too large. Fix it, and add back the
- * divisor to correct the working dividend. (Knuth proves
- * that this will occur only about 3/NBASE of the time; hence,
- * it's a good idea to test this code with small NBASE to be
- * sure this section gets exercised.)
- */
- if (borrow)
- {
- qhat--;
- carry = 0;
- for (i = var2ndigits; i >= 0; i--)
+ carry += dividend_j[i] + divisor[i];
+ if (carry >= NBASE)
{
- carry += dividend_j[i] + divisor[i];
- if (carry >= NBASE)
- {
- dividend_j[i] = carry - NBASE;
- carry = 1;
- }
- else
- {
- dividend_j[i] = carry;
- carry = 0;
- }
+ dividend_j[i] = carry - NBASE;
+ carry = 1;
+ }
+ else
+ {
+ dividend_j[i] = carry;
+ carry = 0;
}
- /* A carry should occur here to cancel the borrow above */
- Assert(carry == 1);
}
+ /* A carry should occur here to cancel the borrow above */
+ Assert(carry == 1);
}
-
- /* And we're done with this quotient digit */
- res_digits[j] = qhat;
}
+ /* And we're done with this quotient digit */
+ res_digits[j] = qhat;
+ }
+
pfree(dividend);
/*
}
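For readers not steeped in Knuth's Algorithm 4.3.1D, here is a small standalone C99 sketch (not part of this patch; it uses an illustrative base of 10000, like NBASE, and random inputs) of the property the comments above depend on: a quotient digit estimated from the first two dividend digits and corrected with the second divisor digit is always either exact or one too large.

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define B 10000                 /* illustrative base, like NBASE */

    static int
    digit(void)
    {
        return rand() % B;
    }

    int
    main(void)
    {
        srand(42);

        for (int iter = 0; iter < 1000000; iter++)
        {
            /* normalized 3-digit divisor: leading digit >= B/2, per Algorithm D */
            int         v1 = B / 2 + rand() % (B / 2);
            int         v2 = digit();
            int         v3 = digit();
            long long   v = ((long long) v1 * B + v2) * B + v3;
            int         u0, u1, u2, u3;
            long long   u;
            int         next2digits, qhat;
            long long   q;

            /* 4-digit dividend window whose true quotient digit fits in [0, B) */
            do
            {
                u0 = digit();
                u1 = digit();
                u2 = digit();
                u3 = digit();
                u = (((long long) u0 * B + u1) * B + u2) * B + u3;
            } while (u >= v * B);

            /* estimate from the first two dividend digits, as in the code above */
            next2digits = u0 * B + u1;
            qhat = (u0 == v1) ? B - 1 : next2digits / v1;

            /* correct it using the second divisor digit and third dividend digit */
            while ((long long) v2 * qhat >
                   ((long long) next2digits - (long long) qhat * v1) * B + u2)
                qhat--;

            /* Knuth: the corrected estimate is the true digit, or one too large */
            q = u / v;
            assert(qhat == q || qhat == q + 1);
        }

        printf("qhat was always exact or one too large\n");
        return 0;
    }

The rare "one too large" outcome is exactly what the borrow/add-back branch in the hunk above handles.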
datum = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion,
- &isnull);
+ &isnull);
if (!isnull)
{
char *actual_versionstr;
{
#ifdef USE_ICU
UCollator *collator;
- UErrorCode status;
+ UErrorCode status;
status = U_ZERO_ERROR;
collator = ucol_open(icu_locale, &status);
char *stats_type = text_to_cstring(PG_GETARG_TEXT_P(0));
Oid dboid = PG_GETARG_OID(1);
Oid objoid = PG_GETARG_OID(2);
- PgStat_Kind kind = pgstat_get_kind_from_str(stats_type);
+ PgStat_Kind kind = pgstat_get_kind_from_str(stats_type);
PG_RETURN_BOOL(pgstat_have_entry(kind, dboid, objoid));
}
/*
* Non-empty range A contains non-empty range B if lower
* bound of A is lower or equal to lower bound of range B
- * and upper bound of range A is greater than or equal to upper
- * bound of range A.
+ * and upper bound of range A is greater than or equal to
+ * upper bound of range B.
*
* All non-empty ranges contain an empty range.
*/
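A trivial standalone check of the containment rule stated above (plain C with illustrative names, not taken from the range type code):

    #include <stdbool.h>
    #include <stdio.h>

    /* non-empty [a_lo, a_hi] contains non-empty [b_lo, b_hi] */
    static bool
    range_contains(int a_lo, int a_hi, int b_lo, int b_hi)
    {
        return a_lo <= b_lo && a_hi >= b_hi;
    }

    int
    main(void)
    {
        printf("%d\n", range_contains(1, 10, 3, 7));    /* 1: contained */
        printf("%d\n", range_contains(1, 10, 3, 12));   /* 0: B's upper bound exceeds A's */
        return 0;
    }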
Oid fk_relid; /* referencing relation */
char confupdtype; /* foreign key's ON UPDATE action */
char confdeltype; /* foreign key's ON DELETE action */
- int ndelsetcols; /* number of columns referenced in ON DELETE SET clause */
- int16 confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on delete */
+ int ndelsetcols; /* number of columns referenced in ON DELETE
+ * SET clause */
+ int16 confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on
+ * delete */
char confmatchtype; /* foreign key's match type */
int nkeys; /* number of key columns */
int16 pk_attnums[RI_MAX_NUMKEYS]; /* attnums of referenced cols */
/*
* Fetch or prepare a saved plan for the trigger.
*/
- switch (tgkind) {
+ switch (tgkind)
+ {
case RI_TRIGTYPE_UPDATE:
queryno = is_set_null
? RI_PLAN_SETNULL_ONUPDATE
const char *qualsep;
Oid queryoids[RI_MAX_NUMKEYS];
const char *fk_only;
- int num_cols_to_set;
+ int num_cols_to_set;
const int16 *set_cols;
- switch (tgkind) {
+ switch (tgkind)
+ {
case RI_TRIGTYPE_UPDATE:
num_cols_to_set = riinfo->nkeys;
set_cols = riinfo->fk_attnums;
break;
case RI_TRIGTYPE_DELETE:
+
/*
- * If confdelsetcols are present, then we only update
- * the columns specified in that array, otherwise we
- * update all the referencing columns.
+ * If confdelsetcols are present, then we only update the
+ * columns specified in that array, otherwise we update all
+ * the referencing columns.
*/
- if (riinfo->ndelsetcols != 0) {
+ if (riinfo->ndelsetcols != 0)
+ {
num_cols_to_set = riinfo->ndelsetcols;
set_cols = riinfo->confdelsetcols;
}
- else {
+ else
+ {
num_cols_to_set = riinfo->nkeys;
set_cols = riinfo->fk_attnums;
}
if (string)
appendStringInfo(&buf, " ON DELETE %s", string);
- /* Add columns specified to SET NULL or SET DEFAULT if provided. */
+ /*
+ * Add columns specified to SET NULL or SET DEFAULT if
+ * provided.
+ */
val = SysCacheGetAttr(CONSTROID, tup,
Anum_pg_constraint_confdelsetcols, &isnull);
if (!isnull)
case T_GroupingFunc: /* own parentheses */
case T_WindowFunc: /* own parentheses */
case T_CaseExpr: /* other separators */
- case T_JsonExpr: /* own parentheses */
+ case T_JsonExpr: /* own parentheses */
return true;
default:
return false;
if (format->encoding != JS_ENC_DEFAULT)
{
const char *encoding =
- format->encoding == JS_ENC_UTF16 ? "UTF16" :
- format->encoding == JS_ENC_UTF32 ? "UTF32" : "UTF8";
+ format->encoding == JS_ENC_UTF16 ? "UTF16" :
+ format->encoding == JS_ENC_UTF32 ? "UTF32" : "UTF8";
appendStringInfo(buf, " ENCODING %s", encoding);
}
if (!json_format_by_default ||
returning->format->format_type !=
- (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON))
+ (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON))
get_json_format(returning->format, buf);
}
if (jexpr->passing_values)
{
- ListCell *lc1, *lc2;
+ ListCell *lc1,
+ *lc2;
bool needcomma = false;
appendStringInfoString(buf, " PASSING ");
if (nargs > 0)
{
const char *sep = ctor->type == JSCTOR_JSON_OBJECT &&
- (nargs % 2) != 0 ? " : " : ", ";
+ (nargs % 2) != 0 ? " : " : ", ";
appendStringInfoString(buf, sep);
}
if (is_json_objectagg)
{
if (i > 2)
- break; /* skip ABSENT ON NULL and WITH UNIQUE args */
+ break; /* skip ABSENT ON NULL and WITH UNIQUE
+ * args */
appendStringInfoString(buf, " : ");
}
}
else
{
- JsonTableParent *n = castNode(JsonTableParent, node);
+ JsonTableParent *n = castNode(JsonTableParent, node);
- if (needcomma)
- appendStringInfoChar(context->buf, ',');
+ if (needcomma)
+ appendStringInfoChar(context->buf, ',');
- appendStringInfoChar(context->buf, ' ');
- appendContextKeyword(context, "NESTED PATH ", 0, 0, 0);
- get_const_expr(n->path, context, -1);
- appendStringInfo(context->buf, " AS %s", quote_identifier(n->name));
- get_json_table_columns(tf, n, context, showimplicit);
+ appendStringInfoChar(context->buf, ' ');
+ appendContextKeyword(context, "NESTED PATH ", 0, 0, 0);
+ get_const_expr(n->path, context, -1);
+ appendStringInfo(context->buf, " AS %s", quote_identifier(n->name));
+ get_json_table_columns(tf, n, context, showimplicit);
}
}
}
else
{
- JsonTableParent *n = castNode(JsonTableParent, node);
+ JsonTableParent *n = castNode(JsonTableParent, node);
- appendStringInfoString(context->buf, quote_identifier(n->name));
+ appendStringInfoString(context->buf, quote_identifier(n->name));
- if (n->child)
- {
+ if (n->child)
+ {
appendStringInfoString(context->buf,
n->outerJoin ? " OUTER " : " INNER ");
get_json_table_plan(tf, n->child, context,
IsA(n->child, JsonTableSibling));
- }
+ }
}
if (parenthesize)
if (jexpr->passing_values)
{
- ListCell *lc1, *lc2;
+ ListCell *lc1,
+ *lc2;
bool needcomma = false;
appendStringInfoChar(buf, ' ');
*/
double
estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs,
- double input_rows,
- List **pgset, EstimationInfo *estinfo,
- List **cache_varinfos, int prevNExprs)
+ double input_rows,
+ List **pgset, EstimationInfo *estinfo,
+ List **cache_varinfos, int prevNExprs)
{
List *varinfos = (cache_varinfos) ? *cache_varinfos : NIL;
double srf_multiplier = 1.0;
if (cache_varinfos && j++ < prevNExprs)
{
if (pgset)
- i++; /* to keep in sync with lines below */
+ i++; /* to keep in sync with lines below */
continue;
}
Oid statOid = InvalidOid;
MVNDistinct *stats;
StatisticExtInfo *matched_info = NULL;
- RangeTblEntry *rte;
+ RangeTblEntry *rte;
/* bail out immediately if the table has no extended statistics */
if (!rel->statlist)
foreach(slist, onerel->statlist)
{
StatisticExtInfo *info = (StatisticExtInfo *) lfirst(slist);
- RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root);
+ RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root);
ListCell *expr_item;
int pos;
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
#if SIZEOF_DATUM >= 8
+
/*
* If this build has pass-by-value timestamps, then we can use a standard
* comparator function.
if (type == UNITS)
{
interval2itm(*interval, tm);
- switch (val)
- {
- case DTK_MILLENNIUM:
- /* caution: C division may have negative remainder */
- tm->tm_year = (tm->tm_year / 1000) * 1000;
- /* FALL THRU */
- case DTK_CENTURY:
- /* caution: C division may have negative remainder */
- tm->tm_year = (tm->tm_year / 100) * 100;
- /* FALL THRU */
- case DTK_DECADE:
- /* caution: C division may have negative remainder */
- tm->tm_year = (tm->tm_year / 10) * 10;
- /* FALL THRU */
- case DTK_YEAR:
- tm->tm_mon = 0;
- /* FALL THRU */
- case DTK_QUARTER:
- tm->tm_mon = 3 * (tm->tm_mon / 3);
- /* FALL THRU */
- case DTK_MONTH:
- tm->tm_mday = 0;
- /* FALL THRU */
- case DTK_DAY:
- tm->tm_hour = 0;
- /* FALL THRU */
- case DTK_HOUR:
- tm->tm_min = 0;
- /* FALL THRU */
- case DTK_MINUTE:
- tm->tm_sec = 0;
- /* FALL THRU */
- case DTK_SECOND:
- tm->tm_usec = 0;
- break;
- case DTK_MILLISEC:
- tm->tm_usec = (tm->tm_usec / 1000) * 1000;
- break;
- case DTK_MICROSEC:
- break;
-
- default:
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("unit \"%s\" not supported for type %s",
- lowunits, format_type_be(INTERVALOID)),
- (val == DTK_WEEK) ? errdetail("Months usually have fractional weeks.") : 0));
- }
+ switch (val)
+ {
+ case DTK_MILLENNIUM:
+ /* caution: C division may have negative remainder */
+ tm->tm_year = (tm->tm_year / 1000) * 1000;
+ /* FALL THRU */
+ case DTK_CENTURY:
+ /* caution: C division may have negative remainder */
+ tm->tm_year = (tm->tm_year / 100) * 100;
+ /* FALL THRU */
+ case DTK_DECADE:
+ /* caution: C division may have negative remainder */
+ tm->tm_year = (tm->tm_year / 10) * 10;
+ /* FALL THRU */
+ case DTK_YEAR:
+ tm->tm_mon = 0;
+ /* FALL THRU */
+ case DTK_QUARTER:
+ tm->tm_mon = 3 * (tm->tm_mon / 3);
+ /* FALL THRU */
+ case DTK_MONTH:
+ tm->tm_mday = 0;
+ /* FALL THRU */
+ case DTK_DAY:
+ tm->tm_hour = 0;
+ /* FALL THRU */
+ case DTK_HOUR:
+ tm->tm_min = 0;
+ /* FALL THRU */
+ case DTK_MINUTE:
+ tm->tm_sec = 0;
+ /* FALL THRU */
+ case DTK_SECOND:
+ tm->tm_usec = 0;
+ break;
+ case DTK_MILLISEC:
+ tm->tm_usec = (tm->tm_usec / 1000) * 1000;
+ break;
+ case DTK_MICROSEC:
+ break;
- if (itm2interval(tm, result) != 0)
+ default:
ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("interval out of range")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("unit \"%s\" not supported for type %s",
+ lowunits, format_type_be(INTERVALOID)),
+ (val == DTK_WEEK) ? errdetail("Months usually have fractional weeks.") : 0));
+ }
+
+ if (itm2interval(tm, result) != 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("interval out of range")));
}
else
{
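The switch above relies on deliberate fall-through so that truncating to a coarse unit also clears every finer-grained field. A standalone sketch of the same idiom, with an illustrative struct and unit names rather than the real pg_itm/DTK symbols:

    #include <stdio.h>

    /* interval-like fields, all zero-based, as in the switch above */
    struct itm
    {
        int         year, mon, mday, hour, min, sec, usec;
    };

    enum unit
    {
        U_YEAR, U_QUARTER, U_MONTH, U_DAY, U_HOUR, U_MINUTE, U_SECOND
    };

    /*
     * Cascading truncation: each case clears its own field and falls through
     * so every finer-grained field below it is cleared too.
     */
    static void
    trunc_itm(struct itm *tm, enum unit u)
    {
        switch (u)
        {
            case U_YEAR:
                tm->mon = 0;
                /* FALL THRU */
            case U_QUARTER:
                tm->mon = 3 * (tm->mon / 3);
                /* FALL THRU */
            case U_MONTH:
                tm->mday = 0;
                /* FALL THRU */
            case U_DAY:
                tm->hour = 0;
                /* FALL THRU */
            case U_HOUR:
                tm->min = 0;
                /* FALL THRU */
            case U_MINUTE:
                tm->sec = 0;
                /* FALL THRU */
            case U_SECOND:
                tm->usec = 0;
                break;
        }
    }

    int
    main(void)
    {
        struct itm  tm = {5, 8, 17, 13, 45, 30, 250000};

        trunc_itm(&tm, U_QUARTER);
        printf("%d years %d mon %d days %02d:%02d:%02d.%06d\n",
               tm.year, tm.mon, tm.mday, tm.hour, tm.min, tm.sec, tm.usec);
        return 0;
    }

Truncating 5 years 8 months 17 days 13:45:30.25 to quarter prints "5 years 6 mon 0 days 00:00:00.000000": month 8 rounds down to 6 and everything finer is zeroed.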
if (type == UNITS)
{
interval2itm(*interval, tm);
- switch (val)
- {
- case DTK_MICROSEC:
- intresult = tm->tm_sec * INT64CONST(1000000) + tm->tm_usec;
- break;
+ switch (val)
+ {
+ case DTK_MICROSEC:
+ intresult = tm->tm_sec * INT64CONST(1000000) + tm->tm_usec;
+ break;
- case DTK_MILLISEC:
- if (retnumeric)
- /*---
- * tm->tm_sec * 1000 + fsec / 1000
- * = (tm->tm_sec * 1'000'000 + fsec) / 1000
- */
- PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 3));
- else
- PG_RETURN_FLOAT8(tm->tm_sec * 1000.0 + tm->tm_usec / 1000.0);
- break;
+ case DTK_MILLISEC:
+ if (retnumeric)
+ /*---
+ * tm->tm_sec * 1000 + fsec / 1000
+ * = (tm->tm_sec * 1'000'000 + fsec) / 1000
+ */
+ PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 3));
+ else
+ PG_RETURN_FLOAT8(tm->tm_sec * 1000.0 + tm->tm_usec / 1000.0);
+ break;
- case DTK_SECOND:
- if (retnumeric)
- /*---
- * tm->tm_sec + fsec / 1'000'000
- * = (tm->tm_sec * 1'000'000 + fsec) / 1'000'000
- */
- PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 6));
- else
- PG_RETURN_FLOAT8(tm->tm_sec + tm->tm_usec / 1000000.0);
- break;
+ case DTK_SECOND:
+ if (retnumeric)
+ /*---
+ * tm->tm_sec + fsec / 1'000'000
+ * = (tm->tm_sec * 1'000'000 + fsec) / 1'000'000
+ */
+ PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 6));
+ else
+ PG_RETURN_FLOAT8(tm->tm_sec + tm->tm_usec / 1000000.0);
+ break;
- case DTK_MINUTE:
- intresult = tm->tm_min;
- break;
+ case DTK_MINUTE:
+ intresult = tm->tm_min;
+ break;
- case DTK_HOUR:
- intresult = tm->tm_hour;
- break;
+ case DTK_HOUR:
+ intresult = tm->tm_hour;
+ break;
- case DTK_DAY:
- intresult = tm->tm_mday;
- break;
+ case DTK_DAY:
+ intresult = tm->tm_mday;
+ break;
- case DTK_MONTH:
- intresult = tm->tm_mon;
- break;
+ case DTK_MONTH:
+ intresult = tm->tm_mon;
+ break;
- case DTK_QUARTER:
- intresult = (tm->tm_mon / 3) + 1;
- break;
+ case DTK_QUARTER:
+ intresult = (tm->tm_mon / 3) + 1;
+ break;
- case DTK_YEAR:
- intresult = tm->tm_year;
- break;
+ case DTK_YEAR:
+ intresult = tm->tm_year;
+ break;
- case DTK_DECADE:
- /* caution: C division may have negative remainder */
- intresult = tm->tm_year / 10;
- break;
+ case DTK_DECADE:
+ /* caution: C division may have negative remainder */
+ intresult = tm->tm_year / 10;
+ break;
- case DTK_CENTURY:
- /* caution: C division may have negative remainder */
- intresult = tm->tm_year / 100;
- break;
+ case DTK_CENTURY:
+ /* caution: C division may have negative remainder */
+ intresult = tm->tm_year / 100;
+ break;
- case DTK_MILLENNIUM:
- /* caution: C division may have negative remainder */
- intresult = tm->tm_year / 1000;
- break;
+ case DTK_MILLENNIUM:
+ /* caution: C division may have negative remainder */
+ intresult = tm->tm_year / 1000;
+ break;
- default:
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("unit \"%s\" not supported for type %s",
- lowunits, format_type_be(INTERVALOID))));
- intresult = 0;
- }
+ default:
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("unit \"%s\" not supported for type %s",
+ lowunits, format_type_be(INTERVALOID))));
+ intresult = 0;
+ }
}
else if (type == RESERV && val == DTK_EPOCH)
{
*
* This is needed so that ssup_datum_unsigned_cmp() (an unsigned integer
* 3-way comparator) works correctly on all platforms. If we didn't do
- * this, the comparator would have to call memcmp() with a pair of pointers
- * to the first byte of each abbreviated key, which is slower.
+ * this, the comparator would have to call memcmp() with a pair of
+ * pointers to the first byte of each abbreviated key, which is slower.
*/
res = DatumBigEndianToNative(res);
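For context, this comment is about abbreviated sort keys: the leading bytes of each string are packed big-endian into a Datum, so a single unsigned integer comparison orders the keys the same way memcmp() would order the underlying prefixes. A standalone illustration (plain C with made-up helper names, not the actual varstr abbreviation code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* pack up to the first 8 bytes of a string big-endian, zero-padded */
    static uint64_t
    abbrev_key(const char *s)
    {
        unsigned char buf[8] = {0};
        size_t      n = strlen(s);
        uint64_t    key = 0;

        memcpy(buf, s, n < 8 ? n : 8);
        for (int i = 0; i < 8; i++)
            key = (key << 8) | buf[i];
        return key;
    }

    int
    main(void)
    {
        const char *a = "abcdefgh";
        const char *b = "abcdefgi";

        /* both lines print 1: the orderings agree */
        printf("%d\n", memcmp(a, b, 8) < 0);
        printf("%d\n", abbrev_key(a) < abbrev_key(b));
        return 0;
    }

The loop above builds the native integer value directly; the real code instead stores the bytes in big-endian order and then converts once with DatumBigEndianToNative(), as the comment explains, so the comparator can avoid memcmp().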
bool result;
Oid collid = PG_GET_COLLATION();
bool locale_is_c = false;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
check_collation_set(collid);
bool result;
Oid collid = PG_GET_COLLATION();
bool locale_is_c = false;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
check_collation_set(collid);
{
Oid collid = PG_GET_COLLATION();
bool locale_is_c = false;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
bool result;
check_collation_set(collid);
{
Oid collid = PG_GET_COLLATION();
bool locale_is_c = false;
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
bool result;
check_collation_set(collid);
*
* This is needed so that ssup_datum_unsigned_cmp() (an unsigned integer
* 3-way comparator) works correctly on all platforms. If we didn't do
- * this, the comparator would have to call memcmp() with a pair of pointers
- * to the first byte of each abbreviated key, which is slower.
+ * this, the comparator would have to call memcmp() with a pair of
+ * pointers to the first byte of each abbreviated key, which is slower.
*/
res = DatumBigEndianToNative(res);
queryEnv);
else
tlist = pg_analyze_and_rewrite_fixedparams(rawtree,
- plansource->query_string,
- plansource->param_types,
- plansource->num_params,
- queryEnv);
+ plansource->query_string,
+ plansource->param_types,
+ plansource->num_params,
+ queryEnv);
/* Release snapshot if we got one */
if (snapshot_set)
Bitmapset *uindexattrs; /* columns in unique indexes */
Bitmapset *pkindexattrs; /* columns in the primary index */
Bitmapset *idindexattrs; /* columns in the replica identity */
- Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
+ Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
List *indexoidlist;
List *newindexoidlist;
Oid relpkindex;
{
if (indexDesc->rd_indam->amhotblocking)
hotblockingattrs = bms_add_member(hotblockingattrs,
- attrnum - FirstLowInvalidHeapAttributeNumber);
+ attrnum - FirstLowInvalidHeapAttributeNumber);
if (isKey && i < indexDesc->rd_index->indnkeyatts)
uindexattrs = bms_add_member(uindexattrs,
pull_varattnos(indexExpressions, 1, &hotblockingattrs);
/*
- * Collect all attributes in the index predicate, too. We have to ignore
- * amhotblocking flag, because the row might become indexable, in which
- * case we have to add it to the index.
+ * Collect all attributes in the index predicate, too. We have to
+ * ignore amhotblocking flag, because the row might become indexable,
+ * in which case we have to add it to the index.
*/
pull_varattnos(indexPredicate, 1, &hotblockingattrs);
/*
* Now save copies of the bitmaps in the relcache entry. We intentionally
* set rd_attrsvalid last, because that's what signals validity of the
- * values; if we run out of memory before making that copy, we won't
- * leave the relcache entry looking like the other ones are valid but
- * empty.
+ * values; if we run out of memory before making that copy, we won't leave
+ * the relcache entry looking like the other ones are valid but empty.
*/
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
relation->rd_keyattr = bms_copy(uindexattrs);
pubdesc->pubactions.pubtruncate |= pubform->pubtruncate;
/*
- * Check if all columns referenced in the filter expression are part of
- * the REPLICA IDENTITY index or not.
+ * Check if all columns referenced in the filter expression are part
+ * of the REPLICA IDENTITY index or not.
*
* If the publication is FOR ALL TABLES then it means the table has no
* row filters and we can skip the validation.
if (!pubform->puballtables &&
(pubform->pubupdate || pubform->pubdelete) &&
pub_rf_contains_invalid_column(pubid, relation, ancestors,
- pubform->pubviaroot))
+ pubform->pubviaroot))
{
if (pubform->pubupdate)
pubdesc->rf_valid_for_update = false;
if (!pubform->puballtables &&
(pubform->pubupdate || pubform->pubdelete) &&
pub_collist_contains_invalid_column(pubid, relation, ancestors,
- pubform->pubviaroot))
+ pubform->pubviaroot))
{
if (pubform->pubupdate)
pubdesc->cols_valid_for_update = false;
void
RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath, char *dstdbpath)
{
- RelMapFile map;
+ RelMapFile map;
/*
* Read the relmap file from the source database.
* RelationMappingLock.
*
* There's no point in trying to preserve files here. The new database
- * isn't usable yet anyway, and won't ever be if we can't install a
- * relmap file.
+ * isn't usable yet anyway, and won't ever be if we can't install a relmap
+ * file.
*/
write_relmap_file(&map, true, false, false, dbid, tsid, dstdbpath);
}
* There shouldn't be anyone else updating relmaps during WAL replay,
* but grab the lock to interlock against load_relmap_file().
*
- * Note that we use the same WAL record for updating the relmap of
- * an existing database as we do for creating a new database. In
- * the latter case, taking the relmap log and sending sinval messages
- * is unnecessary, but harmless. If we wanted to avoid it, we could
- * add a flag to the WAL record to indicate which operation is being
+ * Note that we use the same WAL record for updating the relmap of an
+ * existing database as we do for creating a new database. In the
+ * latter case, taking the relmap log and sending sinval messages is
+ * unnecessary, but harmless. If we wanted to avoid it, we could add a
+ * flag to the WAL record to indicate which operation is being
* performed.
*/
LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);
iculocale = NULL;
default_locale.provider = dbform->datlocprovider;
+
/*
* Default locale is currently always deterministic. Nondeterministic
* locales currently don't support pattern matching, which would break a
InitTemporaryFileAccess();
/*
- * Initialize local buffers for WAL record construction, in case we
- * ever try to insert XLOG.
+ * Initialize local buffers for WAL record construction, in case we ever
+ * try to insert XLOG.
*/
InitXLogInsert();
}
/*
- * If this is either a bootstrap process or a standalone backend, start
- * up the XLOG machinery, and register to have it closed down at exit.
- * In other cases, the startup process is responsible for starting up
- * the XLOG machinery, and the checkpointer for closing it down.
+ * If this is either a bootstrap process or a standalone backend, start up
+ * the XLOG machinery, and register to have it closed down at exit. In
+ * other cases, the startup process is responsible for starting up the
+ * XLOG machinery, and the checkpointer for closing it down.
*/
if (!IsUnderPostmaster)
{
*/
#ifdef USE_ASSERT_CHECKING
{
- int held_lwlocks = LWLockHeldCount();
+ int held_lwlocks = LWLockHeldCount();
+
if (held_lwlocks)
elog(WARNING, "holding %d lwlocks at the end of ShutdownPostgres()",
held_lwlocks);
break;
case T_JsonExpr:
{
- JsonExpr *jexpr = (JsonExpr *) node;
+ JsonExpr *jexpr = (JsonExpr *) node;
APP_JUMB(jexpr->op);
JumbleExpr(jstate, jexpr->formatted_expr);
/* These are specific to the index_btree subcase: */
bool enforceUnique; /* complain if we find duplicate tuples */
- bool uniqueNullsNotDistinct; /* unique constraint null treatment */
+ bool uniqueNullsNotDistinct; /* unique constraint null treatment */
/* These are specific to the index_hash subcase: */
uint32 high_mask; /* masks for sortable part of hash code */
return compare;
/*
- * No need to waste effort calling the tiebreak function when there are
- * no other keys to sort on.
+ * No need to waste effort calling the tiebreak function when there are no
+ * other keys to sort on.
*/
if (state->onlyKey != NULL)
return 0;
return compare;
/*
- * No need to waste effort calling the tiebreak function when there are
- * no other keys to sort on.
+ * No need to waste effort calling the tiebreak function when there are no
+ * other keys to sort on.
*/
if (state->onlyKey != NULL)
return 0;
int compare;
compare = ApplyInt32SortComparator(a->datum1, a->isnull1,
- b->datum1, b->isnull1,
- &state->sortKeys[0]);
+ b->datum1, b->isnull1,
+ &state->sortKeys[0]);
if (compare != 0)
return compare;
/*
- * No need to waste effort calling the tiebreak function when there are
- * no other keys to sort on.
+ * No need to waste effort calling the tiebreak function when there are no
+ * other keys to sort on.
*/
if (state->onlyKey != NULL)
return 0;
* the new cluster should be the result of a fresh initdb.)
*
* We use "STRATEGY = file_copy" here because checkpoints during initdb
- * are cheap. "STRATEGY = wal_log" would generate more WAL, which would
- * be a little bit slower and make the new cluster a little bit bigger.
+ * are cheap. "STRATEGY = wal_log" would generate more WAL, which would be
+ * a little bit slower and make the new cluster a little bit bigger.
*/
static const char *const template0_setup[] = {
"CREATE DATABASE template0 IS_TEMPLATE = true ALLOW_CONNECTIONS = false"
if ($ENV{with_icu} eq 'yes')
{
- command_fails_like(['initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2"],
+ command_fails_like(
+ [ 'initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2" ],
qr/initdb: error: ICU locale must be specified/,
'locale provider ICU requires --icu-locale');
- command_ok(['initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=en', "$tempdir/data3"],
+ command_ok(
+ [
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=en',
+ "$tempdir/data3"
+ ],
'option --icu-locale');
- command_fails_like(['initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=@colNumeric=lower', "$tempdir/dataX"],
+ command_fails_like(
+ [
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=@colNumeric=lower',
+ "$tempdir/dataX"
+ ],
qr/FATAL: could not open collator for locale/,
'fails for invalid ICU locale');
}
else
{
- command_fails(['initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2"],
- 'locale provider ICU fails since no ICU support');
+ command_fails(
+ [ 'initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2" ],
+ 'locale provider ICU fails since no ICU support');
}
-command_fails(['initdb', '--no-sync', '--locale-provider=xyz', "$tempdir/dataX"],
- 'fails for invalid locale provider');
+command_fails(
+ [ 'initdb', '--no-sync', '--locale-provider=xyz', "$tempdir/dataX" ],
+ 'fails for invalid locale provider');
-command_fails(['initdb', '--no-sync', '--locale-provider=libc', '--icu-locale=en', "$tempdir/dataX"],
- 'fails for invalid option combination');
+command_fails(
+ [
+ 'initdb', '--no-sync',
+ '--locale-provider=libc', '--icu-locale=en',
+ "$tempdir/dataX"
+ ],
+ 'fails for invalid option combination');
done_testing();
if (PQresultStatus(res) == PGRES_TUPLES_OK)
{
- int ntups = PQntuples(res);
+ int ntups = PQntuples(res);
if (ntups > 1)
{
/*
* We expect the btree checking functions to return one void row
* each, or zero rows if the check was skipped due to the object
- * being in the wrong state to be checked, so we should output some
- * sort of warning if we get anything more, not because it
- * indicates corruption, but because it suggests a mismatch between
- * amcheck and pg_amcheck versions.
+ * being in the wrong state to be checked, so we should output
+ * some sort of warning if we get anything more, not because it
+ * indicates corruption, but because it suggests a mismatch
+ * between amcheck and pg_amcheck versions.
*
* In conjunction with --progress, anything written to stderr at
* this time would present strangely to the user without an extra
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): localhost\.postgres/
],
- 'multipart database patterns are rejected'
-);
+ 'multipart database patterns are rejected');
# Check that a three-part schema name is rejected
$node->command_checks_all(
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): localhost\.postgres\.pg_catalog/
],
- 'three part schema patterns are rejected'
-);
+ 'three part schema patterns are rejected');
# Check that a four-part table name is rejected
$node->command_checks_all(
[
qr/pg_amcheck: error: improper relation name \(too many dotted names\): localhost\.postgres\.pg_catalog\.pg_class/
],
- 'four part table patterns are rejected'
-);
+ 'four part table patterns are rejected');
# Check that too many dotted names still draws an error under --no-strict-names
# That flag means that it is ok for the object to be missing, not that it is ok
# for the object name to be ungrammatical
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-t', 'this.is.a.really.long.dotted.string' ],
+ [
+ 'pg_amcheck', '--no-strict-names',
+ '-t', 'this.is.a.really.long.dotted.string'
+ ],
2,
[qr/^$/],
[
qr/pg_amcheck: error: improper relation name \(too many dotted names\): this\.is\.a\.really\.long\.dotted\.string/
],
- 'ungrammatical table names still draw errors under --no-strict-names'
-);
+ 'ungrammatical table names still draw errors under --no-strict-names');
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-s', 'postgres.long.dotted.string' ],
+ [
+ 'pg_amcheck', '--no-strict-names', '-s',
+ 'postgres.long.dotted.string'
+ ],
2,
[qr/^$/],
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): postgres\.long\.dotted\.string/
],
- 'ungrammatical schema names still draw errors under --no-strict-names'
-);
+ 'ungrammatical schema names still draw errors under --no-strict-names');
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-d', 'postgres.long.dotted.string' ],
+ [
+ 'pg_amcheck', '--no-strict-names', '-d',
+ 'postgres.long.dotted.string'
+ ],
2,
[qr/^$/],
[
qr/pg_amcheck: error: improper qualified name \(too many dotted names\): postgres\.long\.dotted\.string/
],
- 'ungrammatical database names still draw errors under --no-strict-names'
-);
+ 'ungrammatical database names still draw errors under --no-strict-names');
# Likewise for exclusion patterns
$node->command_checks_all(
'-r', 'postgres.none.none',
'-r', 'postgres.pg_catalog.none',
'-r', 'postgres.none.pg_class',
- '-t', 'postgres.pg_catalog.pg_class', # This exists
+ '-t', 'postgres.pg_catalog.pg_class', # This exists
],
0,
[qr/^$/],
));
# We have not yet broken the index, so we should get no corruption
-$node->command_like(
- [ 'pg_amcheck', '-p', $node->port, 'postgres' ],
+$node->command_like([ 'pg_amcheck', '-p', $node->port, 'postgres' ],
qr/^$/,
'pg_amcheck all schemas, tables and indexes reports no corruption');
void (*report_output_file) (const char *);
char filename[MAXPGPATH];
FILE *file;
-} bbstreamer_extractor;
+} bbstreamer_extractor;
static void bbstreamer_plain_writer_content(bbstreamer *streamer,
bbstreamer_member *member,
bbstreamer_extractor_finalize(bbstreamer *streamer)
{
bbstreamer_extractor *mystreamer PG_USED_FOR_ASSERTS_ONLY
- = (bbstreamer_extractor *) streamer;
+ = (bbstreamer_extractor *) streamer;
Assert(mystreamer->file == NULL);
}
bbstreamer base;
char *pathname;
gzFile gzfile;
-} bbstreamer_gzip_writer;
+} bbstreamer_gzip_writer;
typedef struct bbstreamer_gzip_decompressor
{
};
static void bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
- bbstreamer_member *member,
- const char *data, int len,
- bbstreamer_archive_context context);
+ bbstreamer_member *member,
+ const char *data, int len,
+ bbstreamer_archive_context context);
static void bbstreamer_gzip_decompressor_finalize(bbstreamer *streamer);
static void bbstreamer_gzip_decompressor_free(bbstreamer *streamer);
static void *gzip_palloc(void *opaque, unsigned items, unsigned size);
bbstreamer_gzip_decompressor_new(bbstreamer *next)
{
#ifdef HAVE_LIBZ
- bbstreamer_gzip_decompressor *streamer;
- z_stream *zs;
+ bbstreamer_gzip_decompressor *streamer;
+ z_stream *zs;
Assert(next != NULL);
*/
static void
bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
- bbstreamer_member *member,
- const char *data, int len,
- bbstreamer_archive_context context)
+ bbstreamer_member *member,
+ const char *data, int len,
+ bbstreamer_archive_context context)
{
bbstreamer_gzip_decompressor *mystreamer;
- z_stream *zs;
+ z_stream *zs;
mystreamer = (bbstreamer_gzip_decompressor *) streamer;
/* Process the current chunk */
while (zs->avail_in > 0)
{
- int res;
+ int res;
Assert(mystreamer->bytes_written < mystreamer->base.bbs_buffer.maxlen);
/*
* This call decompresses data starting at zs->next_in and updates
- * zs->next_in * and zs->avail_in. It generates output data starting at
- * zs->next_out and updates zs->next_out and zs->avail_out accordingly.
+ * zs->next_in and zs->avail_in. It generates output data starting
+ * at zs->next_out and updates zs->next_out and zs->avail_out
+ * accordingly.
*/
res = inflate(zs, Z_NO_FLUSH);
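The reflowed comment describes zlib's standard streaming contract: inflate() consumes from next_in/avail_in and writes to next_out/avail_out, advancing all four fields. A minimal standalone round-trip using stock zlib calls (not the bbstreamer code; it assumes the output buffer is large enough for the whole result):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int
    main(void)
    {
        const char *msg = "hello, hello, hello, hello";
        unsigned char compressed[128];
        unsigned char decompressed[128];
        uLongf      clen = sizeof(compressed);
        z_stream    zs;

        /* produce some compressed input with the one-shot API */
        if (compress(compressed, &clen, (const unsigned char *) msg,
                     strlen(msg) + 1) != Z_OK)
            return 1;

        /* streaming decompression, consuming next_in and filling next_out */
        memset(&zs, 0, sizeof(zs));
        if (inflateInit(&zs) != Z_OK)
            return 1;
        zs.next_in = compressed;
        zs.avail_in = clen;
        zs.next_out = decompressed;
        zs.avail_out = sizeof(decompressed);

        while (zs.avail_in > 0)
        {
            int         res = inflate(&zs, Z_NO_FLUSH);

            if (res == Z_STREAM_END)
                break;
            if (res != Z_OK)
                return 1;
        }
        inflateEnd(&zs);

        printf("%s\n", decompressed);
        return 0;
    }

Build with -lz.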
{
bbstreamer base;
- LZ4F_compressionContext_t cctx;
- LZ4F_decompressionContext_t dctx;
- LZ4F_preferences_t prefs;
+ LZ4F_compressionContext_t cctx;
+ LZ4F_decompressionContext_t dctx;
+ LZ4F_preferences_t prefs;
size_t bytes_written;
bool header_written;
bbstreamer_lz4_compressor_new(bbstreamer *next, pg_compress_specification *compress)
{
#ifdef USE_LZ4
- bbstreamer_lz4_frame *streamer;
- LZ4F_errorCode_t ctxError;
- LZ4F_preferences_t *prefs;
+ bbstreamer_lz4_frame *streamer;
+ LZ4F_errorCode_t ctxError;
+ LZ4F_preferences_t *prefs;
Assert(next != NULL);
const char *data, int len,
bbstreamer_archive_context context)
{
- bbstreamer_lz4_frame *mystreamer;
- uint8 *next_in,
- *next_out;
- size_t out_bound,
- compressed_size,
- avail_out;
+ bbstreamer_lz4_frame *mystreamer;
+ uint8 *next_in,
+ *next_out;
+ size_t out_bound,
+ compressed_size,
+ avail_out;
mystreamer = (bbstreamer_lz4_frame *) streamer;
next_in = (uint8 *) data;
}
/*
- * Update the offset and capacity of output buffer based on number of bytes
- * written to output buffer.
+ * Update the offset and capacity of output buffer based on number of
+ * bytes written to output buffer.
*/
next_out = (uint8 *) mystreamer->base.bbs_buffer.data + mystreamer->bytes_written;
avail_out = mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written;
out_bound = LZ4F_compressBound(len, &mystreamer->prefs);
if (avail_out < out_bound)
{
- bbstreamer_content(mystreamer->base.bbs_next, member,
- mystreamer->base.bbs_buffer.data,
- mystreamer->bytes_written,
- context);
-
- /* Enlarge buffer if it falls short of out bound. */
- if (mystreamer->base.bbs_buffer.maxlen < out_bound)
- enlargeStringInfo(&mystreamer->base.bbs_buffer, out_bound);
-
- avail_out = mystreamer->base.bbs_buffer.maxlen;
- mystreamer->bytes_written = 0;
- next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+ bbstreamer_content(mystreamer->base.bbs_next, member,
+ mystreamer->base.bbs_buffer.data,
+ mystreamer->bytes_written,
+ context);
+
+ /* Enlarge buffer if it falls short of out bound. */
+ if (mystreamer->base.bbs_buffer.maxlen < out_bound)
+ enlargeStringInfo(&mystreamer->base.bbs_buffer, out_bound);
+
+ avail_out = mystreamer->base.bbs_buffer.maxlen;
+ mystreamer->bytes_written = 0;
+ next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
}
/*
static void
bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
- uint8 *next_out;
- size_t footer_bound,
- compressed_size,
- avail_out;
+ bbstreamer_lz4_frame *mystreamer;
+ uint8 *next_out;
+ size_t footer_bound,
+ compressed_size,
+ avail_out;
mystreamer = (bbstreamer_lz4_frame *) streamer;
if ((mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written) <
footer_bound)
{
- bbstreamer_content(mystreamer->base.bbs_next, NULL,
- mystreamer->base.bbs_buffer.data,
- mystreamer->bytes_written,
- BBSTREAMER_UNKNOWN);
-
- /* Enlarge buffer if it falls short of footer bound. */
- if (mystreamer->base.bbs_buffer.maxlen < footer_bound)
- enlargeStringInfo(&mystreamer->base.bbs_buffer, footer_bound);
-
- avail_out = mystreamer->base.bbs_buffer.maxlen;
- mystreamer->bytes_written = 0;
- next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+ bbstreamer_content(mystreamer->base.bbs_next, NULL,
+ mystreamer->base.bbs_buffer.data,
+ mystreamer->bytes_written,
+ BBSTREAMER_UNKNOWN);
+
+ /* Enlarge buffer if it falls short of footer bound. */
+ if (mystreamer->base.bbs_buffer.maxlen < footer_bound)
+ enlargeStringInfo(&mystreamer->base.bbs_buffer, footer_bound);
+
+ avail_out = mystreamer->base.bbs_buffer.maxlen;
+ mystreamer->bytes_written = 0;
+ next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
}
else
{
static void
bbstreamer_lz4_compressor_free(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
+ bbstreamer_lz4_frame *mystreamer;
mystreamer = (bbstreamer_lz4_frame *) streamer;
bbstreamer_free(streamer->bbs_next);
bbstreamer_lz4_decompressor_new(bbstreamer *next)
{
#ifdef USE_LZ4
- bbstreamer_lz4_frame *streamer;
- LZ4F_errorCode_t ctxError;
+ bbstreamer_lz4_frame *streamer;
+ LZ4F_errorCode_t ctxError;
Assert(next != NULL);
const char *data, int len,
bbstreamer_archive_context context)
{
- bbstreamer_lz4_frame *mystreamer;
- uint8 *next_in,
- *next_out;
- size_t avail_in,
- avail_out;
+ bbstreamer_lz4_frame *mystreamer;
+ uint8 *next_in,
+ *next_out;
+ size_t avail_in,
+ avail_out;
mystreamer = (bbstreamer_lz4_frame *) streamer;
next_in = (uint8 *) data;
while (avail_in > 0)
{
- size_t ret,
- read_size,
- out_size;
+ size_t ret,
+ read_size,
+ out_size;
read_size = avail_in;
out_size = avail_out;
mystreamer->bytes_written += out_size;
/*
- * If output buffer is full then forward the content to next streamer and
- * update the output buffer.
+ * If output buffer is full then forward the content to next streamer
+ * and update the output buffer.
*/
if (mystreamer->bytes_written >= mystreamer->base.bbs_buffer.maxlen)
{
static void
bbstreamer_lz4_decompressor_finalize(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
+ bbstreamer_lz4_frame *mystreamer;
mystreamer = (bbstreamer_lz4_frame *) streamer;
static void
bbstreamer_lz4_decompressor_free(bbstreamer *streamer)
{
- bbstreamer_lz4_frame *mystreamer;
+ bbstreamer_lz4_frame *mystreamer;
mystreamer = (bbstreamer_lz4_frame *) streamer;
bbstreamer_free(streamer->bbs_next);
typedef struct ArchiveStreamState
{
int tablespacenum;
- pg_compress_specification *compress;
+ pg_compress_specification *compress;
bbstreamer *streamer;
bbstreamer *manifest_inject_streamer;
PQExpBuffer manifest_buffer;
/* Handle to child process */
static pid_t bgchild = -1;
static bool in_log_streamer = false;
+
/* Flag to indicate if child process exited unexpectedly */
static volatile sig_atomic_t bgchild_exited = false;
*/
#ifdef WIN32
/*
- * In order to signal the main thread of an ungraceful exit we
- * set the same flag that we use on Unix to signal SIGCHLD.
+ * In order to signal the main thread of an ungraceful exit we set the
+ * same flag that we use on Unix to signal SIGCHLD.
*/
bgchild_exited = true;
#endif
}
else
{
- char *alg;
+ char *alg;
alg = palloc((sep - option) + 1);
memcpy(alg, option, sep - option);
/*
* We have to parse the archive if (1) we're supposed to extract it, or if
- * (2) we need to inject backup_manifest or recovery configuration into it.
- * However, we only know how to parse tar archives.
+ * (2) we need to inject backup_manifest or recovery configuration into
+ * it. However, we only know how to parse tar archives.
*/
must_parse_archive = (format == 'p' || inject_manifest ||
- (spclocation == NULL && writerecoveryconf));
+ (spclocation == NULL && writerecoveryconf));
/* At present, we only know how to parse tar archives. */
if (must_parse_archive && !is_tar && !is_compressed_tar)
/*
* In tar format, we just write the archive without extracting it.
* Normally, we write it to the archive name provided by the caller,
- * but when the base directory is "-" that means we need to write
- * to standard output.
+ * but when the base directory is "-" that means we need to write to
+ * standard output.
*/
if (strcmp(basedir, "-") == 0)
{
}
/*
- * If we're supposed to inject the backup manifest into the results,
- * it should be done here, so that the file content can be injected
- * directly, without worrying about the details of the tar format.
+ * If we're supposed to inject the backup manifest into the results, it
+ * should be done here, so that the file content can be injected directly,
+ * without worrying about the details of the tar format.
*/
if (inject_manifest)
manifest_inject_streamer = streamer;
/*
- * If this is the main tablespace and we're supposed to write
- * recovery information, arrange to do that.
+ * If this is the main tablespace and we're supposed to write recovery
+ * information, arrange to do that.
*/
if (spclocation == NULL && writerecoveryconf)
{
}
/*
- * If we're doing anything that involves understanding the contents of
- * the archive, we'll need to parse it. If not, we can skip parsing it,
- * but old versions of the server send improperly terminated tarfiles,
- * so if we're talking to such a server we'll need to add the terminator
- * here.
+ * If we're doing anything that involves understanding the contents of the
+ * archive, we'll need to parse it. If not, we can skip parsing it, but
+ * old versions of the server send improperly terminated tarfiles, so if
+ * we're talking to such a server we'll need to add the terminator here.
*/
if (must_parse_archive)
streamer = bbstreamer_tar_parser_new(streamer);
streamer = bbstreamer_tar_terminator_new(streamer);
/*
- * If the user has requested a server compressed archive along with archive
- * extraction at client then we need to decompress it.
+ * If the user has requested a server compressed archive along with
+ * archive extraction at client then we need to decompress it.
*/
if (format == 'p')
{
}
if (maxrate > 0)
AppendIntegerCommandOption(&buf, use_new_option_syntax, "MAX_RATE",
- maxrate);
+ maxrate);
if (format == 't')
AppendPlainCommandOption(&buf, use_new_option_syntax, "TABLESPACE_MAP");
if (!verify_checksums)
{
if (use_new_option_syntax)
AppendIntegerCommandOption(&buf, use_new_option_syntax,
- "VERIFY_CHECKSUMS", 0);
+ "VERIFY_CHECKSUMS", 0);
else
AppendPlainCommandOption(&buf, use_new_option_syntax,
- "NOVERIFY_CHECKSUMS");
+ "NOVERIFY_CHECKSUMS");
}
if (manifest)
* we do anything anyway.
*
* Note that this is skipped for tar format backups and backups that
- * the server is storing to a target location, since in that case
- * we won't be storing anything into these directories and thus should
+ * the server is storing to a target location, since in that case we
+ * won't be storing anything into these directories and thus should
* not create them.
*/
if (backup_target == NULL && format == 'p' && !PQgetisnull(res, i, 1))
*/
if (includewal == STREAM_WAL)
{
- pg_compress_algorithm wal_compress_algorithm;
- int wal_compress_level;
+ pg_compress_algorithm wal_compress_algorithm;
+ int wal_compress_level;
if (verbose)
pg_log_info("starting background WAL receiver");
int option_index;
char *compression_algorithm = "none";
char *compression_detail = NULL;
- CompressionLocation compressloc = COMPRESS_LOCATION_UNSPECIFIED;
- pg_compress_specification client_compress;
+ CompressionLocation compressloc = COMPRESS_LOCATION_UNSPECIFIED;
+ pg_compress_specification client_compress;
pg_logging_init(argv[0]);
progname = get_progname(argv[0]);
/*
* If the user has not specified where to perform backup compression,
- * default to the client, unless the user specified --target, in which case
- * the server is the only choice.
+ * default to the client, unless the user specified --target, in which
+ * case the server is the only choice.
*/
if (compressloc == COMPRESS_LOCATION_UNSPECIFIED)
{
}
/*
- * If any compression that we're doing is happening on the client side,
- * we must try to parse the compression algorithm and detail, but if it's
- * all on the server side, then we're just going to pass through whatever
- * was requested and let the server decide what to do.
+ * If any compression that we're doing is happening on the client side, we
+ * must try to parse the compression algorithm and detail, but if it's all
+ * on the server side, then we're just going to pass through whatever was
+ * requested and let the server decide what to do.
*/
if (compressloc == COMPRESS_LOCATION_CLIENT)
{
- pg_compress_algorithm alg;
+ pg_compress_algorithm alg;
char *error_detail;
if (!parse_compress_algorithm(compression_algorithm, &alg))
}
/*
- * Can't perform client-side compression if the backup is not being
- * sent to the client.
+ * Can't perform client-side compression if the backup is not being sent
+ * to the client.
*/
if (backup_target != NULL && compressloc == COMPRESS_LOCATION_CLIENT)
{
atexit(disconnect_atexit);
#ifndef WIN32
+
/*
* Trap SIGCHLD to be able to handle the WAL stream process exiting. There
- * is no SIGCHLD on Windows, there we rely on the background thread setting
- * the signal variable on unexpected but graceful exit. If the WAL stream
- * thread crashes on Windows it will bring down the entire process as it's
- * a thread, so there is nothing to catch should that happen. A crash on
- * UNIX will be caught by the signal handler.
+ * is no SIGCHLD on Windows, there we rely on the background thread
+ * setting the signal variable on unexpected but graceful exit. If the WAL
+ * stream thread crashes on Windows it will bring down the entire process
+ * as it's a thread, so there is nothing to catch should that happen. A
+ * crash on UNIX will be caught by the signal handler.
*/
pqsignal(SIGCHLD, sigchld_handler);
#endif
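The comment describes the usual Unix pattern also used here: the signal handler only sets a volatile sig_atomic_t flag, and the mainline code checks it later. A standalone sketch using sigaction() instead of pqsignal() (illustrative only; no Windows branch):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static volatile sig_atomic_t child_exited = 0;

    /* keep the handler async-signal-safe: just set a flag */
    static void
    sigchld_handler(int signo)
    {
        (void) signo;
        child_exited = 1;
    }

    int
    main(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = sigchld_handler;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGCHLD, &sa, NULL);

        if (fork() == 0)
            _exit(0);           /* child exits immediately */

        while (!child_exited)
            sleep(1);           /* interrupted early by SIGCHLD */

        waitpid(-1, NULL, 0);
        printf("child exited; clean up here\n");
        return 0;
    }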
/* pg_recvlogical doesn't use an exported snapshot, so suppress */
if (use_new_option_syntax)
AppendStringCommandOption(query, use_new_option_syntax,
- "SNAPSHOT", "nothing");
+ "SNAPSHOT", "nothing");
else
AppendPlainCommandOption(query, use_new_option_syntax,
"NOEXPORT_SNAPSHOT");
umask(0077);
# Initialize node without replication settings
-$node->init(extra => ['--data-checksums'],
- auth_extra => [ '--create-role', 'backupuser' ]);
+$node->init(
+ extra => ['--data-checksums'],
+ auth_extra => [ '--create-role', 'backupuser' ]);
$node->start;
my $pgdata = $node->data_dir;
# Now that we have a server that supports replication commands, test whether
# certain invalid compression commands fail on the client side with client-side
# compression and on the server side with server-side compression.
-my $client_fails =
- 'pg_basebackup: error: ';
+my $client_fails = 'pg_basebackup: error: ';
my $server_fails =
- 'pg_basebackup: error: could not initiate base backup: ERROR: ';
+ 'pg_basebackup: error: could not initiate base backup: ERROR: ';
my @compression_failure_tests = (
[
'extrasquishy',
'gzip:workers=3',
'invalid compression specification: compression algorithm "gzip" does not accept a worker count',
'failure on worker count for gzip'
- ],
-);
+ ],);
for my $cft (@compression_failure_tests)
{
my $cfail = quotemeta($client_fails . $cft->[1]);
$node->command_fails_like(
[ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', $cft->[0] ],
qr/$cfail/,
- 'client '. $cft->[2]);
+ 'client ' . $cft->[2]);
$node->command_fails_like(
- [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress',
- 'server-' . $cft->[0] ],
+ [
+ 'pg_basebackup', '-D',
+ "$tempdir/backup", '--compress',
+ 'server-' . $cft->[0]
+ ],
qr/$sfail/,
'server ' . $cft->[2]);
}
}
# Run base backup.
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
+$node->command_ok(
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
'pg_basebackup runs');
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
# to our physical temp location. That way we can use shorter names
# for the tablespace directories, which hopefully won't run afoul of
# the 99 character length limit.
-my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
+my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
my $real_sys_tempdir = "$sys_tempdir/tempdir";
dir_symlink "$tempdir", $real_sys_tempdir;
mkdir "$tempdir/tblspc1";
-my $realTsDir = "$real_sys_tempdir/tblspc1";
+my $realTsDir = "$real_sys_tempdir/tblspc1";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';");
$node->safe_psql('postgres',
my $repTsDir = "$tempdir/tblspc1replica";
my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
mkdir $repTsDir;
- PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0], '-C', $repTsDir);
+ PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0],
+ '-C', $repTsDir);
# Update tablespace map to point to new directory.
# XXX Ideally pg_basebackup would handle this.
$realTsDir = "$real_sys_tempdir/$superlongname";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
+$node->command_ok(
+ [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
'pg_basebackup tar with long symlink target');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
rmtree("$tempdir/tarbackup_l3");
'WAL files copied');
rmtree("$tempdir/backupxs");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream', '-Ft' ],
+ [
+ @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream',
+ '-Ft'
+ ],
'pg_basebackup -X stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
rmtree("$tempdir/backupxst");
qr/unrecognized target/,
'backup target unrecognized');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none', '-D', "$tempdir/blackhole" ],
+ [
+ @pg_basebackup_defs, '--target', 'blackhole', '-X',
+ 'none', '-D', "$tempdir/blackhole"
+ ],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
$node->command_fails_like(
[ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ],
'backup target blackhole');
$node->command_ok(
- [ @pg_basebackup_defs, '--target', "server:$tempdir/backuponserver", '-X', 'none' ],
+ [
+ @pg_basebackup_defs, '--target',
+ "server:$tempdir/backuponserver", '-X',
+ 'none'
+ ],
'backup target server');
ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created');
rmtree("$tempdir/backuponserver");
[qw(createuser --replication --role=pg_write_server_files backupuser)],
'create backup user');
$node->command_ok(
- [ @pg_basebackup_defs, '-U', 'backupuser', '--target', "server:$tempdir/backuponserver", '-X', 'none' ],
+ [
+ @pg_basebackup_defs, '-U', 'backupuser', '--target',
+ "server:$tempdir/backuponserver",
+ '-X', 'none'
+ ],
'backup target server');
-ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created as non-superuser');
+ok( -f "$tempdir/backuponserver/base.tar",
+ 'backup tar was created as non-superuser');
rmtree("$tempdir/backuponserver");
$node->command_fails(
],
'pg_basebackup fails with -C -S --no-slot');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-D', "$tempdir/blackhole" ],
+ [
+ @pg_basebackup_defs, '--target', 'blackhole', '-D',
+ "$tempdir/blackhole"
+ ],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
'pg_basebackup fails with -C -S --no-slot');
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0' ],
+ [
+ @pg_basebackup_defs, '-D',
+ "$tempdir/backupxs_slot", '-C',
+ '-S', 'slot0'
+ ],
'pg_basebackup -C runs');
rmtree("$tempdir/backupxs_slot");
'restart LSN of new slot is not null');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot1", '-C', '-S', 'slot0' ],
+ [
+ @pg_basebackup_defs, '-D',
+ "$tempdir/backupxs_slot1", '-C',
+ '-S', 'slot0'
+ ],
'pg_basebackup fails with -C -S and a previously existing slot');
$node->safe_psql('postgres',
);
is($lsn, '', 'restart LSN of new slot is null');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/fail", '-S', 'slot1', '-X', 'none' ],
+ [
+ @pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
+ 'slot1', '-X', 'none'
+ ],
'pg_basebackup with replication slot fails without WAL streaming');
$node->command_ok(
[
my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', '');
my $sigchld_bb = IPC::Run::start(
[
- @pg_basebackup_defs, '--wal-method=stream', '-D', "$tempdir/sigchld",
- '--max-rate=32', '-d', $node->connstr('postgres')
+ @pg_basebackup_defs, '--wal-method=stream',
+ '-D', "$tempdir/sigchld",
+ '--max-rate=32', '-d',
+ $node->connstr('postgres')
],
'<',
\$sigchld_bb_stdin,
\$sigchld_bb_stderr,
$sigchld_bb_timeout);
-is($node->poll_query_until('postgres',
- "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE " .
- "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' " .
- "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'"),
+is( $node->poll_query_until(
+ 'postgres',
+ "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE "
+ . "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' "
+ . "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'"),
"1",
"Walsender killed");
-ok(pump_until($sigchld_bb, $sigchld_bb_timeout, \$sigchld_bb_stderr,
- qr/background process terminated unexpectedly/),
- 'background process exit message');
+ok( pump_until(
+ $sigchld_bb, $sigchld_bb_timeout,
+ \$sigchld_bb_stderr, qr/background process terminated unexpectedly/),
+ 'background process exit message');
$sigchld_bb->finish();
done_testing();
'creating a replication slot');
my $slot = $primary->slot($slot_name);
is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
-is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
+is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
$primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ],
'dropping a replication slot');
is($primary->slot($slot_name)->{'slot_type'},
$primary->wait_for_catchup($standby);
# Get a walfilename from before the promotion to make sure it is archived
# after promotion
-my $standby_slot = $standby->slot($archive_slot);
+my $standby_slot = $standby->slot($archive_slot);
my $replication_slot_lsn = $standby_slot->{'restart_lsn'};
# pg_walfile_name() is not supported while in recovery, so use the primary
[
'pg_recvlogical', '-S',
'test', '-d',
- $node->connstr('postgres'), '--create-slot', '--two-phase'
+ $node->connstr('postgres'), '--create-slot',
+ '--two-phase'
],
'slot with two-phase created');
$node->safe_psql('postgres',
"BEGIN; INSERT INTO test_table values (11); PREPARE TRANSACTION 'test'");
-$node->safe_psql('postgres',
- "COMMIT PREPARED 'test'");
-$nextlsn =
- $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
+$node->safe_psql('postgres', "COMMIT PREPARED 'test'");
+$nextlsn = $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
chomp($nextlsn);
$node->command_fails(
[
- 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
- '--start', '--endpos', "$nextlsn", '--two-phase', '--no-loop', '-f', '-'
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
+ $node->connstr('postgres'), '--start',
+ '--endpos', "$nextlsn",
+ '--two-phase', '--no-loop',
+ '-f', '-'
],
'incorrect usage');
* achieves the goal of postmaster running in a similar environment as pg_ctl.
*/
static void
-InheritStdHandles(STARTUPINFO* si)
+InheritStdHandles(STARTUPINFO *si)
{
si->dwFlags |= STARTF_USESTDHANDLES;
si->hStdInput = GetStdHandle(STD_INPUT_HANDLE);
si.cb = sizeof(si);
/*
- * Set stdin/stdout/stderr handles to be inherited in the child
- * process. That allows postmaster and the processes it starts to perform
+ * Set stdin/stdout/stderr handles to be inherited in the child process.
+ * That allows postmaster and the processes it starts to perform
* additional checks to see if running in a service (otherwise they get
* the default console handles - which point to "somewhere").
*/
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ],
4, 'pg_ctl status with nonexistent directory');
is_supported = false;
else
{
- tmp = ReadStr(AH);
+ tmp = ReadStr(AH);
- if (strcmp(tmp, "true") == 0)
- is_supported = false;
+ if (strcmp(tmp, "true") == 0)
+ is_supported = false;
- free(tmp);
+ free(tmp);
}
if (!is_supported)
int byt;
/*
- * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal() inside
- * ReadInt rather than returning EOF. It doesn't seem worth jumping
- * through hoops to deal with that case better, because no such files are
- * likely to exist in the wild: only some 7.1 development versions of
- * pg_dump ever generated such files.
+ * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal()
+ * inside ReadInt rather than returning EOF. It doesn't seem worth
+ * jumping through hoops to deal with that case better, because no such
+ * files are likely to exist in the wild: only some 7.1 development
+ * versions of pg_dump ever generated such files.
*/
if (AH->version < K_VERS_1_3)
*type = BLK_DATA;
for (cell = patterns->head; cell; cell = cell->next)
{
- PQExpBufferData dbbuf;
- int dotcnt;
+ PQExpBufferData dbbuf;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_namespace n\n");
*/
for (cell = patterns->head; cell; cell = cell->next)
{
- int dotcnt;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_extension e\n");
for (cell = patterns->head; cell; cell = cell->next)
{
- int dotcnt;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_foreign_server s\n");
for (cell = patterns->head; cell; cell = cell->next)
{
- PQExpBufferData dbbuf;
- int dotcnt;
+ PQExpBufferData dbbuf;
+ int dotcnt;
/*
* Query must remain ABSOLUTELY devoid of unqualified names. This
{
/*
* It's necessary to add parentheses around the expression because
- * pg_get_expr won't supply the parentheses for things like WHERE TRUE.
+ * pg_get_expr won't supply the parentheses for things like WHERE
+ * TRUE.
*/
appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual);
}
/*
* Not every relation has storage. Also, in a pre-v12 database,
- * partitioned tables have a relfilenode, which should not be preserved
- * when upgrading.
+ * partitioned tables have a relfilenode, which should not be
+ * preserved when upgrading.
*/
if (OidIsValid(relfilenode) && relkind != RELKIND_PARTITIONED_TABLE)
appendPQExpBuffer(upgrade_buffer,
for (SimpleStringListCell *cell = patterns->head; cell; cell = cell->next)
{
- int dotcnt;
+ int dotcnt;
appendPQExpBufferStr(query,
"SELECT datname FROM pg_catalog.pg_database n\n");
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
#########################################
# Basic checks
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
###############################################################
# Definition of the pg_dump runs to make.
'CREATE PUBLICATION pub3' => {
create_order => 50,
create_sql => 'CREATE PUBLICATION pub3;',
- regexp => qr/^
+ regexp => qr/^
\QCREATE PUBLICATION pub3 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
like => { %full_runs, section_post_data => 1, },
'CREATE PUBLICATION pub4' => {
create_order => 50,
create_sql => 'CREATE PUBLICATION pub4;',
- regexp => qr/^
+ regexp => qr/^
\QCREATE PUBLICATION pub4 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
},
- 'ALTER PUBLICATION pub1 ADD TABLE test_seventh_table (col3, col2) WHERE (col1 = 1)' => {
+ 'ALTER PUBLICATION pub1 ADD TABLE test_seventh_table (col3, col2) WHERE (col1 = 1)'
+ => {
create_order => 52,
create_sql =>
'ALTER PUBLICATION pub1 ADD TABLE dump_test.test_seventh_table (col3, col2) WHERE (col1 = 1);',
/xm,
like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
- },
+ },
'ALTER PUBLICATION pub3 ADD ALL TABLES IN SCHEMA dump_test' => {
create_order => 51,
regexp => qr/^
\QALTER PUBLICATION pub3 ADD ALL TABLES IN SCHEMA dump_test;\E
/xm,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
},
regexp => qr/^
\QALTER PUBLICATION pub4 ADD TABLE ONLY dump_test.test_table WHERE ((col1 > 0));\E
/xm,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
exclude_test_table => 1,
},
},
- 'ALTER PUBLICATION pub4 ADD TABLE test_second_table WHERE (col2 = \'test\');' => {
+ 'ALTER PUBLICATION pub4 ADD TABLE test_second_table WHERE (col2 = \'test\');'
+ => {
create_order => 52,
create_sql =>
'ALTER PUBLICATION pub4 ADD TABLE dump_test.test_second_table WHERE (col2 = \'test\');',
/xm,
like => { %full_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
- },
+ },
'CREATE SCHEMA public' => {
regexp => qr/^CREATE SCHEMA public;/m,
$node->command_fails_like(
[ 'pg_dumpall', '--exclude-database', '.' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./,
- 'pg_dumpall: option --exclude-database rejects multipart pattern "."'
-);
+ 'pg_dumpall: option --exclude-database rejects multipart pattern "."');
$node->command_fails_like(
[ 'pg_dumpall', '--exclude-database', 'myhost.mydb' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/,
- 'pg_dumpall: option --exclude-database rejects multipart database names'
-);
+ 'pg_dumpall: option --exclude-database rejects multipart database names');
#########################################
# Test valid database exclusion patterns
$node->command_fails_like(
[ 'pg_dump', '--schema', 'myhost.mydb.myschema' ],
qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/,
- 'pg_dump: option --schema rejects three-part schema names'
-);
+ 'pg_dump: option --schema rejects three-part schema names');
$node->command_fails_like(
[ 'pg_dump', '--schema', 'otherdb.myschema' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/,
- 'pg_dump: option --schema rejects cross-database multipart schema names'
-);
+ 'pg_dump: option --schema rejects cross-database multipart schema names');
$node->command_fails_like(
[ 'pg_dump', '--schema', '.' ],
qr/pg_dump: error: cross-database references are not implemented: \./,
- 'pg_dump: option --schema rejects degenerate two-part schema name: "."'
-);
+ 'pg_dump: option --schema rejects degenerate two-part schema name: "."');
$node->command_fails_like(
[ 'pg_dump', '--schema', '"some.other.db".myschema' ],
$node->command_fails_like(
[ 'pg_dump', '--table', 'myhost.mydb.myschema.mytable' ],
qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/,
- 'pg_dump: option --table rejects four-part table names'
-);
+ 'pg_dump: option --table rejects four-part table names');
$node->command_fails_like(
[ 'pg_dump', '--table', 'otherdb.pg_catalog.pg_class' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/,
- 'pg_dump: option --table rejects cross-database three part table names'
-);
+ 'pg_dump: option --table rejects cross-database three part table names');
command_fails_like(
- [ 'pg_dump', '-p', "$port", '--table', '"some.other.db".pg_catalog.pg_class' ],
+ [
+ 'pg_dump', '-p', "$port", '--table',
+ '"some.other.db".pg_catalog.pg_class'
+ ],
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/,
'pg_dump: option --table rejects cross-database three part table names with embedded dots'
);
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
my $node = PostgreSQL::Test::Cluster->new('main');
my $port = $node->port;
. generate_ascii_string(1, 9)
. generate_ascii_string(11, 12)
. generate_ascii_string(14, 33)
- . ($PostgreSQL::Test::Utils::windows_os ? '' : '"x"') # IPC::Run mishandles '"' on Windows
- . generate_ascii_string(35, 43) # skip ','
+ . ($PostgreSQL::Test::Utils::windows_os
+ ? ''
+ : '"x"') # IPC::Run mishandles '"' on Windows
+ . generate_ascii_string(35, 43) # skip ','
. generate_ascii_string(45, 54);
my $dbname2 = 'regression' . generate_ascii_string(55, 65) # skip 'B'-'W'
. generate_ascii_string(88, 99) # skip 'd'-'w'
my ($stderr, $result);
my $restore_super = qq{regress_a'b\\c=d\\ne"f};
$restore_super =~ s/"//g
- if $PostgreSQL::Test::Utils::windows_os; # IPC::Run mishandles '"' on Windows
+ if
+ $PostgreSQL::Test::Utils::windows_os; # IPC::Run mishandles '"' on Windows
# Restore full dump through psql using environment variables for
{"pg_internal.init", true}, /* defined as RELCACHE_INIT_FILENAME */
/*
- * If there is a backup_label or tablespace_map file, it indicates that
- * a recovery failed and this cluster probably can't be rewound, but
- * exclude them anyway if they are found.
+ * If there is a backup_label or tablespace_map file, it indicates that a
+ * recovery failed and this cluster probably can't be rewound, but exclude
+ * them anyway if they are found.
*/
{"backup_label", false}, /* defined as BACKUP_LABEL_FILE */
{"tablespace_map", false}, /* defined as TABLESPACE_MAP */
{
my $test_mode = shift;
- my $primary_xlogdir = "${PostgreSQL::Test::Utils::tmp_check}/xlog_primary";
+ my $primary_xlogdir =
+ "${PostgreSQL::Test::Utils::tmp_check}/xlog_primary";
rmtree($primary_xlogdir);
RewindTest::setup_cluster($test_mode);
# copy operation and the result will be an error.
my $ret = run_log(
[
- 'pg_rewind', '--debug',
+ 'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $primary_pgdata,
'--no-sync',
],
- '2>>', "$standby_pgdata/tst_both_dir/file1");
+ '2>>',
+ "$standby_pgdata/tst_both_dir/file1");
ok(!$ret, 'Error out on copying growing file');
# Ensure that the files are of different size, the final error message should
],
'>', \$stdout, '2>', \$stderr;
- is($result, 1, "$test_name: psql exit code");
- is($stderr, '', "$test_name: psql no stderr");
+ is($result, 1, "$test_name: psql exit code");
+ is($stderr, '', "$test_name: psql no stderr");
is($stdout, $expected_stdout, "$test_name: query result matches");
return;
# Initialize primary, data checksums are mandatory
$node_primary =
- PostgreSQL::Test::Cluster->new('primary' . ($extra_name ? "_${extra_name}" : ''));
+ PostgreSQL::Test::Cluster->new(
+ 'primary' . ($extra_name ? "_${extra_name}" : ''));
# Set up pg_hba.conf and pg_ident.conf for the role running
# pg_rewind. This role is used for all the tests, and has
my $extra_name = shift;
$node_standby =
- PostgreSQL::Test::Cluster->new('standby' . ($extra_name ? "_${extra_name}" : ''));
+ PostgreSQL::Test::Cluster->new(
+ 'standby' . ($extra_name ? "_${extra_name}" : ''));
$node_primary->backup('my_backup');
$node_standby->init_from_backup($node_primary, 'my_backup');
my $connstr_primary = $node_primary->connstr();
# segments from the old primary to the archives. These
# will be used by pg_rewind.
rmtree($node_primary->archive_dir);
- PostgreSQL::Test::RecursiveCopy::copypath($node_primary->data_dir . "/pg_wal",
+ PostgreSQL::Test::RecursiveCopy::copypath(
+ $node_primary->data_dir . "/pg_wal",
$node_primary->archive_dir);
# Fast way to remove entire directory content
my $tempdir = PostgreSQL::Test::Utils::tempdir;
# Initialize node to upgrade
-my $oldnode = PostgreSQL::Test::Cluster->new('old_node',
+my $oldnode =
+ PostgreSQL::Test::Cluster->new('old_node',
install_path => $ENV{oldinstall});
# To increase coverage of non-standard segment size and group access without
$oldnode->command_ok(
[
'psql', '-X',
- '-f', "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
+ '-f', "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
'regression'
]);
}
break;
case PG_STATUS:
+
/*
* For output to a display, do leading truncation. Append \r so
* that the next message is output at the start of the line.
# Include a user-defined tablespace in the hopes of detecting problems in that
# area.
-my $source_ts_path =PostgreSQL::Test::Utils::tempdir_short();
+my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
my $source_ts_prefix = $source_ts_path;
$source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!;
$primary->init(allows_streaming => 1);
$primary->start;
my $backup_path = $primary->backup_dir . '/test_options';
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+$primary->command_ok(
+ [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
"base backup ok");
# Verify that pg_verifybackup -q succeeds and produces no output.
my $tempdir = PostgreSQL::Test::Utils::tempdir;
-test_bad_manifest(
- 'input string ended unexpectedly',
- qr/could not parse backup manifest: parsing failed/,
-	<<EOM);
+test_bad_manifest('input string ended unexpectedly',
+	qr/could not parse backup manifest: parsing failed/, <<EOM);
{
EOM
$primary->init(allows_streaming => 1);
$primary->start;
my $backup_path = $primary->backup_dir . '/test_wal';
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+$primary->command_ok(
+ [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
"base backup ok");
# Rename pg_wal.
my $backup_path2 = $primary->backup_dir . '/test_tli';
# The base backup run below does a checkpoint, that removes the first segment
# of the current timeline.
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
+$primary->command_ok(
+ [ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
"base backup 2 ok");
command_ok(
[ 'pg_verifybackup', $backup_path2 ],
$primary->init(allows_streaming => 1);
$primary->start;
-my $backup_path = $primary->backup_dir . '/server-backup';
+my $backup_path = $primary->backup_dir . '/server-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'backup_archive' => 'base.tar',
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'backup_archive' => 'base.tar',
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => ['--compress', 'server-gzip'],
- 'backup_archive' => 'base.tar.gz',
+ 'backup_flags' => [ '--compress', 'server-gzip' ],
+ 'backup_archive' => 'base.tar.gz',
'decompress_program' => $ENV{'GZIP_PROGRAM'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => ['--compress', 'server-lz4'],
- 'backup_archive' => 'base.tar.lz4',
+ 'backup_flags' => [ '--compress', 'server-lz4' ],
+ 'backup_archive' => 'base.tar.lz4',
'decompress_program' => $ENV{'LZ4'},
- 'decompress_flags' => [ '-d', '-m'],
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'decompress_flags' => [ '-d', '-m' ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'server-zstd'],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'server-zstd' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
- }
-);
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ });
for my $tc (@test_configuration)
{
my $method = $tc->{'compression_method'};
- SKIP: {
+ SKIP:
+ {
skip "$method compression not supported by this build", 3
- if ! $tc->{'enabled'};
+ if !$tc->{'enabled'};
skip "no decompressor available for $method", 3
if exists $tc->{'decompress_program'}
&& (!defined $tc->{'decompress_program'}
- || $tc->{'decompress_program'} eq '');
+ || $tc->{'decompress_program'} eq '');
# Take a server-side backup.
my @backup = (
- 'pg_basebackup', '--no-sync', '-cfast', '--target',
- "server:$backup_path", '-Xfetch'
- );
- push @backup, @{$tc->{'backup_flags'}};
+ 'pg_basebackup', '--no-sync',
+ '-cfast', '--target',
+ "server:$backup_path", '-Xfetch');
+ push @backup, @{ $tc->{'backup_flags'} };
$primary->command_ok(\@backup,
- "server side backup, compression $method");
+ "server side backup, compression $method");
# Verify that the we got the files we expected.
my $backup_files = join(',',
sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
- my $expected_backup_files = join(',',
- sort ('backup_manifest', $tc->{'backup_archive'}));
- is($backup_files,$expected_backup_files,
+ my $expected_backup_files =
+ join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+ is($backup_files, $expected_backup_files,
"found expected backup files, compression $method");
# Decompress.
if (exists $tc->{'decompress_program'})
{
my @decompress = ($tc->{'decompress_program'});
- push @decompress, @{$tc->{'decompress_flags'}}
- if $tc->{'decompress_flags'};
+ push @decompress, @{ $tc->{'decompress_flags'} }
+ if $tc->{'decompress_flags'};
push @decompress, $backup_path . '/' . $tc->{'backup_archive'};
system_or_bail(@decompress);
}
- SKIP: {
+ SKIP:
+ {
my $tar = $ENV{TAR};
# don't check for a working tar here, to accommodate various odd
# cases such as AIX. If tar doesn't work the init_from_backup below
# will fail.
skip "no tar program available", 1
- if (!defined $tar || $tar eq '');
+ if (!defined $tar || $tar eq '');
# Untar.
mkdir($extract_path);
'-C', $extract_path);
# Verify.
- $primary->command_ok([ 'pg_verifybackup', '-n',
- '-m', "$backup_path/backup_manifest", '-e', $extract_path ],
+ $primary->command_ok(
+ [
+ 'pg_verifybackup', '-n',
+ '-m', "$backup_path/backup_manifest",
+ '-e', $extract_path
+ ],
"verify backup, compression $method");
}
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => ['--compress', 'server-gzip:5'],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'backup_flags' => [ '--compress', 'server-gzip:5' ],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => ['--compress', 'server-lz4:5'],
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'backup_flags' => [ '--compress', 'server-lz4:5' ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'server-zstd:5'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'backup_flags' => [ '--compress', 'server-zstd:5' ],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'parallel zstd',
- 'backup_flags' => ['--compress', 'server-zstd:workers=3'],
- 'enabled' => check_pg_config("#define USE_ZSTD 1"),
- 'possibly_unsupported' => qr/could not set compression worker count to 3: Unsupported parameter/
- }
-);
+ 'backup_flags' => [ '--compress', 'server-zstd:workers=3' ],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1"),
+ 'possibly_unsupported' =>
+ qr/could not set compression worker count to 3: Unsupported parameter/
+ });
for my $tc (@test_configuration)
{
my $backup_path = $primary->backup_dir . '/' . 'extract_backup';
- my $method = $tc->{'compression_method'};
+ my $method = $tc->{'compression_method'};
- SKIP: {
+ SKIP:
+ {
skip "$method compression not supported by this build", 2
- if ! $tc->{'enabled'};
+ if !$tc->{'enabled'};
# Take backup with server compression enabled.
- my @backup = (
+ my @backup = (
'pg_basebackup', '-D', $backup_path,
'-Xfetch', '--no-sync', '-cfast', '-Fp');
- push @backup, @{$tc->{'backup_flags'}};
+ push @backup, @{ $tc->{'backup_flags'} };
my @verify = ('pg_verifybackup', '-e', $backup_path);
my $backup_stdout = '';
my $backup_stderr = '';
my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
- '2>', \$backup_stderr);
+ '2>', \$backup_stderr);
if ($backup_stdout ne '')
{
print "# standard output was:\n$backup_stdout";
{
print "# standard error was:\n$backup_stderr";
}
- if (! $backup_result && $tc->{'possibly_unsupported'} &&
- $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+ if ( !$backup_result
+ && $tc->{'possibly_unsupported'}
+ && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
{
skip "compression with $method not supported by this build", 2;
}
# Make sure that it verifies OK.
$primary->command_ok(\@verify,
- "backup verified, compression method \"$method\"");
+ "backup verified, compression method \"$method\"");
}
# Remove backup immediately to save disk space.
$primary->init(allows_streaming => 1);
$primary->start;
-my $backup_path = $primary->backup_dir . '/client-backup';
+my $backup_path = $primary->backup_dir . '/client-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
my @test_configuration = (
{
'compression_method' => 'none',
- 'backup_flags' => [],
- 'backup_archive' => 'base.tar',
- 'enabled' => 1
+ 'backup_flags' => [],
+ 'backup_archive' => 'base.tar',
+ 'enabled' => 1
},
{
'compression_method' => 'gzip',
- 'backup_flags' => ['--compress', 'client-gzip:5'],
- 'backup_archive' => 'base.tar.gz',
+ 'backup_flags' => [ '--compress', 'client-gzip:5' ],
+ 'backup_archive' => 'base.tar.gz',
'decompress_program' => $ENV{'GZIP_PROGRAM'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define HAVE_LIBZ 1")
},
{
'compression_method' => 'lz4',
- 'backup_flags' => ['--compress', 'client-lz4:5'],
- 'backup_archive' => 'base.tar.lz4',
+ 'backup_flags' => [ '--compress', 'client-lz4:5' ],
+ 'backup_archive' => 'base.tar.lz4',
'decompress_program' => $ENV{'LZ4'},
- 'decompress_flags' => [ '-d' ],
- 'output_file' => 'base.tar',
- 'enabled' => check_pg_config("#define USE_LZ4 1")
+ 'decompress_flags' => ['-d'],
+ 'output_file' => 'base.tar',
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
'compression_method' => 'zstd',
- 'backup_flags' => ['--compress', 'client-zstd:5'],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'client-zstd:5' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1")
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1")
},
{
'compression_method' => 'parallel zstd',
- 'backup_flags' => ['--compress', 'client-zstd:workers=3'],
- 'backup_archive' => 'base.tar.zst',
+ 'backup_flags' => [ '--compress', 'client-zstd:workers=3' ],
+ 'backup_archive' => 'base.tar.zst',
'decompress_program' => $ENV{'ZSTD'},
- 'decompress_flags' => [ '-d' ],
- 'enabled' => check_pg_config("#define USE_ZSTD 1"),
- 'possibly_unsupported' => qr/could not set compression worker count to 3: Unsupported parameter/
- }
-);
+ 'decompress_flags' => ['-d'],
+ 'enabled' => check_pg_config("#define USE_ZSTD 1"),
+ 'possibly_unsupported' =>
+ qr/could not set compression worker count to 3: Unsupported parameter/
+ });
for my $tc (@test_configuration)
{
my $method = $tc->{'compression_method'};
- SKIP: {
+ SKIP:
+ {
skip "$method compression not supported by this build", 3
- if ! $tc->{'enabled'};
+ if !$tc->{'enabled'};
skip "no decompressor available for $method", 3
if exists $tc->{'decompress_program'}
&& (!defined $tc->{'decompress_program'}
- || $tc->{'decompress_program'} eq '');
+ || $tc->{'decompress_program'} eq '');
# Take a client-side backup.
- my @backup = (
+ my @backup = (
'pg_basebackup', '-D', $backup_path,
'-Xfetch', '--no-sync', '-cfast', '-Ft');
- push @backup, @{$tc->{'backup_flags'}};
+ push @backup, @{ $tc->{'backup_flags'} };
my $backup_stdout = '';
my $backup_stderr = '';
my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
- '2>', \$backup_stderr);
+ '2>', \$backup_stderr);
if ($backup_stdout ne '')
{
print "# standard output was:\n$backup_stdout";
{
print "# standard error was:\n$backup_stderr";
}
- if (! $backup_result && $tc->{'possibly_unsupported'} &&
- $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+ if ( !$backup_result
+ && $tc->{'possibly_unsupported'}
+ && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
{
skip "compression with $method not supported by this build", 3;
}
# Verify that the we got the files we expected.
my $backup_files = join(',',
sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
- my $expected_backup_files = join(',',
- sort ('backup_manifest', $tc->{'backup_archive'}));
- is($backup_files,$expected_backup_files,
+ my $expected_backup_files =
+ join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+ is($backup_files, $expected_backup_files,
"found expected backup files, compression $method");
# Decompress.
if (exists $tc->{'decompress_program'})
{
my @decompress = ($tc->{'decompress_program'});
- push @decompress, @{$tc->{'decompress_flags'}}
- if $tc->{'decompress_flags'};
+ push @decompress, @{ $tc->{'decompress_flags'} }
+ if $tc->{'decompress_flags'};
push @decompress, $backup_path . '/' . $tc->{'backup_archive'};
push @decompress, $backup_path . '/' . $tc->{'output_file'}
- if $tc->{'output_file'};
+ if $tc->{'output_file'};
system_or_bail(@decompress);
}
- SKIP: {
+ SKIP:
+ {
my $tar = $ENV{TAR};
# don't check for a working tar here, to accommodate various odd
# cases such as AIX. If tar doesn't work the init_from_backup below
# will fail.
skip "no tar program available", 1
- if (!defined $tar || $tar eq '');
+ if (!defined $tar || $tar eq '');
# Untar.
mkdir($extract_path);
'-C', $extract_path);
# Verify.
- $primary->command_ok([ 'pg_verifybackup', '-n',
- '-m', "$backup_path/backup_manifest", '-e', $extract_path ],
+ $primary->command_ok(
+ [
+ 'pg_verifybackup', '-n',
+ '-m', "$backup_path/backup_manifest",
+ '-e', $extract_path
+ ],
"verify backup, compression $method");
}
XLogReaderState *xlogreader_state;
XLogDumpPrivate private;
XLogDumpConfig config;
- XLogStats stats;
+ XLogStats stats;
XLogRecord *record;
XLogRecPtr first_record;
char *waldir = NULL;
int nclients = 1; /* number of clients */
int nthreads = 1; /* number of threads */
bool is_connect; /* establish connection for each transaction */
-bool report_per_command = false; /* report per-command latencies, retries
- * after errors and failures (errors
- * without retrying) */
+bool report_per_command = false; /* report per-command latencies,
+ * retries after errors and failures
+ * (errors without retrying) */
int main_pid; /* main process id used in log filename */
/*
*/
uint32 max_tries = 1;
-bool failures_detailed = false; /* whether to group failures in reports
- * or logs by basic types */
+bool failures_detailed = false; /* whether to group failures in
+ * reports or logs by basic types */
const char *pghost = NULL;
const char *pgport = NULL;
/*
* The maximum number of variables that we can currently store in 'vars'
- * without having to reallocate more space. We must always have max_vars >=
- * nvars.
+ * without having to reallocate more space. We must always have max_vars
+ * >= nvars.
*/
int max_vars;
{
pg_time_usec_t start_time; /* interval start time, for aggregates */
- /*
- * Transactions are counted depending on their execution and outcome. First
- * a transaction may have started or not: skipped transactions occur under
- * --rate and --latency-limit when the client is too late to execute them.
- * Secondly, a started transaction may ultimately succeed or fail, possibly
- * after some retries when --max-tries is not one. Thus
+ /*----------
+ * Transactions are counted depending on their execution and outcome.
+ * First a transaction may have started or not: skipped transactions occur
+ * under --rate and --latency-limit when the client is too late to execute
+ * them. Secondly, a started transaction may ultimately succeed or fail,
+ * possibly after some retries when --max-tries is not one. Thus
*
* the number of all transactions =
* 'skipped' (it was too late to execute them) +
* 'cnt' (the number of successful transactions) +
- * failed (the number of failed transactions).
+ * 'failed' (the number of failed transactions).
*
* A successful transaction can have several unsuccessful tries before a
* successful run. Thus
* failed (the number of failed transactions) =
* 'serialization_failures' (they got a serialization error and were not
* successfully retried) +
- * 'deadlock_failures' (they got a deadlock error and were not successfully
- * retried).
+ * 'deadlock_failures' (they got a deadlock error and were not
+ * successfully retried).
*
- * If the transaction was retried after a serialization or a deadlock error
- * this does not guarantee that this retry was successful. Thus
+ * If the transaction was retried after a serialization or a deadlock
+ * error this does not guarantee that this retry was successful. Thus
*
* 'retries' (number of retries) =
* number of retries in all retried transactions =
* 'retried' (number of all retried transactions) =
* successfully retried transactions +
* failed transactions.
+ *----------
*/
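A quick standalone sanity check of the counting identities described in the comment above; this is only an illustration with made-up numbers and hypothetical variable names, not pgbench's actual StatsData fields:

#include <assert.h>

int
main(void)
{
	/* Hypothetical interval: 100 scheduled transactions in total. */
	long long	skipped = 2;	/* too late under --rate/--latency-limit */
	long long	cnt = 95;		/* successful transactions */
	long long	serialization_failures = 2;
	long long	deadlock_failures = 1;
	long long	failed = serialization_failures + deadlock_failures;

	long long	successfully_retried = 7;	/* succeeded after >= 1 retry */
	long long	retried = successfully_retried + failed;	/* all retried xacts */
	long long	retries = 12;	/* total extra tries across retried xacts */

	assert(skipped + cnt + failed == 100);	/* all transactions */
	assert(failed == 3);		/* failures split by cause */
	assert(retried == 10);		/* retried = successfully retried + failed */
	assert(retries >= retried); /* at least one retry per retried xact */
	return 0;
}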
int64 cnt; /* number of successful transactions, not
* including 'skipped' */
int64 skipped; /* number of transactions skipped under --rate
* and --latency-limit */
- int64 retries; /* number of retries after a serialization or a
- * deadlock error in all the transactions */
- int64 retried; /* number of all transactions that were retried
- * after a serialization or a deadlock error
- * (perhaps the last try was unsuccessful) */
- int64 serialization_failures; /* number of transactions that were not
- * successfully retried after a
+ int64 retries; /* number of retries after a serialization or
+ * a deadlock error in all the transactions */
+ int64 retried; /* number of all transactions that were
+ * retried after a serialization or a deadlock
+ * error (perhaps the last try was
+ * unsuccessful) */
+ int64 serialization_failures; /* number of transactions that were
+ * not successfully retried after a
* serialization error */
int64 deadlock_failures; /* number of transactions that were not
* successfully retried after a deadlock
* States for failed commands.
*
* If the SQL/meta command fails, in CSTATE_ERROR clean up after an error:
- * - clear the conditional stack;
- * - if we have an unterminated (possibly failed) transaction block, send
- * the rollback command to the server and wait for the result in
- * CSTATE_WAIT_ROLLBACK_RESULT. If something goes wrong with rolling back,
- * go to CSTATE_ABORTED.
+ * (1) clear the conditional stack; (2) if we have an unterminated
+ * (possibly failed) transaction block, send the rollback command to the
+ * server and wait for the result in CSTATE_WAIT_ROLLBACK_RESULT. If
+ * something goes wrong with rolling back, go to CSTATE_ABORTED.
*
- * But if everything is ok we are ready for future transactions: if this is
- * a serialization or deadlock error and we can re-execute the transaction
- * from the very beginning, go to CSTATE_RETRY; otherwise go to
- * CSTATE_FAILURE.
+ * But if everything is ok we are ready for future transactions: if this
+ * is a serialization or deadlock error and we can re-execute the
+ * transaction from the very beginning, go to CSTATE_RETRY; otherwise go
+ * to CSTATE_FAILURE.
*
* In CSTATE_RETRY report an error, set the same parameters for the
* transaction execution as in the previous tries and process the first
int command; /* command number in script */
/* client variables */
- Variables variables;
+ Variables variables;
/* various times about current transaction in microseconds */
pg_time_usec_t txn_scheduled; /* scheduled start time of transaction */
bool prepared[MAX_SCRIPTS]; /* whether client prepared the script */
/*
- * For processing failures and repeating transactions with serialization or
- * deadlock errors: