int threshold_version;
/* A function pointer for determining if the check applies */
DataTypesUsageVersionCheck version_hook;
-} DataTypesUsageChecks;
+} DataTypesUsageChecks;
/*
 * Special values for threshold_version for indicating that a check applies to
 * all versions, or that the check's version_hook must be consulted to decide
 * whether it applies.
 */
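
As context for the check entries below, here is a minimal sketch of how threshold_version and version_hook plausibly gate each check. The numeric values of the two special constants and the hook's argument type are assumptions, not taken from the diff:

#include <stdbool.h>

#define MANUAL_CHECK	1		/* assumed value: consult version_hook */
#define ALL_VERSIONS	-1		/* assumed value: check always applies */

typedef bool (*version_hook_fn) (void *old_cluster);	/* assumed shape */

/*
 * Sketch: a check with threshold_version = 903 applies when upgrading from
 * 9.3 or earlier, 1500 when upgrading from 15 or earlier; MANUAL_CHECK
 * defers entirely to the hook (e.g. jsonb_9_4_check_applicable below).
 */
static bool
check_applies(int threshold_version, version_hook_fn hook,
			  void *old_cluster, int old_major_version)
{
	if (threshold_version == MANUAL_CHECK)
		return hook != NULL && hook(old_cluster);
	if (threshold_version == ALL_VERSIONS)
		return true;
	return old_major_version <= threshold_version;
}
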
{
.status = gettext_noop("Checking for system-defined composite types in user tables"),
- .report_filename = "tables_using_composite.txt",
- .base_query =
- "SELECT t.oid FROM pg_catalog.pg_type t "
- "LEFT JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid "
- " WHERE typtype = 'c' AND (t.oid < 16384 OR nspname = 'information_schema')",
- .report_text =
- gettext_noop("Your installation contains system-defined composite types in user tables.\n"
- "These type OIDs are not stable across PostgreSQL versions,\n"
- "so this cluster cannot currently be upgraded. You can drop the\n"
- "problem columns and restart the upgrade.\n"),
- .threshold_version = ALL_VERSIONS
+ .report_filename = "tables_using_composite.txt",
+ .base_query =
+ "SELECT t.oid FROM pg_catalog.pg_type t "
+ "LEFT JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid "
+ " WHERE typtype = 'c' AND (t.oid < 16384 OR nspname = 'information_schema')",
+ .report_text =
+ gettext_noop("Your installation contains system-defined composite types in user tables.\n"
+ "These type OIDs are not stable across PostgreSQL versions,\n"
+ "so this cluster cannot currently be upgraded. You can drop the\n"
+ "problem columns and restart the upgrade.\n"),
+ .threshold_version = ALL_VERSIONS
},
/*
*/
{
.status = gettext_noop("Checking for incompatible \"line\" data type"),
- .report_filename = "tables_using_line.txt",
- .base_query =
- "SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid",
- .report_text =
- gettext_noop("Your installation contains the \"line\" data type in user tables.\n"
- "This data type changed its internal and input/output format\n"
- "between your old and new versions so this\n"
- "cluster cannot currently be upgraded. You can\n"
- "drop the problem columns and restart the upgrade.\n"),
- .threshold_version = 903
+ .report_filename = "tables_using_line.txt",
+ .base_query =
+ "SELECT 'pg_catalog.line'::pg_catalog.regtype AS oid",
+ .report_text =
+ gettext_noop("Your installation contains the \"line\" data type in user tables.\n"
+ "This data type changed its internal and input/output format\n"
+ "between your old and new versions so this\n"
+ "cluster cannot currently be upgraded. You can\n"
+ "drop the problem columns and restart the upgrade.\n"),
+ .threshold_version = 903
},
/*
*/
{
.status = gettext_noop("Checking for reg* data types in user tables"),
- .report_filename = "tables_using_reg.txt",
+ .report_filename = "tables_using_reg.txt",
/*
* Note: older servers will not have all of these reg* types, so we
* have to write the query like this rather than depending on casts to
* regtype.
*/
- .base_query =
- "SELECT oid FROM pg_catalog.pg_type t "
- "WHERE t.typnamespace = "
- " (SELECT oid FROM pg_catalog.pg_namespace "
- " WHERE nspname = 'pg_catalog') "
- " AND t.typname IN ( "
+ .base_query =
+ "SELECT oid FROM pg_catalog.pg_type t "
+ "WHERE t.typnamespace = "
+ " (SELECT oid FROM pg_catalog.pg_namespace "
+ " WHERE nspname = 'pg_catalog') "
+ " AND t.typname IN ( "
/* pg_class.oid is preserved, so 'regclass' is OK */
- " 'regcollation', "
- " 'regconfig', "
- " 'regdictionary', "
- " 'regnamespace', "
- " 'regoper', "
- " 'regoperator', "
- " 'regproc', "
- " 'regprocedure' "
+ " 'regcollation', "
+ " 'regconfig', "
+ " 'regdictionary', "
+ " 'regnamespace', "
+ " 'regoper', "
+ " 'regoperator', "
+ " 'regproc', "
+ " 'regprocedure' "
/* pg_authid.oid is preserved, so 'regrole' is OK */
/* pg_type.oid is (mostly) preserved, so 'regtype' is OK */
- " )",
- .report_text =
- gettext_noop("Your installation contains one of the reg* data types in user tables.\n"
- "These data types reference system OIDs that are not preserved by\n"
- "pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
- "drop the problem columns and restart the upgrade.\n"),
- .threshold_version = ALL_VERSIONS
+ " )",
+ .report_text =
+ gettext_noop("Your installation contains one of the reg* data types in user tables.\n"
+ "These data types reference system OIDs that are not preserved by\n"
+ "pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
+ "drop the problem columns and restart the upgrade.\n"),
+ .threshold_version = ALL_VERSIONS
},
/*
*/
{
.status = gettext_noop("Checking for incompatible \"aclitem\" data type"),
- .report_filename = "tables_using_aclitem.txt",
- .base_query =
- "SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid",
- .report_text =
- gettext_noop("Your installation contains the \"aclitem\" data type in user tables.\n"
- "The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
- "so this cluster cannot currently be upgraded. You can drop the\n"
- "problem columns and restart the upgrade.\n"),
- .threshold_version = 1500
+ .report_filename = "tables_using_aclitem.txt",
+ .base_query =
+ "SELECT 'pg_catalog.aclitem'::pg_catalog.regtype AS oid",
+ .report_text =
+ gettext_noop("Your installation contains the \"aclitem\" data type in user tables.\n"
+ "The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
+ "so this cluster cannot currently be upgraded. You can drop the\n"
+ "problem columns and restart the upgrade.\n"),
+ .threshold_version = 1500
},
/*
*/
{
.status = gettext_noop("Checking for invalid \"unknown\" user columns"),
- .report_filename = "tables_using_unknown.txt",
- .base_query =
- "SELECT 'pg_catalog.unknown'::pg_catalog.regtype AS oid",
- .report_text =
- gettext_noop("Your installation contains the \"unknown\" data type in user tables.\n"
- "This data type is no longer allowed in tables, so this cluster\n"
- "cannot currently be upgraded. You can drop the problem columns\n"
- "and restart the upgrade.\n"),
- .threshold_version = 906
+ .report_filename = "tables_using_unknown.txt",
+ .base_query =
+ "SELECT 'pg_catalog.unknown'::pg_catalog.regtype AS oid",
+ .report_text =
+ gettext_noop("Your installation contains the \"unknown\" data type in user tables.\n"
+ "This data type is no longer allowed in tables, so this cluster\n"
+ "cannot currently be upgraded. You can drop the problem columns\n"
+ "and restart the upgrade.\n"),
+ .threshold_version = 906
},
/*
*/
{
.status = gettext_noop("Checking for invalid \"sql_identifier\" user columns"),
- .report_filename = "tables_using_sql_identifier.txt",
- .base_query =
- "SELECT 'information_schema.sql_identifier'::pg_catalog.regtype AS oid",
- .report_text =
- gettext_noop("Your installation contains the \"sql_identifier\" data type in user tables.\n"
- "The on-disk format for this data type has changed, so this\n"
- "cluster cannot currently be upgraded. You can drop the problem\n"
- "columns and restart the upgrade.\n"),
- .threshold_version = 1100
+ .report_filename = "tables_using_sql_identifier.txt",
+ .base_query =
+ "SELECT 'information_schema.sql_identifier'::pg_catalog.regtype AS oid",
+ .report_text =
+ gettext_noop("Your installation contains the \"sql_identifier\" data type in user tables.\n"
+ "The on-disk format for this data type has changed, so this\n"
+ "cluster cannot currently be upgraded. You can drop the problem\n"
+ "columns and restart the upgrade.\n"),
+ .threshold_version = 1100
},
/*
*/
{
.status = gettext_noop("Checking for incompatible \"jsonb\" data type in user tables"),
- .report_filename = "tables_using_jsonb.txt",
- .base_query =
- "SELECT 'pg_catalog.jsonb'::pg_catalog.regtype AS oid",
- .report_text =
- gettext_noop("Your installation contains the \"jsonb\" data type in user tables.\n"
- "The internal format of \"jsonb\" changed during 9.4 beta so this\n"
- "cluster cannot currently be upgraded. You can drop the problem \n"
- "columns and restart the upgrade.\n"),
- .threshold_version = MANUAL_CHECK,
- .version_hook = jsonb_9_4_check_applicable
+ .report_filename = "tables_using_jsonb.txt",
+ .base_query =
+ "SELECT 'pg_catalog.jsonb'::pg_catalog.regtype AS oid",
+ .report_text =
+ gettext_noop("Your installation contains the \"jsonb\" data type in user tables.\n"
+ "The internal format of \"jsonb\" changed during 9.4 beta so this\n"
+ "cluster cannot currently be upgraded. You can drop the problem \n"
+ "columns and restart the upgrade.\n"),
+ .threshold_version = MANUAL_CHECK,
+ .version_hook = jsonb_9_4_check_applicable
},
/*
*/
{
.status = gettext_noop("Checking for removed \"abstime\" data type in user tables"),
- .report_filename = "tables_using_abstime.txt",
- .base_query =
- "SELECT 'pg_catalog.abstime'::pg_catalog.regtype AS oid",
- .report_text =
- gettext_noop("Your installation contains the \"abstime\" data type in user tables.\n"
- "The \"abstime\" type has been removed in PostgreSQL version 12,\n"
- "so this cluster cannot currently be upgraded. You can drop the\n"
- "problem columns, or change them to another data type, and restart\n"
- "the upgrade.\n"),
- .threshold_version = 1100
+ .report_filename = "tables_using_abstime.txt",
+ .base_query =
+ "SELECT 'pg_catalog.abstime'::pg_catalog.regtype AS oid",
+ .report_text =
+ gettext_noop("Your installation contains the \"abstime\" data type in user tables.\n"
+ "The \"abstime\" type has been removed in PostgreSQL version 12,\n"
+ "so this cluster cannot currently be upgraded. You can drop the\n"
+ "problem columns, or change them to another data type, and restart\n"
+ "the upgrade.\n"),
+ .threshold_version = 1100
},
{
.status = gettext_noop("Checking for removed \"reltime\" data type in user tables"),
- .report_filename = "tables_using_reltime.txt",
- .base_query =
- "SELECT 'pg_catalog.reltime'::pg_catalog.regtype AS oid",
- .report_text =
- gettext_noop("Your installation contains the \"reltime\" data type in user tables.\n"
- "The \"reltime\" type has been removed in PostgreSQL version 12,\n"
- "so this cluster cannot currently be upgraded. You can drop the\n"
- "problem columns, or change them to another data type, and restart\n"
- "the upgrade.\n"),
- .threshold_version = 1100
+ .report_filename = "tables_using_reltime.txt",
+ .base_query =
+ "SELECT 'pg_catalog.reltime'::pg_catalog.regtype AS oid",
+ .report_text =
+ gettext_noop("Your installation contains the \"reltime\" data type in user tables.\n"
+ "The \"reltime\" type has been removed in PostgreSQL version 12,\n"
+ "so this cluster cannot currently be upgraded. You can drop the\n"
+ "problem columns, or change them to another data type, and restart\n"
+ "the upgrade.\n"),
+ .threshold_version = 1100
},
{
.status = gettext_noop("Checking for removed \"tinterval\" data type in user tables"),
- .report_filename = "tables_using_tinterval.txt",
- .base_query =
- "SELECT 'pg_catalog.tinterval'::pg_catalog.regtype AS oid",
- .report_text =
- gettext_noop("Your installation contains the \"tinterval\" data type in user tables.\n"
- "The \"tinterval\" type has been removed in PostgreSQL version 12,\n"
- "so this cluster cannot currently be upgraded. You can drop the\n"
- "problem columns, or change them to another data type, and restart\n"
- "the upgrade.\n"),
- .threshold_version = 1100
+ .report_filename = "tables_using_tinterval.txt",
+ .base_query =
+ "SELECT 'pg_catalog.tinterval'::pg_catalog.regtype AS oid",
+ .report_text =
+ gettext_noop("Your installation contains the \"tinterval\" data type in user tables.\n"
+ "The \"tinterval\" type has been removed in PostgreSQL version 12,\n"
+ "so this cluster cannot currently be upgraded. You can drop the\n"
+ "problem columns, or change them to another data type, and restart\n"
+ "the upgrade.\n"),
+ .threshold_version = 1100
},
/* End of checks marker, must remain last */
* there's no storage involved in a view.
*/
static void
-check_for_data_types_usage(ClusterInfo *cluster, DataTypesUsageChecks * checks)
+check_for_data_types_usage(ClusterInfo *cluster, DataTypesUsageChecks *checks)
{
bool found = false;
bool *results;
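
Before moving on to the test diffs, a rough control-flow sketch of the loop this function is expected to run over the checks array above, reusing check_applies() from the earlier sketch. The struct mirror and helper prototypes are invented stand-ins, not pg_upgrade's real API:

#include <stdbool.h>
#include <stddef.h>

typedef struct DataTypesUsageCheckSketch
{
	const char *status;			/* progress message */
	const char *report_filename;	/* where hits are written */
	const char *base_query;		/* yields offending pg_type OIDs */
	const char *report_text;	/* user-facing error text */
	int			threshold_version;
	bool		(*version_hook) (void *old_cluster);
} DataTypesUsageCheckSketch;

/* hypothetical stand-ins for pg_upgrade internals */
extern int	old_cluster_major_version(void *cluster);
extern bool query_finds_offending_columns(void *cluster,
										  const char *base_query,
										  const char *report_filename);
extern void fail_with_report_texts(const DataTypesUsageCheckSketch *checks);

static void
check_for_data_types_usage_sketch(void *cluster,
								  const DataTypesUsageCheckSketch *checks)
{
	bool		found = false;
	int			old_ver = old_cluster_major_version(cluster);

	/* the array ends with an "end of checks" marker entry */
	for (const DataTypesUsageCheckSketch *c = checks; c->status != NULL; c++)
	{
		if (!check_applies(c->threshold_version, c->version_hook,
						   cluster, old_ver))
			continue;			/* check does not apply to this version */

		/* run base_query (per database); any hits land in report_filename */
		if (query_finds_offending_columns(cluster, c->base_query,
										  c->report_filename))
			found = true;
	}

	if (found)
		fail_with_report_texts(checks); /* print report_text and abort */
}
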
my $tempdir = PostgreSQL::Test::Utils::tempdir;
-test_bad_manifest('input string ended unexpectedly',
+test_bad_manifest(
+ 'input string ended unexpectedly',
qr/could not parse backup manifest: The input string ended unexpectedly/,
program_options_handling_ok('pg_waldump');
# wrong number of arguments
-command_fails_like([ 'pg_waldump', ], qr/error: no arguments/, 'no arguments');
-command_fails_like([ 'pg_waldump', 'foo', 'bar', 'baz' ], qr/error: too many command-line arguments/, 'too many arguments');
+command_fails_like([ 'pg_waldump', ], qr/error: no arguments/,
+ 'no arguments');
+command_fails_like(
+ [ 'pg_waldump', 'foo', 'bar', 'baz' ],
+ qr/error: too many command-line arguments/,
+ 'too many arguments');
# invalid option arguments
-command_fails_like([ 'pg_waldump', '--block', 'bad' ], qr/error: invalid block number/, 'invalid block number');
-command_fails_like([ 'pg_waldump', '--fork', 'bad' ], qr/error: invalid fork name/, 'invalid fork name');
-command_fails_like([ 'pg_waldump', '--limit', 'bad' ], qr/error: invalid value/, 'invalid limit');
-command_fails_like([ 'pg_waldump', '--relation', 'bad' ], qr/error: invalid relation/, 'invalid relation specification');
-command_fails_like([ 'pg_waldump', '--rmgr', 'bad' ], qr/error: resource manager .* does not exist/, 'invalid rmgr name');
-command_fails_like([ 'pg_waldump', '--start', 'bad' ], qr/error: invalid WAL location/, 'invalid start LSN');
-command_fails_like([ 'pg_waldump', '--end', 'bad' ], qr/error: invalid WAL location/, 'invalid end LSN');
+command_fails_like(
+ [ 'pg_waldump', '--block', 'bad' ],
+ qr/error: invalid block number/,
+ 'invalid block number');
+command_fails_like(
+ [ 'pg_waldump', '--fork', 'bad' ],
+ qr/error: invalid fork name/,
+ 'invalid fork name');
+command_fails_like(
+ [ 'pg_waldump', '--limit', 'bad' ],
+ qr/error: invalid value/,
+ 'invalid limit');
+command_fails_like(
+ [ 'pg_waldump', '--relation', 'bad' ],
+ qr/error: invalid relation/,
+ 'invalid relation specification');
+command_fails_like(
+ [ 'pg_waldump', '--rmgr', 'bad' ],
+ qr/error: resource manager .* does not exist/,
+ 'invalid rmgr name');
+command_fails_like(
+ [ 'pg_waldump', '--start', 'bad' ],
+ qr/error: invalid WAL location/,
+ 'invalid start LSN');
+command_fails_like(
+ [ 'pg_waldump', '--end', 'bad' ],
+ qr/error: invalid WAL location/,
+ 'invalid end LSN');
# rmgr list: If you add one to the list, consider also adding a test
# case exercising the new rmgr below.
-command_like([ 'pg_waldump', '--rmgr=list'], qr/^XLOG
+command_like(
+ [ 'pg_waldump', '--rmgr=list' ], qr/^XLOG
Transaction
Storage
CLOG
my $node = PostgreSQL::Test::Cluster->new('main');
$node->init;
-$node->append_conf('postgresql.conf', q{
+$node->append_conf(
+ 'postgresql.conf', q{
autovacuum = off
checkpoint_timeout = 1h
});
$node->start;
-my ($start_lsn, $start_walfile) = split /\|/, $node->safe_psql('postgres', q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())});
+my ($start_lsn, $start_walfile) = split /\|/,
+ $node->safe_psql('postgres',
+ q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())}
+ );
-$node->safe_psql('postgres', q{
+$node->safe_psql(
+ 'postgres', q{
-- heap, btree, hash, sequence
CREATE TABLE t1 (a int GENERATED ALWAYS AS IDENTITY, b text);
CREATE INDEX i1a ON t1 USING btree (a);
my $tblspc_path = PostgreSQL::Test::Utils::tempdir_short();
-$node->safe_psql('postgres', qq{
+$node->safe_psql(
+ 'postgres', qq{
CREATE TABLESPACE ts1 LOCATION '$tblspc_path';
DROP TABLESPACE ts1;
});
-my ($end_lsn, $end_walfile) = split /\|/, $node->safe_psql('postgres', q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())});
+my ($end_lsn, $end_walfile) = split /\|/,
+ $node->safe_psql('postgres',
+ q{SELECT pg_current_wal_insert_lsn(), pg_walfile_name(pg_current_wal_insert_lsn())}
+ );
-my $default_ts_oid = $node->safe_psql('postgres', q{SELECT oid FROM pg_tablespace WHERE spcname = 'pg_default'});
-my $postgres_db_oid = $node->safe_psql('postgres', q{SELECT oid FROM pg_database WHERE datname = 'postgres'});
-my $rel_t1_oid = $node->safe_psql('postgres', q{SELECT oid FROM pg_class WHERE relname = 't1'});
-my $rel_i1a_oid = $node->safe_psql('postgres', q{SELECT oid FROM pg_class WHERE relname = 'i1a'});
+my $default_ts_oid = $node->safe_psql('postgres',
+ q{SELECT oid FROM pg_tablespace WHERE spcname = 'pg_default'});
+my $postgres_db_oid = $node->safe_psql('postgres',
+ q{SELECT oid FROM pg_database WHERE datname = 'postgres'});
+my $rel_t1_oid = $node->safe_psql('postgres',
+ q{SELECT oid FROM pg_class WHERE relname = 't1'});
+my $rel_i1a_oid = $node->safe_psql('postgres',
+ q{SELECT oid FROM pg_class WHERE relname = 'i1a'});
$node->stop;
# various ways of specifying WAL range
-command_fails_like([ 'pg_waldump', 'foo', 'bar' ], qr/error: could not locate WAL file "foo"/, 'start file not found');
-command_like([ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile ], qr/./, 'runs with start segment specified');
-command_fails_like([ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile, 'bar' ], qr/error: could not open file "bar"/, 'end file not found');
-command_like([ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile, $node->data_dir . '/pg_wal/' . $end_walfile ], qr/./, 'runs with start and end segment specified');
-command_fails_like([ 'pg_waldump', '-p', $node->data_dir ], qr/error: no start WAL location given/, 'path option requires start location');
-command_like([ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end', $end_lsn ], qr/./, 'runs with path option and start and end locations');
-command_fails_like([ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn ], qr/error: error in WAL record at/, 'falling off the end of the WAL results in an error');
-
-command_like([ 'pg_waldump', '--quiet', $node->data_dir . '/pg_wal/' . $start_walfile ], qr/^$/, 'no output with --quiet option');
-command_fails_like([ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start', $start_lsn ], qr/error: error in WAL record at/, 'errors are shown with --quiet');
+command_fails_like(
+ [ 'pg_waldump', 'foo', 'bar' ],
+ qr/error: could not locate WAL file "foo"/,
+ 'start file not found');
+command_like([ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile ],
+ qr/./, 'runs with start segment specified');
+command_fails_like(
+ [ 'pg_waldump', $node->data_dir . '/pg_wal/' . $start_walfile, 'bar' ],
+ qr/error: could not open file "bar"/,
+ 'end file not found');
+command_like(
+ [
+ 'pg_waldump',
+ $node->data_dir . '/pg_wal/' . $start_walfile,
+ $node->data_dir . '/pg_wal/' . $end_walfile
+ ],
+ qr/./,
+ 'runs with start and end segment specified');
+command_fails_like(
+ [ 'pg_waldump', '-p', $node->data_dir ],
+ qr/error: no start WAL location given/,
+ 'path option requires start location');
+command_like(
+ [
+ 'pg_waldump', '-p', $node->data_dir, '--start',
+ $start_lsn, '--end', $end_lsn
+ ],
+ qr/./,
+ 'runs with path option and start and end locations');
+command_fails_like(
+ [ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn ],
+ qr/error: error in WAL record at/,
+ 'falling off the end of the WAL results in an error');
+
+command_like(
+ [
+ 'pg_waldump', '--quiet',
+ $node->data_dir . '/pg_wal/' . $start_walfile
+ ],
+ qr/^$/,
+ 'no output with --quiet option');
+command_fails_like(
+ [ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start', $start_lsn ],
+ qr/error: error in WAL record at/,
+ 'errors are shown with --quiet');
# Test for: Display a message that we're skipping data if `from`
# is not a pointer to the start of a record.
my (@cmd, $stdout, $stderr, $result);
- @cmd = ( 'pg_waldump', '--start', $new_start, $node->data_dir . '/pg_wal/' . $start_walfile );
+ @cmd = (
+ 'pg_waldump', '--start', $new_start,
+ $node->data_dir . '/pg_wal/' . $start_walfile);
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
ok($result, "runs with start segment and start LSN specified");
like($stderr, qr/first record is after/, 'info message printed');
my (@cmd, $stdout, $stderr, $result, @lines);
- @cmd = ('pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end', $end_lsn);
+ @cmd = (
+ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end',
+ $end_lsn);
push @cmd, @opts;
$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
ok($result, "pg_waldump @opts: runs ok");
@lines = test_pg_waldump('--fork', 'init');
is(grep(!/fork init/, @lines), 0, 'only init fork lines');
-@lines = test_pg_waldump('--relation', "$default_ts_oid/$postgres_db_oid/$rel_t1_oid");
-is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines), 0, 'only lines for selected relation');
+@lines = test_pg_waldump('--relation',
+ "$default_ts_oid/$postgres_db_oid/$rel_t1_oid");
+is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines),
+ 0, 'only lines for selected relation');
-@lines = test_pg_waldump('--relation', "$default_ts_oid/$postgres_db_oid/$rel_i1a_oid", '--block', 1);
+@lines =
+ test_pg_waldump('--relation',
+ "$default_ts_oid/$postgres_db_oid/$rel_i1a_oid",
+ '--block', 1);
is(grep(!/\bblk 1\b/, @lines), 0, 'only lines for selected block');
WHERE end_lsn > '$summarized_lsn'
EOM
my @lines = split(/\n/, $details);
-is(0+@lines, 1, "got exactly one new WAL summary");
+is(0 + @lines, 1, "got exactly one new WAL summary");
my ($tli, $start_lsn, $end_lsn) = split(/\|/, $lines[0]);
note("examining summary for TLI $tli from $start_lsn to $end_lsn");
# Reconstruct the full pathname for the WAL summary file.
my $filename = sprintf "%s/pg_wal/summaries/%08s%08s%08s%08s%08s.summary",
- $node1->data_dir, $tli,
- split(m@/@, $start_lsn),
- split(m@/@, $end_lsn);
+ $node1->data_dir, $tli,
+ split(m@/@, $start_lsn),
+ split(m@/@, $end_lsn);
ok(-f $filename, "WAL summary file exists");
# Run pg_walsummary on it. We expect exactly two blocks to be modified,
# block 0 and block 1.
@lines = split(/\n/, $stdout);
like($stdout, qr/FORK main: block 0$/m, "stdout shows block 0 modified");
is($stderr, '', 'stderr is empty');
-is(0+@lines, 2, "UPDATE modified 2 blocks");
+is(0 + @lines, 2, "UPDATE modified 2 blocks");
done_testing();
# Test --exit-on-abort
$node->safe_psql('postgres',
- 'CREATE TABLE counter(i int); '.
- 'INSERT INTO counter VALUES (0);'
-);
+ 'CREATE TABLE counter(i int); ' . 'INSERT INTO counter VALUES (0);');
$node->pgbench(
'-t 10 -c 2 -j 2 --exit-on-abort',
2,
[],
- [
- qr{division by zero},
- qr{Run was aborted due to an error in thread}
- ],
+ [ qr{division by zero}, qr{Run was aborted due to an error in thread} ],
'test --exit-on-abort',
{
'001_exit_on_abort' => q{
psql_like(
$node,
- sprintf(q{with x as (
+ sprintf(
+ q{with x as (
select now()-backend_start AS howlong
from pg_stat_activity
where pid = pg_backend_pid()
my $c1 = slurp_file($g_file);
like($c1, qr/one/);
-psql_like($node, "SELECT 'two' \\; SELECT 'three' \\g | $pipe_cmd", qr//, "two commands \\g");
+psql_like($node, "SELECT 'two' \\; SELECT 'three' \\g | $pipe_cmd",
+ qr//, "two commands \\g");
my $c2 = slurp_file($g_file);
like($c2, qr/two.*three/s);
-psql_like($node, "\\set SHOW_ALL_RESULTS 0\nSELECT 'four' \\; SELECT 'five' \\g | $pipe_cmd", qr//,
- "two commands \\g with only last result");
+psql_like(
+ $node,
+ "\\set SHOW_ALL_RESULTS 0\nSELECT 'four' \\; SELECT 'five' \\g | $pipe_cmd",
+ qr//,
+ "two commands \\g with only last result");
my $c3 = slurp_file($g_file);
like($c3, qr/five/);
unlike($c3, qr/four/);
psql_like($node, "copy (values ('foo'),('bar')) to stdout \\g | $pipe_cmd",
- qr//,
- "copy output passed to \\g pipe");
+ qr//, "copy output passed to \\g pipe");
my $c4 = slurp_file($g_file);
like($c4, qr/foo.*bar/s);
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'clusterdb', '-a' ],
- 'invalid database not targeted by clusterdb -a');
+ 'invalid database not targeted by clusterdb -a');
# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 010_clusterdb.pl as well.
-$node->command_fails_like([ 'clusterdb', '-d', 'regression_invalid'],
- qr/FATAL: cannot connect to invalid database "regression_invalid"/,
- 'clusterdb cannot target invalid database');
+$node->command_fails_like(
+ [ 'clusterdb', '-d', 'regression_invalid' ],
+ qr/FATAL: cannot connect to invalid database "regression_invalid"/,
+ 'clusterdb cannot target invalid database');
$node->safe_psql('postgres',
'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x'
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'dropdb', 'regression_invalid' ],
- 'invalid database can be dropped');
+ 'invalid database can be dropped');
done_testing();
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'reindexdb', '-a' ],
- 'invalid database not targeted by reindexdb -a');
+ 'invalid database not targeted by reindexdb -a');
# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 090_reindexdb.pl as well.
-$node->command_fails_like([ 'reindexdb', '-d', 'regression_invalid'],
- qr/FATAL: cannot connect to invalid database "regression_invalid"/,
- 'reindexdb cannot target invalid database');
+$node->command_fails_like(
+ [ 'reindexdb', '-d', 'regression_invalid' ],
+ qr/FATAL: cannot connect to invalid database "regression_invalid"/,
+ 'reindexdb cannot target invalid database');
done_testing();
qr/^(?!.*VACUUM \(SKIP_DATABASE_STATS\) "Foo".bar).*$/s,
'vacuumdb --exclude-schema');
$node->issues_sql_like(
- [ 'vacuumdb', '--exclude-schema', '"Foo"', '--exclude-schema', '"Bar"', 'postgres' ],
+ [
+ 'vacuumdb', '--exclude-schema', '"Foo"', '--exclude-schema',
+ '"Bar"', 'postgres'
+ ],
qr/^(?!.*VACUUM\ \(SKIP_DATABASE_STATS\)\ "Foo".bar
| VACUUM\ \(SKIP_DATABASE_STATS\)\ "Bar".baz).*$/sx,
'vacuumdb multiple --exclude-schema switches');
UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
));
$node->command_ok([ 'vacuumdb', '-a' ],
- 'invalid database not targeted by vacuumdb -a');
+ 'invalid database not targeted by vacuumdb -a');
# Doesn't quite belong here, but don't want to waste time by creating an
# invalid database in 010_vacuumdb.pl as well.
-$node->command_fails_like([ 'vacuumdb', '-d', 'regression_invalid'],
- qr/FATAL: cannot connect to invalid database "regression_invalid"/,
- 'vacuumdb cannot target invalid database');
+$node->command_fails_like(
+ [ 'vacuumdb', '-d', 'regression_invalid' ],
+ qr/FATAL: cannot connect to invalid database "regression_invalid"/,
+ 'vacuumdb cannot target invalid database');
done_testing();
{
size_t len;
char *prod;
-} td_entry;
+} td_entry;
#define TD_ENTRY(PROD) { sizeof(PROD) - 1, (PROD) }
{
/* JSON */
[OFS(JSON_NT_JSON)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_SCALAR_STRING),
- [OFS(JSON_NT_JSON)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_SCALAR_NUMBER),
- [OFS(JSON_NT_JSON)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_SCALAR_TRUE),
- [OFS(JSON_NT_JSON)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_SCALAR_FALSE),
- [OFS(JSON_NT_JSON)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_SCALAR_NULL),
- [OFS(JSON_NT_JSON)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY),
- [OFS(JSON_NT_JSON)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_OBJECT),
+ [OFS(JSON_NT_JSON)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_SCALAR_NUMBER),
+ [OFS(JSON_NT_JSON)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_SCALAR_TRUE),
+ [OFS(JSON_NT_JSON)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_SCALAR_FALSE),
+ [OFS(JSON_NT_JSON)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_SCALAR_NULL),
+ [OFS(JSON_NT_JSON)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY),
+ [OFS(JSON_NT_JSON)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_OBJECT),
/* ARRAY_ELEMENTS */
- [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
- [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
- [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
- [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
- [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
- [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
- [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
- [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
+ [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
+ [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
+ [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
+ [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
+ [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
+ [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
+ [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
+ [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
/* MORE_ARRAY_ELEMENTS */
- [OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_ARRAY_ELEMENTS),
- [OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
+ [OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_ARRAY_ELEMENTS),
+ [OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
/* KEY_PAIRS */
- [OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_KEY_PAIRS),
- [OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
+ [OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_KEY_PAIRS),
+ [OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
/* MORE_KEY_PAIRS */
- [OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_KEY_PAIRS),
- [OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
+ [OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_KEY_PAIRS),
+ [OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
};
/* the GOAL production. Not stored in the table, but will be the initial contents of the prediction stack */
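
The table above is a classic LL(1) prediction table: each (nonterminal, lookahead-token) cell holds a td_entry whose prod string is a production's right-hand side. A generic sketch of the driver loop such a table implies (illustrative only, not jsonapi.c's actual code; the callback signatures are invented, and stack bounds checks are omitted):

#include <stdbool.h>
#include <stddef.h>

typedef struct
{
	size_t		len;			/* length of prod */
	char	   *prod;			/* RHS symbols; empty for an epsilon rule */
} td_entry_sketch;

/*
 * Illustrative predictive-parse loop. stack[0..top-1] holds pending
 * symbols, seeded with the GOAL production's contents.
 */
static bool
predictive_parse(const td_entry_sketch *(*lookup) (char nt, char tok),
				 bool (*is_nonterminal) (char sym),
				 char (*next_token) (void *lexer),
				 void *lexer, char *stack, size_t top)
{
	char		tok = next_token(lexer);

	while (top > 0)
	{
		char		sym = stack[--top];

		if (!is_nonterminal(sym))
		{
			if (sym != tok)
				return false;	/* terminal mismatch: parse error */
			tok = next_token(lexer);
		}
		else
		{
			const td_entry_sketch *e = lookup(sym, tok);

			if (e == NULL || e->prod == NULL)
				return false;	/* no production for this lookahead */

			/* push the RHS in reverse so its first symbol ends up on top */
			for (size_t i = e->len; i > 0; i--)
				stack[top++] = e->prod[i - 1];
		}
	}
	return true;				/* stack empty: input matched the grammar */
}
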
pg_wchar
unicode_lowercase_simple(pg_wchar code)
{
- const pg_case_map *map = find_case_map(code);
+ const pg_case_map *map = find_case_map(code);
return map ? map->simplemap[CaseLower] : code;
}
pg_wchar
unicode_titlecase_simple(pg_wchar code)
{
- const pg_case_map *map = find_case_map(code);
+ const pg_case_map *map = find_case_map(code);
return map ? map->simplemap[CaseTitle] : code;
}
pg_wchar
unicode_uppercase_simple(pg_wchar code)
{
- const pg_case_map *map = find_case_map(code);
+ const pg_case_map *map = find_case_map(code);
return map ? map->simplemap[CaseUpper] : code;
}
{
pg_wchar u1 = utf8_to_unicode((unsigned char *) src + srcoff);
int u1len = unicode_utf8len(u1);
- const pg_case_map *casemap = find_case_map(u1);
+ const pg_case_map *casemap = find_case_map(u1);
if (str_casekind == CaseTitle)
{
Assert(lengthof(case_map) >= 0x80);
if (ucs < 0x80)
{
- const pg_case_map *map = &case_map[ucs];
+ const pg_case_map *map = &case_map[ucs];
Assert(map->codepoint == ucs);
return map;
#define PG_U_CHARACTER_TAB 0x09
-static bool range_search(const pg_unicode_range * tbl, size_t size,
+static bool range_search(const pg_unicode_range *tbl, size_t size,
pg_wchar code);
/*
 * Binary search to determine whether the given codepoint falls within one
 * of the ranges in the given table.
 */
static bool
-range_search(const pg_unicode_range * tbl, size_t size, pg_wchar code)
+range_search(const pg_unicode_range *tbl, size_t size, pg_wchar code)
{
int min = 0;
int mid;
aggfinalfn => 'interval_avg', aggcombinefn => 'interval_avg_combine',
aggserialfn => 'interval_avg_serialize',
aggdeserialfn => 'interval_avg_deserialize',
- aggmtransfn => 'interval_avg_accum', aggminvtransfn => 'interval_avg_accum_inv',
- aggmfinalfn => 'interval_avg', aggtranstype => 'internal',
- aggtransspace => '40', aggmtranstype => 'internal', aggmtransspace => '40' },
+ aggmtransfn => 'interval_avg_accum',
+ aggminvtransfn => 'interval_avg_accum_inv', aggmfinalfn => 'interval_avg',
+ aggtranstype => 'internal', aggtransspace => '40',
+ aggmtranstype => 'internal', aggmtransspace => '40' },
# sum
{ aggfnoid => 'sum(int8)', aggtransfn => 'int8_avg_accum',
aggfinalfn => 'interval_sum', aggcombinefn => 'interval_avg_combine',
aggserialfn => 'interval_avg_serialize',
aggdeserialfn => 'interval_avg_deserialize',
- aggmtransfn => 'interval_avg_accum', aggminvtransfn => 'interval_avg_accum_inv',
- aggmfinalfn => 'interval_sum', aggtranstype => 'internal',
- aggtransspace => '40', aggmtranstype => 'internal', aggmtransspace => '40'},
+ aggmtransfn => 'interval_avg_accum',
+ aggminvtransfn => 'interval_avg_accum_inv', aggmfinalfn => 'interval_sum',
+ aggtranstype => 'internal', aggtransspace => '40',
+ aggmtranstype => 'internal', aggmtransspace => '40' },
{ aggfnoid => 'sum(numeric)', aggtransfn => 'numeric_avg_accum',
aggfinalfn => 'numeric_sum', aggcombinefn => 'numeric_avg_combine',
aggserialfn => 'numeric_avg_serialize',
descr => 'sorts using the Unicode Collation Algorithm with default settings',
collname => 'unicode', collprovider => 'i', collencoding => '-1',
colllocale => 'und' },
-{ oid => '811', descr => 'sorts by Unicode code point; Unicode and POSIX character semantics',
+{ oid => '811',
+ descr => 'sorts by Unicode code point; Unicode and POSIX character semantics',
collname => 'pg_c_utf8', collprovider => 'b', collencoding => '6',
colllocale => 'C.UTF-8', collversion => '1' },
descr => 'default template for new databases',
datname => 'template1', encoding => 'ENCODING',
datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't',
- datallowconn => 't', dathasloginevt => 'f', datconnlimit => '-1', datfrozenxid => '0',
- datminmxid => '1', dattablespace => 'pg_default', datcollate => 'LC_COLLATE',
- datctype => 'LC_CTYPE', datlocale => 'DATLOCALE',
+ datallowconn => 't', dathasloginevt => 'f', datconnlimit => '-1',
+ datfrozenxid => '0', datminmxid => '1', dattablespace => 'pg_default',
+ datcollate => 'LC_COLLATE', datctype => 'LC_CTYPE', datlocale => 'DATLOCALE',
daticurules => 'ICU_RULES', datacl => '_null_' },
]
prosrc => 'drandom_normal' },
{ oid => '9719', descr => 'random integer in range',
proname => 'random', provolatile => 'v', proparallel => 'r',
- prorettype => 'int4', proargtypes => 'int4 int4',
- proargnames => '{min,max}', prosrc => 'int4random' },
+ prorettype => 'int4', proargtypes => 'int4 int4', proargnames => '{min,max}',
+ prosrc => 'int4random' },
{ oid => '9720', descr => 'random bigint in range',
proname => 'random', provolatile => 'v', proparallel => 'r',
- prorettype => 'int8', proargtypes => 'int8 int8',
- proargnames => '{min,max}', prosrc => 'int8random' },
+ prorettype => 'int8', proargtypes => 'int8 int8', proargnames => '{min,max}',
+ prosrc => 'int8random' },
{ oid => '9721', descr => 'random numeric in range',
proname => 'random', provolatile => 'v', proparallel => 'r',
prorettype => 'numeric', proargtypes => 'numeric numeric',
prosrc => 'numeric_poly_stddev_samp' },
{ oid => '1843', descr => 'aggregate transition function',
- proname => 'interval_avg_accum', proisstrict => 'f',
- prorettype => 'internal', proargtypes => 'internal interval',
- prosrc => 'interval_avg_accum' },
+ proname => 'interval_avg_accum', proisstrict => 'f', prorettype => 'internal',
+ proargtypes => 'internal interval', prosrc => 'interval_avg_accum' },
{ oid => '3325', descr => 'aggregate combine function',
proname => 'interval_avg_combine', proisstrict => 'f',
prorettype => 'internal', proargtypes => 'internal internal',
prosrc => 'pg_stat_get_checkpointer_restartpoints_timed' },
{ oid => '8744',
descr => 'statistics: number of backend requested restartpoints started by the checkpointer',
- proname => 'pg_stat_get_checkpointer_restartpoints_requested', provolatile => 's',
- proparallel => 'r', prorettype => 'int8', proargtypes => '',
+ proname => 'pg_stat_get_checkpointer_restartpoints_requested',
+ provolatile => 's', proparallel => 'r', prorettype => 'int8',
+ proargtypes => '',
prosrc => 'pg_stat_get_checkpointer_restartpoints_requested' },
{ oid => '8745',
descr => 'statistics: number of backend performed restartpoints',
- proname => 'pg_stat_get_checkpointer_restartpoints_performed', provolatile => 's',
- proparallel => 'r', prorettype => 'int8', proargtypes => '',
+ proname => 'pg_stat_get_checkpointer_restartpoints_performed',
+ provolatile => 's', proparallel => 'r', prorettype => 'int8',
+ proargtypes => '',
prosrc => 'pg_stat_get_checkpointer_restartpoints_performed' },
{ oid => '2771',
descr => 'statistics: number of buffers written during checkpoints and restartpoints',
proname => 'pg_column_compression', provolatile => 's', prorettype => 'text',
proargtypes => 'any', prosrc => 'pg_column_compression' },
{ oid => '8393', descr => 'chunk ID of on-disk TOASTed value',
- proname => 'pg_column_toast_chunk_id', provolatile => 's', prorettype => 'oid',
- proargtypes => 'any', prosrc => 'pg_column_toast_chunk_id' },
+ proname => 'pg_column_toast_chunk_id', provolatile => 's',
+ prorettype => 'oid', proargtypes => 'any',
+ prosrc => 'pg_column_toast_chunk_id' },
{ oid => '2322',
descr => 'total disk space usage for the specified tablespace',
proname => 'pg_tablespace_size', provolatile => 'v', prorettype => 'int8',
proname => 'text', prorettype => 'text', proargtypes => 'xml',
prosrc => 'xmltotext' },
{ oid => '3813', descr => 'generate XML text node',
- proname => 'xmltext', proisstrict => 't', prorettype => 'xml',
- proargtypes => 'text', prosrc => 'xmltext' },
+ proname => 'xmltext', prorettype => 'xml', proargtypes => 'text',
+ prosrc => 'xmltext' },
{ oid => '2923', descr => 'map table contents to XML',
proname => 'table_to_xml', procost => '100', provolatile => 's',
prorettype => 'anyelement', proargtypes => 'anyelement jsonb',
prosrc => 'jsonb_populate_record' },
{ oid => '9558', descr => 'test get record fields from a jsonb object',
- proname => 'jsonb_populate_record_valid', proisstrict => 'f', provolatile => 's',
- prorettype => 'bool', proargtypes => 'anyelement jsonb',
+ proname => 'jsonb_populate_record_valid', proisstrict => 'f',
+ provolatile => 's', prorettype => 'bool', proargtypes => 'anyelement jsonb',
prosrc => 'jsonb_populate_record_valid' },
{ oid => '3475',
descr => 'get set of records with fields from a jsonb array of objects',
proname => 'pg_logical_emit_message', provolatile => 'v', proparallel => 'u',
prorettype => 'pg_lsn', proargtypes => 'bool text bytea bool',
prosrc => 'pg_logical_emit_message_bytea' },
-{ oid => '9929', descr => 'sync replication slots from the primary to the standby',
- proname => 'pg_sync_replication_slots', provolatile => 'v', proparallel => 'u',
- prorettype => 'void', proargtypes => '',
+{ oid => '9929',
+ descr => 'sync replication slots from the primary to the standby',
+ proname => 'pg_sync_replication_slots', provolatile => 'v',
+ proparallel => 'u', prorettype => 'void', proargtypes => '',
prosrc => 'pg_sync_replication_slots' },
# event triggers
proname => 'binary_upgrade_logical_slot_has_caught_up', provolatile => 'v',
proparallel => 'u', prorettype => 'bool', proargtypes => 'name',
prosrc => 'binary_upgrade_logical_slot_has_caught_up' },
-{ oid => '8404', descr => 'for use by pg_upgrade (relation for pg_subscription_rel)',
+{ oid => '8404',
+ descr => 'for use by pg_upgrade (relation for pg_subscription_rel)',
proname => 'binary_upgrade_add_sub_rel_state', proisstrict => 'f',
provolatile => 'v', proparallel => 'u', prorettype => 'void',
proargtypes => 'text oid char pg_lsn',
{ oid => '8405', descr => 'for use by pg_upgrade (remote_lsn for origin)',
proname => 'binary_upgrade_replorigin_advance', proisstrict => 'f',
provolatile => 'v', proparallel => 'u', prorettype => 'void',
- proargtypes => 'text pg_lsn',
- prosrc => 'binary_upgrade_replorigin_advance' },
+ proargtypes => 'text pg_lsn', prosrc => 'binary_upgrade_replorigin_advance' },
# conversion functions
{ oid => '4302',
proname => 'any_value_transfn', prorettype => 'anyelement',
proargtypes => 'anyelement anyelement', prosrc => 'any_value_transfn' },
-{ oid => '8436',
- descr => 'list of available WAL summary files',
- proname => 'pg_available_wal_summaries', prorows => '100',
- proretset => 't', provolatile => 'v', proparallel => 's',
- prorettype => 'record', proargtypes => '',
- proallargtypes => '{int8,pg_lsn,pg_lsn}',
- proargmodes => '{o,o,o}',
+{ oid => '8436', descr => 'list of available WAL summary files',
+ proname => 'pg_available_wal_summaries', prorows => '100', proretset => 't',
+ provolatile => 'v', prorettype => 'record', proargtypes => '',
+ proallargtypes => '{int8,pg_lsn,pg_lsn}', proargmodes => '{o,o,o}',
proargnames => '{tli,start_lsn,end_lsn}',
prosrc => 'pg_available_wal_summaries' },
-{ oid => '8437',
- descr => 'contents of a WAL summary file',
- proname => 'pg_wal_summary_contents', prorows => '100',
- proretset => 't', provolatile => 'v', proparallel => 's',
- prorettype => 'record', proargtypes => 'int8 pg_lsn pg_lsn',
+{ oid => '8437', descr => 'contents of a WAL summary file',
+ proname => 'pg_wal_summary_contents', prorows => '100', proretset => 't',
+ provolatile => 'v', prorettype => 'record',
+ proargtypes => 'int8 pg_lsn pg_lsn',
proallargtypes => '{int8,pg_lsn,pg_lsn,oid,oid,oid,int2,int8,bool}',
proargmodes => '{i,i,i,o,o,o,o,o,o}',
proargnames => '{tli,start_lsn,end_lsn,relfilenode,reltablespace,reldatabase,relforknumber,relblocknumber,is_limit_block}',
prosrc => 'pg_wal_summary_contents' },
-{ oid => '8438',
- descr => 'WAL summarizer state',
- proname => 'pg_get_wal_summarizer_state',
- provolatile => 'v', proparallel => 's',
+{ oid => '8438', descr => 'WAL summarizer state',
+ proname => 'pg_get_wal_summarizer_state', provolatile => 'v',
prorettype => 'record', proargtypes => '',
- proallargtypes => '{int8,pg_lsn,pg_lsn,int4}',
- proargmodes => '{o,o,o,o}',
+ proallargtypes => '{int8,pg_lsn,pg_lsn,int4}', proargmodes => '{o,o,o,o}',
proargnames => '{summarized_tli,summarized_lsn,pending_lsn,summarizer_pid}',
prosrc => 'pg_get_wal_summarizer_state' },
# GiST stratnum implementations
{ oid => '8047', descr => 'GiST support',
proname => 'gist_stratnum_identity', prorettype => 'int2',
- proargtypes => 'int2',
- prosrc => 'gist_stratnum_identity' },
+ proargtypes => 'int2', prosrc => 'gist_stratnum_identity' },
]
typname => 'polygon', typlen => '-1', typbyval => 'f', typcategory => 'G',
typinput => 'poly_in', typoutput => 'poly_out', typreceive => 'poly_recv',
typsend => 'poly_send', typalign => 'd', typstorage => 'x' },
-{ oid => '628', array_type_oid => '629', descr => 'geometric line, formats \'{A,B,C}\'/\'[point1,point2]\'',
+{ oid => '628', array_type_oid => '629',
+ descr => 'geometric line, formats \'{A,B,C}\'/\'[point1,point2]\'',
typname => 'line', typlen => '24', typbyval => 'f', typcategory => 'G',
typsubscript => 'raw_array_subscript_handler', typelem => 'float8',
typinput => 'line_in', typoutput => 'line_out', typreceive => 'line_recv',
typoutput => 'tsm_handler_out', typreceive => '-', typsend => '-',
typalign => 'i' },
{ oid => '269',
- typname => 'table_am_handler',
descr => 'pseudo-type for the result of a table AM handler function',
- typlen => '4', typbyval => 't', typtype => 'p',
+ typname => 'table_am_handler', typlen => '4', typbyval => 't', typtype => 'p',
typcategory => 'P', typinput => 'table_am_handler_in',
typoutput => 'table_am_handler_out', typreceive => '-', typsend => '-',
typalign => 'i' },
typoutput => 'brin_bloom_summary_out',
typreceive => 'brin_bloom_summary_recv', typsend => 'brin_bloom_summary_send',
typalign => 'i', typstorage => 'x', typcollation => 'default' },
-{ oid => '4601', descr => 'pseudo-type representing BRIN minmax-multi summary',
+{ oid => '4601',
+ descr => 'pseudo-type representing BRIN minmax-multi summary',
typname => 'pg_brin_minmax_multi_summary', typlen => '-1', typbyval => 'f',
typcategory => 'Z', typinput => 'brin_minmax_multi_summary_in',
typoutput => 'brin_minmax_multi_summary_out',
CaseTitle = 1,
CaseUpper = 2,
NCaseKind
-} CaseKind;
+} CaseKind;
typedef struct
{
pg_wchar codepoint; /* Unicode codepoint */
pg_wchar simplemap[NCaseKind];
-} pg_case_map;
+} pg_case_map;
/*
* Case mapping table. Dense for codepoints < 0x80 (enabling fast lookup),
uint32 first; /* Unicode codepoint */
uint32 last; /* Unicode codepoint */
uint8 category; /* General Category */
-} pg_category_range;
+} pg_category_range;
typedef struct
{
uint32 first; /* Unicode codepoint */
uint32 last; /* Unicode codepoint */
-} pg_unicode_range;
+} pg_unicode_range;
typedef struct
{
uint8 category;
uint8 properties;
-} pg_unicode_properties;
+} pg_unicode_properties;
/*
* The properties currently used, in no particular order. Fits in a uint8, but
# Only run the GSSAPI tests when compiled with GSSAPI support and
# PG_TEST_EXTRA includes 'kerberos'
my $gss_supported = $ENV{with_gssapi} eq 'yes';
-my $kerberos_enabled = $ENV{PG_TEST_EXTRA} && $ENV{PG_TEST_EXTRA} =~ /\bkerberos\b/;
+my $kerberos_enabled =
+ $ENV{PG_TEST_EXTRA} && $ENV{PG_TEST_EXTRA} =~ /\bkerberos\b/;
my $ssl_supported = $ENV{with_ssl} eq 'openssl';
###
my $realm = 'EXAMPLE.COM';
$krb = PostgreSQL::Test::Kerberos->new($host, $hostaddr, $realm);
- $node->append_conf('postgresql.conf', "krb_server_keyfile = '$krb->{keytab}'\n");
+ $node->append_conf('postgresql.conf',
+ "krb_server_keyfile = '$krb->{keytab}'\n");
}
if ($ssl_supported != 0)
# Helper function that returns the encryption method in use in the
# connection.
-$node->safe_psql('postgres', q{
+$node->safe_psql(
+ 'postgres', q{
CREATE FUNCTION current_enc() RETURNS text LANGUAGE plpgsql AS $$
DECLARE
ssl_in_use bool;
# Ok, all prepared. Run the tests.
-my @all_test_users = ('testuser', 'ssluser', 'nossluser', 'gssuser', 'nogssuser');
+my @all_test_users =
+ ('testuser', 'ssluser', 'nossluser', 'gssuser', 'nogssuser');
my @all_gssencmodes = ('disable', 'prefer', 'require');
my @all_sslmodes = ('disable', 'allow', 'prefer', 'require');
my @all_sslnegotiations = ('postgres', 'direct', 'requiredirect');
### Run tests with GSS and SSL disabled in the server
###
my $test_table;
-if ($ssl_supported) {
+if ($ssl_supported)
+{
$test_table = q{
# USER GSSENCMODE SSLMODE SSLNEGOTIATION EVENTS -> OUTCOME
testuser disable disable * connect, authok -> plain
. . . direct connect, directsslreject, reconnect, sslreject -> fail
. . . requiredirect connect, directsslreject -> fail
};
-} else {
+}
+else
+{
# Compiled without SSL support
$test_table = q{
# USER GSSENCMODE SSLMODE SSLNEGOTIATION EVENTS -> OUTCOME
note("Running tests with SSL and GSS disabled in the server");
test_matrix($node, $server_config,
- ['testuser'], \@all_gssencmodes, \@all_sslmodes, \@all_sslnegotiations,
- parse_table($test_table));
+ ['testuser'], \@all_gssencmodes, \@all_sslmodes, \@all_sslnegotiations,
+ parse_table($test_table));
###
$server_config->{server_ssl} = 1;
note("Running tests with SSL enabled in server");
- test_matrix($node, $server_config,
- ['testuser', 'ssluser', 'nossluser'],
- ['disable'], \@all_sslmodes, \@all_sslnegotiations,
- parse_table($test_table));
+ test_matrix(
+ $node, $server_config,
+ [ 'testuser', 'ssluser', 'nossluser' ], ['disable'],
+ \@all_sslmodes, \@all_sslnegotiations,
+ parse_table($test_table));
# Disable SSL again
$node->adjust_conf('postgresql.conf', 'ssl', 'off');
# even connecting to the server. Skip those, because we tested
# them earlier already.
my ($sslmodes, $sslnegotiations);
- if ($ssl_supported != 0) {
- ($sslmodes, $sslnegotiations) = (\@all_sslmodes, \@all_sslnegotiations);
- } else {
+ if ($ssl_supported != 0)
+ {
+ ($sslmodes, $sslnegotiations) =
+ (\@all_sslmodes, \@all_sslnegotiations);
+ }
+ else
+ {
($sslmodes, $sslnegotiations) = (['disable'], ['postgres']);
}
note("Running tests with GSS enabled in server");
- test_matrix($node, $server_config,
- ['testuser', 'gssuser', 'nogssuser'],
- \@all_gssencmodes, $sslmodes, $sslnegotiations,
- parse_table($test_table));
+ test_matrix($node, $server_config, [ 'testuser', 'gssuser', 'nogssuser' ],
+ \@all_gssencmodes, $sslmodes, $sslnegotiations,
+ parse_table($test_table));
}
###
skip "kerberos not enabled in PG_TEST_EXTRA" if $kerberos_enabled == 0;
# Sanity check that GSSAPI is still enabled from previous test.
- connect_test($node, 'user=testuser gssencmode=prefer sslmode=prefer', 'connect, gssaccept, authok -> gss');
+ connect_test(
+ $node,
+ 'user=testuser gssencmode=prefer sslmode=prefer',
+ 'connect, gssaccept, authok -> gss');
# Enable SSL
$node->adjust_conf('postgresql.conf', 'ssl', 'on');
};
note("Running tests with both GSS and SSL enabled in server");
- test_matrix($node, $server_config,
- ['testuser', 'gssuser', 'ssluser', 'nogssuser', 'nossluser'],
- \@all_gssencmodes, \@all_sslmodes, \@all_sslnegotiations,
- parse_table($test_table));
+ test_matrix(
+ $node,
+ $server_config,
+ [ 'testuser', 'gssuser', 'ssluser', 'nogssuser', 'nossluser' ],
+ \@all_gssencmodes,
+ \@all_sslmodes,
+ \@all_sslnegotiations,
+ parse_table($test_table));
}
###
# libpq doesn't attempt SSL or GSSAPI over Unix domain
# sockets. The server would reject them too.
- connect_test($node, "user=localuser gssencmode=prefer sslmode=prefer host=$unixdir", 'connect, authok -> plain');
- connect_test($node, "user=localuser gssencmode=require sslmode=prefer host=$unixdir", '- -> fail');
+ connect_test(
+ $node,
+ "user=localuser gssencmode=prefer sslmode=prefer host=$unixdir",
+ 'connect, authok -> plain');
+ connect_test($node,
+ "user=localuser gssencmode=require sslmode=prefer host=$unixdir",
+ '- -> fail');
}
done_testing();
local $Test::Builder::Level = $Test::Builder::Level + 1;
my ($pg_node, $node_conf,
- $test_users, $gssencmodes, $sslmodes, $sslnegotiations, %expected) = @_;
+ $test_users, $gssencmodes, $sslmodes, $sslnegotiations, %expected)
+ = @_;
foreach my $test_user (@{$test_users})
{
{
$key = "$test_user $gssencmode $client_mode $negotiation";
$expected_events = $expected{$key};
- if (!defined($expected_events)) {
- $expected_events = "";
+ if (!defined($expected_events))
+ {
+ $expected_events =
+ "";
}
- connect_test($pg_node, "user=$test_user gssencmode=$gssencmode sslmode=$client_mode sslnegotiation=$negotiation", $expected_events);
+ connect_test(
+ $pg_node,
+ "user=$test_user gssencmode=$gssencmode sslmode=$client_mode sslnegotiation=$negotiation",
+ $expected_events);
}
}
}
my $connstr_full = "";
$connstr_full .= "dbname=postgres " unless $connstr =~ m/dbname=/;
- $connstr_full .= "host=$host hostaddr=$hostaddr " unless $connstr =~ m/host=/;
+ $connstr_full .= "host=$host hostaddr=$hostaddr "
+ unless $connstr =~ m/host=/;
$connstr_full .= $connstr;
# Get the current size of the logfile before running the test.
my ($ret, $stdout, $stderr) = $node->psql(
'postgres',
'',
- extra_params => ['-w', '-c', 'SELECT current_enc()'],
+ extra_params => [ '-w', '-c', 'SELECT current_enc()' ],
connstr => "$connstr_full",
on_error_stop => 0);
# Check that the events and outcome match the expected events and
# outcome
my $events_and_outcome = join(', ', @events) . " -> $outcome";
- is($events_and_outcome, $expected_events_and_outcome, $test_name) or diag("$stderr");
+ is($events_and_outcome, $expected_events_and_outcome, $test_name)
+ or diag("$stderr");
}
# Parse a test table. See comment at top of the file for the format.
my %expected;
my ($user, $gssencmode, $sslmode, $sslnegotiation);
- foreach my $line (@lines) {
+ foreach my $line (@lines)
+ {
# Trim comments
$line =~ s/#.*$//;
# Ignore empty lines (includes comment-only lines)
next if $line eq '';
- $line =~ m/^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S.*)\s*->\s*(\S+)\s*$/ or die "could not parse line \"$line\"";
+ $line =~ m/^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S.*)\s*->\s*(\S+)\s*$/
+ or die "could not parse line \"$line\"";
$user = $1 unless $1 eq ".";
$gssencmode = $2 unless $2 eq ".";
$sslmode = $3 unless $3 eq ".";
my @events = split /,\s*/, $5;
my $outcome = $6;
my $events_str = join(', ', @events);
- $events_str =~ s/\s+$//; # trim whitespace
+ $events_str =~ s/\s+$//; # trim whitespace
my $events_and_outcome = "$events_str -> $outcome";
- my %expanded = expand_expected_line($user, $gssencmode, $sslmode, $sslnegotiation, $events_and_outcome);
+ my %expanded =
+ expand_expected_line($user, $gssencmode, $sslmode, $sslnegotiation,
+ $events_and_outcome);
%expected = (%expected, %expanded);
}
return %expected;
my ($user, $gssencmode, $sslmode, $sslnegotiation, $expected) = @_;
my %result;
- if ($user eq '*') {
- foreach my $x (@all_test_users) {
- %result = (%result, expand_expected_line($x, $gssencmode, $sslmode, $sslnegotiation, $expected));
+ if ($user eq '*')
+ {
+ foreach my $x (@all_test_users)
+ {
+ %result = (
+ %result,
+ expand_expected_line(
+ $x, $gssencmode, $sslmode, $sslnegotiation, $expected));
}
- } elsif ($gssencmode eq '*') {
- foreach my $x (@all_gssencmodes) {
- %result = (%result, expand_expected_line($user, $x, $sslmode, $sslnegotiation, $expected));
+ }
+ elsif ($gssencmode eq '*')
+ {
+ foreach my $x (@all_gssencmodes)
+ {
+ %result = (
+ %result,
+ expand_expected_line(
+ $user, $x, $sslmode, $sslnegotiation, $expected));
}
- } elsif ($sslmode eq '*') {
- foreach my $x (@all_sslmodes) {
- %result = (%result, expand_expected_line($user, $gssencmode, $x, $sslnegotiation, $expected));
+ }
+ elsif ($sslmode eq '*')
+ {
+ foreach my $x (@all_sslmodes)
+ {
+ %result = (
+ %result,
+ expand_expected_line(
+ $user, $gssencmode, $x, $sslnegotiation, $expected));
}
- } elsif ($sslnegotiation eq '*') {
- foreach my $x (@all_sslnegotiations) {
- %result = (%result, expand_expected_line($user, $gssencmode, $sslmode, $x, $expected));
+ }
+ elsif ($sslnegotiation eq '*')
+ {
+ foreach my $x (@all_sslnegotiations)
+ {
+ %result = (
+ %result,
+ expand_expected_line(
+ $user, $gssencmode, $sslmode, $x, $expected));
}
- } else {
+ }
+ else
+ {
$result{"$user $gssencmode $sslmode $sslnegotiation"} = $expected;
}
return %result;
my @events = ();
my @lines = split /\n/, $log_contents;
- foreach my $line (@lines) {
- push @events, "reconnect" if $line =~ /connection received/ && scalar(@events) > 0;
- push @events, "connect" if $line =~ /connection received/ && scalar(@events) == 0;
+ foreach my $line (@lines)
+ {
+ push @events, "reconnect"
+ if $line =~ /connection received/ && scalar(@events) > 0;
+ push @events, "connect"
+ if $line =~ /connection received/ && scalar(@events) == 0;
push @events, "sslaccept" if $line =~ /SSLRequest accepted/;
push @events, "sslreject" if $line =~ /SSLRequest rejected/;
- push @events, "directsslaccept" if $line =~ /direct SSL connection accepted/;
- push @events, "directsslreject" if $line =~ /direct SSL connection rejected/;
+ push @events, "directsslaccept"
+ if $line =~ /direct SSL connection accepted/;
+ push @events, "directsslreject"
+ if $line =~ /direct SSL connection rejected/;
push @events, "gssaccept" if $line =~ /GSSENCRequest accepted/;
push @events, "gssreject" if $line =~ /GSSENCRequest rejected/;
push @events, "authfail" if $line =~ /no pg_hba.conf entry/;
}
# No events at all is represented by "-"
- if (scalar @events == 0) {
- push @events, "-"
+ if (scalar @events == 0)
+ {
+ push @events, "-";
}
return @events;
const char *final;
int tail_idx;
__mmask64 bmask = ~UINT64CONST(0);
- const __m512i maskv = _mm512_set1_epi8(mask);
+ const __m512i maskv = _mm512_set1_epi8(mask);
/*
* Align buffer down to avoid double load overhead from unaligned access.
 */
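
The comment above describes a common trick for vectorized scans. A minimal illustration of the align-down step (names are invented, and uint64_t stands in for the __mmask64 byte mask seen above): round the start pointer down to a 64-byte vector boundary so loads are aligned, and shift the all-ones mask so the bytes preceding the true start of the buffer are ignored.

#include <stdint.h>

static inline const char *
align_down_64(const char *s, uint64_t *bmask)
{
	const char *aligned = (const char *) ((uintptr_t) s & ~(uintptr_t) 63);

	/* clear the mask bits for bytes before 's' in the first aligned load */
	*bmask = ~UINT64_C(0) << (s - aligned);
	return aligned;
}
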
# (undumped) extension tables
privileged_internals => {
dump_cmd => [
- 'pg_dump', '--no-sync', "--file=$tempdir/privileged_internals.sql",
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/privileged_internals.sql",
# these two tables are irrelevant to the test case
'--exclude-table=regress_pg_dump_schema.external_tab',
'--exclude-table=regress_pg_dump_schema.extdependtab',
},
exclude_extension => {
dump_cmd => [
- 'pg_dump', '--no-sync', "--file=$tempdir/exclude_extension.sql",
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/exclude_extension.sql",
'--exclude-extension=test_pg_dump', 'postgres',
],
},
exclude_extension_filter => {
dump_cmd => [
- 'pg_dump', '--no-sync',
+ 'pg_dump',
+ '--no-sync',
"--file=$tempdir/exclude_extension_filter.sql",
- "--filter=$tempdir/exclude_extension_filter.txt", 'postgres',
+ "--filter=$tempdir/exclude_extension_filter.txt",
+ 'postgres',
],
},
/*
 * Return the number of keys in the radix tree.
 */
static uint64
-rt_num_entries(rt_radix_tree * tree)
+rt_num_entries(rt_radix_tree *tree)
{
return tree->ctl->num_keys;
}
* false.
*/
for (int i = 0; i < children; i++)
- EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) & keys[i]));
+ EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) &keys[i]));
rt_stats(radixtree);
TestValueType update = keys[i] + 1;
/* rt_set should report the key found */
- EXPECT_TRUE(rt_set(radixtree, keys[i], (TestValueType *) & update));
+ EXPECT_TRUE(rt_set(radixtree, keys[i], (TestValueType *) &update));
}
/* delete and re-insert keys */
for (int i = 0; i < children; i++)
{
EXPECT_TRUE(rt_delete(radixtree, keys[i]));
- EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) & keys[i]));
+ EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) &keys[i]));
}
/* look up keys after deleting and re-inserting */
my $data_path = $self->data_dir;
if (defined $params{combine_with_prior})
{
- my @prior_backups = @{$params{combine_with_prior}};
+ my @prior_backups = @{ $params{combine_with_prior} };
my @prior_backup_path;
for my $prior_backup_name (@prior_backups)
{
push @prior_backup_path,
- $root_node->backup_dir . '/' . $prior_backup_name;
+ $root_node->backup_dir . '/' . $prior_backup_name;
}
local %ENV = $self->_get_env();
my @combineargs = ('pg_combinebackup', '-d');
if (exists $params{tablespace_map})
{
- while (my ($olddir, $newdir) = each %{$params{tablespace_map}})
+ while (my ($olddir, $newdir) = each %{ $params{tablespace_map} })
{
push @combineargs, "-T$olddir=$newdir";
}
# We need to generate a tablespace_map file.
open(my $tsmap, ">", "$data_path/tablespace_map")
- || die "$data_path/tablespace_map: $!";
+ || die "$data_path/tablespace_map: $!";
# Extract tarfiles and add tablespace_map entries
my @tstars = grep { /^\d+.tar/ }
- PostgreSQL::Test::Utils::slurp_dir($backup_path);
+ PostgreSQL::Test::Utils::slurp_dir($backup_path);
for my $tstar (@tstars)
{
my $tsoid = $tstar;
$tsoid =~ s/\.tar$//;
die "no tablespace mapping for $tstar"
- if !exists $params{tablespace_map} ||
- !exists $params{tablespace_map}{$tsoid};
+ if !exists $params{tablespace_map}
+ || !exists $params{tablespace_map}{$tsoid};
my $newdir = $params{tablespace_map}{$tsoid};
mkdir($newdir) || die "mkdir $newdir: $!";
- PostgreSQL::Test::Utils::system_or_bail($params{tar_program}, 'xf',
- $backup_path . '/' . $tstar, '-C', $newdir);
+ PostgreSQL::Test::Utils::system_or_bail($params{tar_program},
+ 'xf', $backup_path . '/' . $tstar,
+ '-C', $newdir);
my $escaped_newdir = $newdir;
$escaped_newdir =~ s/\\/\\\\/g;
# Copy the main backup. If we see a tablespace directory for which we
# have a tablespace mapping, skip it, but remember that we saw it.
- PostgreSQL::Test::RecursiveCopy::copypath($backup_path, $data_path,
+ PostgreSQL::Test::RecursiveCopy::copypath(
+ $backup_path,
+ $data_path,
'filterfn' => sub {
my ($path) = @_;
- if ($path =~ /^pg_tblspc\/(\d+)$/ &&
- exists $params{tablespace_map}{$1})
+ if ($path =~ /^pg_tblspc\/(\d+)$/
+ && exists $params{tablespace_map}{$1})
{
push @tsoids, $1;
return 0;
{
# We need to generate a tablespace_map file.
open(my $tsmap, ">", "$data_path/tablespace_map")
- || die "$data_path/tablespace_map: $!";
+ || die "$data_path/tablespace_map: $!";
# Now use the list of tablespace links to copy each tablespace.
for my $tsoid (@tsoids)
{
die "no tablespace mapping for $tsoid"
- if !exists $params{tablespace_map} ||
- !exists $params{tablespace_map}{$tsoid};
+ if !exists $params{tablespace_map}
+ || !exists $params{tablespace_map}{$tsoid};
my $olddir = $backup_path . '/pg_tblspc/' . $tsoid;
my $newdir = $params{tablespace_map}{$tsoid};
# -w is now the default but having it here does no harm and helps
# compatibility with older versions.
- $ret = PostgreSQL::Test::Utils::system_log(
- 'pg_ctl', '-w', '-D', $self->data_dir,
- '-l', $self->logfile, 'restart');
+ $ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-w', '-D',
+ $self->data_dir, '-l', $self->logfile, 'restart');
if ($ret != 0)
{
my ($self, $slot_name, $reference_time) = @_;
my $name = $self->name;
- my $inactive_since = $self->safe_psql('postgres',
+ my $inactive_since = $self->safe_psql(
+ 'postgres',
qq(SELECT inactive_since FROM pg_replication_slots
WHERE slot_name = '$slot_name' AND inactive_since IS NOT NULL;)
- );
+ );
# Check that the inactive_since is sane
- is($self->safe_psql('postgres',
- qq[SELECT '$inactive_since'::timestamptz > to_timestamp(0) AND
+ is( $self->safe_psql(
+ 'postgres',
+ qq[SELECT '$inactive_since'::timestamptz > to_timestamp(0) AND
'$inactive_since'::timestamptz > '$reference_time'::timestamptz;]
),
't',
"last inactive time for slot $slot_name is valid on node $name")
- or die "could not validate captured inactive_since for slot $slot_name";
+ or die "could not validate captured inactive_since for slot $slot_name";
return $inactive_since;
}
use warnings FATAL => 'all';
use PostgreSQL::Test::Utils;
-our ($krb5_bin_dir, $krb5_sbin_dir, $krb5_config, $kinit, $klist,
- $kdb5_util, $kadmin_local, $krb5kdc,
- $krb5_conf, $kdc_conf, $krb5_cache, $krb5_log, $kdc_log,
- $kdc_port, $kdc_datadir, $kdc_pidfile, $keytab);
+our (
+ $krb5_bin_dir, $krb5_sbin_dir, $krb5_config, $kinit,
+ $klist, $kdb5_util, $kadmin_local, $krb5kdc,
+ $krb5_conf, $kdc_conf, $krb5_cache, $krb5_log,
+ $kdc_log, $kdc_port, $kdc_datadir, $kdc_pidfile,
+ $keytab);
INIT
{
key_stash_file = $kdc_datadir/_k5.$realm
}!);
- mkdir $kdc_datadir or BAIL_OUT("could not create directory \"$kdc_datadir\"");
+ mkdir $kdc_datadir
+ or BAIL_OUT("could not create directory \"$kdc_datadir\"");
# Ensure that we use the test's config and cache files, not global ones.
$ENV{'KRB5_CONFIG'} = $krb5_conf;
system_or_bail $kdb5_util, 'create', '-s', '-P', 'secret0';
- system_or_bail $kadmin_local, '-q', "addprinc -randkey $service_principal";
+ system_or_bail $kadmin_local, '-q',
+ "addprinc -randkey $service_principal";
system_or_bail $kadmin_local, '-q', "ktadd -k $keytab $service_principal";
system_or_bail $krb5kdc, '-P', $kdc_pidfile;
# take care not to change the script's exit value
my $exit_code = $?;
- kill 'INT', `cat $kdc_pidfile` if defined($kdc_pidfile) && -f $kdc_pidfile;
+ kill 'INT', `cat $kdc_pidfile`
+ if defined($kdc_pidfile) && -f $kdc_pidfile;
$? = $exit_code;
}
$node_primary->safe_psql('postgres',
"CREATE UNLOGGED SEQUENCE ulseq; SELECT nextval('ulseq')");
$node_primary->wait_for_replay_catchup($node_standby_1);
-is($node_standby_1->safe_psql('postgres',
- "SELECT pg_sequence_last_value('ulseq'::regclass) IS NULL"),
- 't', 'pg_sequence_last_value() on unlogged sequence on standby 1');
+is( $node_standby_1->safe_psql(
+ 'postgres',
+ "SELECT pg_sequence_last_value('ulseq'::regclass) IS NULL"),
+ 't',
+ 'pg_sequence_last_value() on unlogged sequence on standby 1');
# Check that only read-only queries can run on standbys
is($node_standby_1->psql('postgres', 'INSERT INTO tab_int VALUES (1)'),
# beyond the previous vacuum.
$alpha->safe_psql('postgres', 'create table test2 (a int, b bytea)');
$alpha->safe_psql('postgres',
- q{insert into test2 select generate_series(1,10000), sha256(random()::text::bytea)});
+ q{insert into test2 select generate_series(1,10000), sha256(random()::text::bytea)}
+);
$alpha->safe_psql('postgres', 'truncate test2');
# Wait again for all records to be replayed.
# Get inactive_since value after the slot's creation. Note that the slot is
# still inactive till it's used by the standby below.
my $inactive_since =
- $primary4->validate_slot_inactive_since($sb4_slot, $slot_creation_time);
+ $primary4->validate_slot_inactive_since($sb4_slot, $slot_creation_time);
$standby4->start;
# Get inactive_since value after the slot's creation. Note that the slot is
# still inactive till it's used by the subscriber below.
$inactive_since =
- $publisher4->validate_slot_inactive_since($lsub4_slot, $slot_creation_time);
+ $publisher4->validate_slot_inactive_since($lsub4_slot, $slot_creation_time);
$subscriber4->start;
$subscriber4->safe_psql('postgres',
$res = $node_standby->safe_psql(
'postgres', qq(
- select invalidation_reason from pg_replication_slots where slot_name = '$active_slot' and conflicting;));
+ select invalidation_reason from pg_replication_slots where slot_name = '$active_slot' and conflicting;)
+ );
is($res, "$reason", "$active_slot reason for conflict is $reason");
$res = $node_standby->safe_psql(
'postgres', qq(
- select invalidation_reason from pg_replication_slots where slot_name = '$inactive_slot' and conflicting;));
+ select invalidation_reason from pg_replication_slots where slot_name = '$inactive_slot' and conflicting;)
+ );
is($res, "$reason", "$inactive_slot reason for conflict is $reason");
}
##################################################
# Get the restart_lsn from an invalidated slot
-my $restart_lsn = $node_standby->safe_psql('postgres',
+my $restart_lsn = $node_standby->safe_psql(
+ 'postgres',
"SELECT restart_lsn FROM pg_replication_slots
WHERE slot_name = 'vacuum_full_activeslot' AND conflicting;"
);
qr/FATAL:\s+cannot connect to invalid database "regression_invalid"/,
"can't connect to invalid database - error message");
-is($node->psql('postgres', 'ALTER DATABASE regression_invalid CONNECTION LIMIT 10'),
- 2, "can't ALTER invalid database");
+is( $node->psql(
+ 'postgres', 'ALTER DATABASE regression_invalid CONNECTION LIMIT 10'),
+ 2,
+ "can't ALTER invalid database");
# check invalid database can't be used as a template
-is( $node->psql('postgres', 'CREATE DATABASE copy_invalid TEMPLATE regression_invalid'),
+is( $node->psql(
+ 'postgres',
+ 'CREATE DATABASE copy_invalid TEMPLATE regression_invalid'),
3,
"can't use invalid database as template");
# Capture the inactive_since of the slot from the primary. Note that the slot
# will be inactive since the corresponding subscription was dropped.
my $inactive_since_on_primary =
- $primary->validate_slot_inactive_since('lsub1_slot', $slot_creation_time_on_primary);
+ $primary->validate_slot_inactive_since('lsub1_slot',
+ $slot_creation_time_on_primary);
# Wait for the standby to catch up so that the standby is not lagging behind
# the failover slots.
# Capture the inactive_since of the synced slot on the standby
my $inactive_since_on_standby =
- $standby1->validate_slot_inactive_since('lsub1_slot', $slot_creation_time_on_primary);
+ $standby1->validate_slot_inactive_since('lsub1_slot',
+ $slot_creation_time_on_primary);
# Synced slot on the standby must get its own inactive_since
is( $standby1->safe_psql(
# Capture the inactive_since of the slot from the primary. Note that the slot
# will be inactive since the corresponding subscription was dropped.
$inactive_since_on_primary =
- $primary->validate_slot_inactive_since('lsub1_slot', $slot_creation_time_on_primary);
+ $primary->validate_slot_inactive_since('lsub1_slot',
+ $slot_creation_time_on_primary);
# Wait for the standby to catch up so that the standby is not lagging behind
# the failover slots.
$standby1->safe_psql('postgres', "SELECT pg_sync_replication_slots();");
# Confirm that the invalidated slot has been dropped.
-$standby1->wait_for_log(qr/dropped replication slot "lsub1_slot" of dbid [0-9]+/,
- $log_offset);
+$standby1->wait_for_log(
+ qr/dropped replication slot "lsub1_slot" of dbid [0-9]+/, $log_offset);
# Confirm that the logical slot has been re-created on the standby and is
# flagged as 'synced'
"cannot sync slots if dbname is not specified in primary_conninfo");
# Add the dbname back to the primary_conninfo for further tests
-$standby1->append_conf('postgresql.conf', "primary_conninfo = '$connstr_1 dbname=postgres'");
+$standby1->append_conf('postgresql.conf',
+ "primary_conninfo = '$connstr_1 dbname=postgres'");
$standby1->reload;
##################################################
# synced slot. See the test where we promote standby (Promote the standby1 to
# primary.)
$primary->safe_psql('postgres',
- "SELECT pg_logical_emit_message(false, 'test', 'test');"
-);
+ "SELECT pg_logical_emit_message(false, 'test', 'test');");
# Get the confirmed_flush_lsn for the logical slot snap_test_slot on the primary
my $confirmed_flush_lsn = $primary->safe_psql('postgres',
- "SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot';");
+ "SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot';"
+);
$standby1->safe_psql('postgres', "SELECT pg_sync_replication_slots();");
# Verify that confirmed_flush_lsn of snap_test_slot slot is synced to the standby
ok( $standby1->poll_query_until(
'postgres',
- "SELECT '$confirmed_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot' AND synced AND NOT temporary;"),
+ "SELECT '$confirmed_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot' AND synced AND NOT temporary;"
+ ),
'confirmed_flush_lsn of slot snap_test_slot synced to standby');
##################################################
});
# Start the standby with changed primary_conninfo.
-$standby1->append_conf('postgresql.conf', "primary_conninfo = '$connstr_1 dbname=slotsync_test_db user=repl_role'");
+$standby1->append_conf('postgresql.conf',
+ "primary_conninfo = '$connstr_1 dbname=slotsync_test_db user=repl_role'");
$standby1->start;
# Run the synchronization function. If the sync flow was not prepared
# to handle such attacks, it would have failed during the validation
# of the primary_slot_name itself resulting in
# ERROR: slot synchronization requires valid primary_slot_name
-$standby1->safe_psql('slotsync_test_db', "SELECT pg_sync_replication_slots();");
+$standby1->safe_psql('slotsync_test_db',
+ "SELECT pg_sync_replication_slots();");
# Reset the dbname and user in primary_conninfo to the earlier values.
-$standby1->append_conf('postgresql.conf', "primary_conninfo = '$connstr_1 dbname=postgres'");
+$standby1->append_conf('postgresql.conf',
+ "primary_conninfo = '$connstr_1 dbname=postgres'");
$standby1->reload;
# Drop the newly created database.
-$primary->psql('postgres',
- q{DROP DATABASE slotsync_test_db;});
+$primary->psql('postgres', q{DROP DATABASE slotsync_test_db;});
##################################################
# Test to confirm that the slot sync worker exits on invalid GUC(s) and
$standby1->reload;
# Confirm that the slot sync worker is able to start.
-$standby1->wait_for_log(qr/slot sync worker started/,
- $log_offset);
+$standby1->wait_for_log(qr/slot sync worker started/, $log_offset);
$log_offset = -s $standby1->logfile;
# Disable another GUC required for slot sync.
-$standby1->append_conf( 'postgresql.conf', qq(hot_standby_feedback = off));
+$standby1->append_conf('postgresql.conf', qq(hot_standby_feedback = off));
$standby1->reload;
# Confirm that the slot sync worker acknowledges the GUC change and logs a
# message about the wrong configuration.
-$standby1->wait_for_log(qr/slot sync worker will restart because of a parameter change/,
+$standby1->wait_for_log(
+ qr/slot sync worker will restart because of a parameter change/,
$log_offset);
-$standby1->wait_for_log(qr/slot synchronization requires hot_standby_feedback to be enabled/,
+$standby1->wait_for_log(
+ qr/slot synchronization requires hot_standby_feedback to be enabled/,
$log_offset);
$log_offset = -s $standby1->logfile;
$standby1->reload;
# Confirm that the slot sync worker is able to start now.
-$standby1->wait_for_log(qr/slot sync worker started/,
- $log_offset);
+$standby1->wait_for_log(qr/slot sync worker started/, $log_offset);
##################################################
# Test to confirm that confirmed_flush_lsn of the logical slot on the primary
# Do not allow any further advancement of the confirmed_flush_lsn for the
# lsub1_slot.
-$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 DISABLE");
+$subscriber1->safe_psql('postgres',
+ "ALTER SUBSCRIPTION regress_mysub1 DISABLE");
# Wait for the replication slot to become inactive on the publisher
$primary->poll_query_until(
# Get the confirmed_flush_lsn for the logical slot lsub1_slot on the primary
my $primary_flush_lsn = $primary->safe_psql('postgres',
- "SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot';");
+ "SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot';"
+);
# Confirm that confirmed_flush_lsn of lsub1_slot slot is synced to the standby
ok( $standby1->poll_query_until(
'postgres',
- "SELECT '$primary_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot' AND synced AND NOT temporary;"),
+ "SELECT '$primary_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot' AND synced AND NOT temporary;"
+ ),
'confirmed_flush_lsn of slot lsub1_slot synced to standby');
##################################################
$subscriber2->wait_for_subscription_sync;
-$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 ENABLE");
+$subscriber1->safe_psql('postgres',
+ "ALTER SUBSCRIPTION regress_mysub1 ENABLE");
my $offset = -s $primary->logfile;
# primary and keeps waiting for the standby specified in standby_slot_names
# (sb1_slot aka standby1).
$result =
- $subscriber1->safe_psql('postgres', "SELECT count(*) <> $primary_row_count FROM tab_int;");
+ $subscriber1->safe_psql('postgres',
+ "SELECT count(*) <> $primary_row_count FROM tab_int;");
is($result, 't',
"subscriber1 doesn't get data from primary until standby1 acknowledges changes"
);
# Disable the regress_mysub1 to prevent the logical walsender from generating
# more warnings.
-$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 DISABLE");
+$subscriber1->safe_psql('postgres',
+ "ALTER SUBSCRIPTION regress_mysub1 DISABLE");
# Wait for the replication slot to become inactive on the publisher
$primary->poll_query_until(
$back_q->quit;
$primary->safe_psql('postgres',
- "SELECT pg_drop_replication_slot('test_slot');"
-);
+ "SELECT pg_drop_replication_slot('test_slot');");
# Add the physical slot (sb1_slot) back to the standby_slot_names for further
# tests.
$primary->reload;
# Enable the regress_mysub1 for further tests
-$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 ENABLE");
+$subscriber1->safe_psql('postgres',
+ "ALTER SUBSCRIPTION regress_mysub1 ENABLE");
##################################################
# Test that logical replication will wait for the user-created inactive
# promotion. We do this check before the slot is enabled on the new primary
# below; otherwise, the slot becomes active, setting inactive_since to NULL.
my $inactive_since_on_new_primary =
- $standby1->validate_slot_inactive_since('lsub1_slot', $promotion_time_on_primary);
+ $standby1->validate_slot_inactive_since('lsub1_slot',
+ $promotion_time_on_primary);
is( $standby1->safe_psql(
'postgres',
"SELECT '$inactive_since_on_new_primary'::timestamptz > '$inactive_since_on_primary'::timestamptz"
),
"t",
- 'synchronized slot has got its own inactive_since on the new primary after promotion');
+ 'synchronized slot has got its own inactive_since on the new primary after promotion'
+);
# Update subscription with the new primary's connection info
my $standby1_conninfo = $standby1->connstr . ' dbname=postgres';
"ALTER SUBSCRIPTION regress_mysub1 CONNECTION '$standby1_conninfo';");
# Confirm the synced slot 'lsub1_slot' is retained on the new primary
-is($standby1->safe_psql('postgres',
- q{SELECT count(*) = 2 FROM pg_replication_slots WHERE slot_name IN ('lsub1_slot', 'snap_test_slot') AND synced AND NOT temporary;}),
+is( $standby1->safe_psql(
+ 'postgres',
+ q{SELECT count(*) = 2 FROM pg_replication_slots WHERE slot_name IN ('lsub1_slot', 'snap_test_slot') AND synced AND NOT temporary;}
+ ),
't',
'synced slot retained on the new primary');
$standby1->wait_for_catchup('regress_mysub1');
# Confirm that data in tab_int replicated on the subscriber
-is( $subscriber1->safe_psql('postgres', q{SELECT count(*) FROM tab_int;}),
- "20",
- 'data replicated from the new primary');
+is($subscriber1->safe_psql('postgres', q{SELECT count(*) FROM tab_int;}),
+ "20", 'data replicated from the new primary');
# Consume the data from the snap_test_slot. The synced slot should reach a
# consistent point by restoring the snapshot at the restart_lsn serialized
restart => 'no');
$result = $node->restart(fail_ok => 1);
-is($result, 0, 'restart fails with password-protected key file with wrong password');
+is($result, 0,
+ 'restart fails with password-protected key file with wrong password');
switch_server_cert(
$node,
# Update the rows on the publisher and check the additional columns on
# subscriber didn't change
-$node_publisher->safe_psql('postgres', "UPDATE test_tab SET b = encode(sha256(b::bytea), 'hex')");
+$node_publisher->safe_psql('postgres',
+ "UPDATE test_tab SET b = encode(sha256(b::bytea), 'hex')");
$node_publisher->wait_for_catchup('tap_sub');
# Setup structure on subscriber
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE test_tab (a int primary key, b bytea, c INT, d INT, e INT)");
+ "CREATE TABLE test_tab (a int primary key, b bytea, c INT, d INT, e INT)"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
# Since disabling the subscription doesn't wait for the walsender to release
# the replication slot and exit, wait for the slot to become inactive.
-$node_publisher->poll_query_until(
- $db,
+$node_publisher->poll_query_until($db,
qq(SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = '$sub2_name' AND active_pid IS NULL))
) or die "slot never became inactive";
INSERT INTO tbl SELECT i, sha256(i::text::bytea) FROM generate_series(1, 10000) s(i);
COMMIT;
]);
-test_skip_lsn($node_publisher, $node_subscriber, "(4, sha256(4::text::bytea))",
+test_skip_lsn($node_publisher, $node_subscriber,
+ "(4, sha256(4::text::bytea))",
"4", "test skipping stream-commit");
$result = $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres',
"CREATE TABLE test_replica_id_full (x int, y text)");
$node_subscriber->safe_psql('postgres',
- "CREATE INDEX test_replica_id_full_idx ON test_replica_id_full USING HASH (x)");
+ "CREATE INDEX test_replica_id_full_idx ON test_replica_id_full USING HASH (x)"
+);
# insert some initial data
$node_publisher->safe_psql('postgres',
# the above grant doesn't help.
publish_insert("alice.unpartitioned", 14);
expect_failure(
- "alice.unpartitioned",
- 3,
- 7,
- 13,
+ "alice.unpartitioned", 3, 7, 13,
qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
"with no privileges cannot replicate");
));
$node_subscriber->wait_for_subscription_sync($node_publisher, 'sub1');
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT a, b FROM tab_default");
-is($result, qq(1|f
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab_default");
+is( $result, qq(1|f
2|t), 'check snapshot on subscriber');
# Update all rows in the table and ensure the rows with the missing `b`
# attribute replicate correctly.
-$node_publisher->safe_psql('postgres',
- "UPDATE tab_default SET a = a + 1");
+$node_publisher->safe_psql('postgres', "UPDATE tab_default SET a = a + 1");
$node_publisher->wait_for_catchup('sub1');
# When the bug is present, the `1|f` row will not be updated to `2|f` because
# the publisher incorrectly fills in `NULL` for `b` and publishes an update
# for `1|NULL`, which doesn't exist in the subscriber.
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT a, b FROM tab_default");
-is($result, qq(2|f
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab_default");
+is( $result, qq(2|f
3|t), 'check replicated update on subscriber');
$node_publisher->stop('fast');
BYTE
BY_HANDLE_FILE_INFORMATION
Backend
-BackendId
BackendParameters
BackendStartupData
BackendState
BaseBackupCmd
BaseBackupTargetHandle
BaseBackupTargetType
-BasicArchiveData
BeginDirectModify_function
BeginForeignInsert_function
BeginForeignModify_function
BlockIdData
BlockInfoRecord
BlockNumber
+BlockRefTable
+BlockRefTableBuffer
+BlockRefTableChunk
+BlockRefTableEntry
+BlockRefTableKey
+BlockRefTableReader
+BlockRefTableSerializedEntry
+BlockRefTableWriter
BlockSampler
BlockSamplerData
BlockedProcData
BlockedProcsData
+BlocktableEntry
BloomBuildState
BloomFilter
BloomMetaPageData
CancelRequestPacket
Cardinality
CaseExpr
+CaseKind
CaseTestExpr
CaseWhen
Cash
CopyFromStateData
CopyHeaderChoice
CopyInsertMethod
-CopyMethod
CopyLogVerbosityChoice
+CopyMethod
CopyMultiInsertBuffer
CopyMultiInsertInfo
CopyOnErrorChoice
DR_printtup
DR_sqlfunction
DR_transientrel
+DSMRegistryCtxStruct
+DSMRegistryEntry
DWORD
DataDirSyncMethod
DataDumperPtr
DataPageDeleteStack
+DataTypesUsageChecks
+DataTypesUsageVersionCheck
DatabaseInfo
DateADT
DateTimeErrorExtra
DropTableSpaceStmt
DropUserMappingStmt
DropdbStmt
-DSMRegistryCtxStruct
-DSMRegistryEntry
DumpComponents
DumpId
DumpOptions
FieldSelect
FieldStore
File
+FileBackupMethod
FileFdwExecutionState
FileFdwPlanState
FileNameMap
ImportForeignSchema_function
ImportQual
InProgressEnt
+InProgressIO
IncludeWal
InclusionOpaque
IncrementVarSublevelsUp_context
+IncrementalBackupInfo
IncrementalSort
IncrementalSortExecutionStatus
IncrementalSortGroupInfo
InjectionPointEntry
InjectionPointSharedState
InlineCodeBlock
-InProgressIO
InsertStmt
Instrumentation
Int128AggState
IsoConnInfo
IspellDict
Item
+ItemArray
ItemId
ItemIdData
ItemPointer
JoinCostWorkspace
JoinDomain
JoinExpr
-JsonFuncExpr
JoinHashEntry
JoinPath
JoinPathExtraData
JsonExprState
JsonFormat
JsonFormatType
+JsonFuncExpr
JsonHashEntry
JsonIncrementalState
JsonIsPredicate
JsonObjectAgg
JsonObjectConstructor
JsonOutput
-JsonParseExpr
JsonParseContext
JsonParseErrorType
+JsonParseExpr
JsonParserStack
JsonPath
JsonPathBool
-JsonPathDatatypeStatus
+JsonPathCountVarsCallback
JsonPathExecContext
JsonPathExecResult
+JsonPathGetVarCallback
JsonPathGinAddPathItemFunc
JsonPathGinContext
JsonPathGinExtractNodesFunc
JsonPathItem
JsonPathItemType
JsonPathKeyword
-JsonPathMutableContext
JsonPathParseItem
JsonPathParseResult
JsonPathPredicateCallback
LLVMAttributeRef
LLVMBasicBlockRef
LLVMBuilderRef
+LLVMContextRef
LLVMErrorRef
LLVMIntPredicate
LLVMJITEventListenerRef
ParallelHashJoinBatchAccessor
ParallelHashJoinState
ParallelIndexScanDesc
-ParallelReadyList
ParallelSlot
ParallelSlotArray
ParallelSlotResultHandler
PathCostComparison
PathHashStack
PathKey
+PathKeyInfo
PathKeysComparison
PathTarget
PatternInfo
PostParseColumnRefHook
PostgresPollingStatusType
PostingItem
-PostmasterChildType
PreParseColumnRefHook
PredClass
PredIterInfo
PrivateRefCountEntry
ProcArrayStruct
ProcLangInfo
+ProcNumber
ProcSignalBarrierType
ProcSignalHeader
ProcSignalReason
PromptInterruptContext
ProtocolVersion
PrsStorage
-PruneReason
PruneFreezeResult
+PruneReason
PruneState
PruneStepResult
PsqlScanCallbacks
ReadLocalXLogPageNoWaitPrivate
ReadReplicationSlotCmd
ReadStream
+ReadStreamBlockNumberCB
ReassignOwnedStmt
RecheckForeignScan_function
RecordCacheArrayEntry
ResourceReleaseCallback
ResourceReleaseCallbackItem
ResourceReleasePhase
+ResourceReleasePriority
RestoreOptions
RestorePass
RestrictInfo
SpinDelayStatus
SplitInterval
SplitLR
-SplitPartitionContext
SplitPageLayout
+SplitPartitionContext
SplitPoint
SplitTextOutputData
SplitVar
Subscription
SubscriptionInfo
SubscriptionRelState
+SummarizerReadLocalXLogPrivate
SupportRequestCost
SupportRequestIndexCondition
SupportRequestOptimizeWindowClause
SupportRequestSimplify
SupportRequestWFuncMonotonic
Syn
-SyncingTablesState
SyncOps
SyncRepConfigData
SyncRepStandbyData
SyncRequestHandler
SyncRequestType
+SyncingTablesState
SysFKRelationship
SysScanDesc
SyscacheCallbackFunction
+SysloggerStartupData
SystemRowsSamplerData
SystemSamplerData
SystemTimeSamplerData
TestDecodingData
TestDecodingTxnData
TestSpec
+TestValueType
TextFreq
TextPositionState
TheLexeme
TidRangeScanState
TidScan
TidScanState
+TidStore
+TidStoreIter
+TidStoreIterResult
TimeADT
TimeLineHistoryCmd
TimeLineHistoryEntry
TokenAuxData
TokenizedAuthLine
TrackItem
-TransamVariablesData
TransApplyAction
TransInvalidationInfo
TransState
TransactionStateData
TransactionStmt
TransactionStmtKind
+TransamVariablesData
TransformInfo
TransformJsonStringValuesState
TransitionCaptureState
TuplesortClusterArg
TuplesortDatumArg
TuplesortIndexArg
-TuplesortIndexBrinArg
TuplesortIndexBTreeArg
TuplesortIndexHashArg
TuplesortInstrumentation
UnresolvedTupData
UpdateContext
UpdateStmt
+UploadManifestCmd
UpperRelationKind
UpperUniquePath
UserAuth
Vsrt
WAIT_ORDER
WALAvailability
-WalInsertClass
WALInsertLock
WALInsertLockPadded
WALOpenSegment
WaitPMResult
WalCloseMethod
WalCompression
+WalInsertClass
WalLevel
WalRcvData
WalRcvExecResult
WalSndCtlData
WalSndSendDataCallback
WalSndState
+WalSummarizerData
+WalSummaryFile
+WalSummaryIO
WalSyncMethod
WalTimeSample
WalUsage
WindowStatePerFunc
WithCheckOption
WithClause
+WordBoundaryNext
WordEntry
WordEntryIN
WordEntryPos
_SPI_connection
_SPI_plan
__m128i
+__m512i
+__mmask64
__time64_t
_dev_t
_ino_t
_locale_t
_resultmap
_stringlist
+access_vector_t
acquireLocksOnSubLinks_context
add_nulling_relids_context
adjust_appendrel_attrs_context
amgettuple_function
aminitparallelscan_function
aminsert_function
+aminsertcleanup_function
ammarkpos_function
amoptions_function
amparallelrescan_function
auth_password_hook_typ
autovac_table
av_relation
+avc_cache
avl_dbase
avl_node
avl_tree
avw_dbase
backslashResult
+backup_file_entry
+backup_file_hash
backup_manifest_info
backup_manifest_option
+backup_wal_range
base_yy_extra_type
basebackup_options
bbsink
bits16
bits32
bits8
+blockreftable_hash
+blockreftable_iterator
bloom_filter
boolKEY
brin_column_state
canonicalize_state
cashKEY
catalogid_hash
+cb_cleanup_dir
+cb_options
+cb_tablespace
+cb_tablespace_mapping
check_agg_arguments_context
check_function_callback
check_network_data
dsa_segment_index
dsa_segment_map
dshash_compare_function
+dshash_copy_function
dshash_hash
dshash_hash_function
dshash_parameters
eval_const_expressions_context
exec_thread_arg
execution_state
+exit_function
explain_get_index_name_hook_type
f_smgr
fasthash_state
inet
inetKEY
inet_struct
+initRowMethod
init_function
inline_cte_walker_context
inline_error_callback_arg
int64
int64KEY
int8
+int8x16_t
internalPQconninfoOption
intptr_t
intset_internal_node
intset_leaf_node
intset_node
intvKEY
+io_callback_fn
io_stat_col
itemIdCompact
itemIdCompactData
json_manifest_error_callback
json_manifest_per_file_callback
json_manifest_per_wal_range_callback
+json_manifest_system_identifier_callback
+json_manifest_version_callback
json_ofield_action
json_scalar_action
json_struct_action
local_relopt
local_relopts
local_source
+local_ts_iter
+local_ts_radix_tree
locale_t
locate_agg_of_level_context
locate_var_of_level_context
macaddr
macaddr8
macaddr_sortsupport_state
+manifest_data
manifest_file
manifest_files_hash
manifest_files_iterator
manifest_wal_range
+manifest_writer
map_variable_attnos_context
max_parallel_hazard_context
mb2wchar_with_len_converter
pam_handle_t
parallel_worker_main_type
parse_error_callback_arg
-parser_context
partition_method_t
pendingPosition
+pending_label
pgParameterStatus
pg_atomic_flag
pg_atomic_uint32
pg_atomic_uint64
pg_be_sasl_mech
+pg_case_map
+pg_category_range
pg_checksum_context
pg_checksum_raw_context
pg_checksum_type
pg_tz
pg_tz_cache
pg_tzenum
+pg_unicode_category
pg_unicode_decompinfo
pg_unicode_decomposition
pg_unicode_norminfo
pg_unicode_normprops
+pg_unicode_properties
+pg_unicode_range
pg_unicode_recompinfo
pg_utf_to_local_combined
pg_uuid_t
rendezvousHashEntry
replace_rte_variables_callback
replace_rte_variables_context
+report_error_fn
ret_type
rewind_source
rewrite_event
rf_context
+rfile
rm_detail_t
-rt_node_class_test_elem
role_auth_extra
rolename_hash
row_security_policy_hook_type
rsv_callback
+rt_iter
+rt_node_class_test_elem
+rt_radix_tree
saophash_hash
save_buffer
scram_state
scram_state_enum
+security_class_t
sem_t
+sepgsql_context_info_t
sequence_magic
set_join_pathlist_hook_type
set_rel_pathlist_hook_type
+shared_ts_iter
+shared_ts_radix_tree
shm_mq
shm_mq_handle
shm_mq_iovec
substitute_phv_relids_context
symbol
tablespaceinfo
+td_entry
teSection
temp_tablespaces_extra
test_re_flags
uint128
uint16
uint16_t
+uint16x8_t
uint32
uint32_t
uint32x4_t
walrcv_exec_fn
walrcv_get_backend_pid_fn
walrcv_get_conninfo_fn
+walrcv_get_dbname_from_conninfo_fn
walrcv_get_senderinfo_fn
walrcv_identify_system_fn
walrcv_readtimelinehistoryfile_fn
wchar_t
win32_deadchild_waitinfo
wint_t
-worker_spi_state
worker_state
worktable
wrap
+ws_file_info
+ws_options
xl_brin_createidx
xl_brin_desummarize
xl_brin_insert
xmlBufferPtr
xmlChar
xmlDocPtr
+xmlError
xmlErrorPtr
xmlExternalEntityLoader
xmlGenericErrorFunc
z_stream
z_streamp
zic_t
-BlockRefTable
-BlockRefTableBuffer
-BlockRefTableEntry
-BlockRefTableKey
-BlockRefTableReader
-BlockRefTableSerializedEntry
-BlockRefTableWriter
-SummarizerReadLocalXLogPrivate
-SysloggerStartupData
-WalSummarizerData
-WalSummaryFile
-WalSummaryIO
-FileBackupMethod
-IncrementalBackupInfo
-UploadManifestCmd
-backup_file_entry
-backup_wal_range
-cb_cleanup_dir
-cb_options
-cb_tablespace
-cb_tablespace_mapping
-manifest_data
-manifest_writer
-rfile
-ws_options
-ws_file_info
-PathKeyInfo
-TidStore
-TidStoreIter
-TidStoreIterResult
-BlocktableEntry
-ItemArray