my ($relname) = @_;
my $pgdata = $node->data_dir;
- my $rel = $node->safe_psql('postgres',
+ my $rel = $node->safe_psql('postgres',
qq(SELECT pg_relation_filepath('$relname')));
die "path not found for relation $relname" unless defined $rel;
return "$pgdata/$rel";
for my $endblock (qw(NULL 0))
{
my $opts =
- "on_error_stop := $stop, "
+ "on_error_stop := $stop, "
. "check_toast := $check_toast, "
. "skip := $skip, "
. "startblock := $startblock, "
my $main_h = $node->background_psql('postgres');
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
BEGIN;
INSERT INTO tbl VALUES(0);
));
my $cic_h = $node->background_psql('postgres');
-$cic_h->query_until(qr/start/, q(
+$cic_h->query_until(
+ qr/start/, q(
\echo start
CREATE INDEX CONCURRENTLY idx ON tbl(i);
));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
PREPARE TRANSACTION 'a';
));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
BEGIN;
INSERT INTO tbl VALUES(0);
));
$node->safe_psql('postgres', q(COMMIT PREPARED 'a';));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
PREPARE TRANSACTION 'b';
BEGIN;
INSERT INTO tbl VALUES(0);
));
$node->safe_psql('postgres', q(COMMIT PREPARED 'b';));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
PREPARE TRANSACTION 'c';
COMMIT PREPARED 'c';
));
$node->restart;
my $reindex_h = $node->background_psql('postgres');
-$reindex_h->query_until(qr/start/, q(
+$reindex_h->query_until(
+ qr/start/, q(
\echo start
DROP INDEX CONCURRENTLY idx;
CREATE INDEX CONCURRENTLY idx ON tbl(i);
OffsetNumber successor[MaxOffsetNumber];
bool lp_valid[MaxOffsetNumber];
bool xmin_commit_status_ok[MaxOffsetNumber];
- XidCommitStatus xmin_commit_status[MaxOffsetNumber];
+ XidCommitStatus xmin_commit_status[MaxOffsetNumber];
CHECK_FOR_INTERRUPTS();
for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff;
ctx.offnum = OffsetNumberNext(ctx.offnum))
{
- BlockNumber nextblkno;
+ BlockNumber nextblkno;
OffsetNumber nextoffnum;
successor[ctx.offnum] = InvalidOffsetNumber;
/*
* Since we've checked that this redirect points to a line
- * pointer between FirstOffsetNumber and maxoff, it should
- * now be safe to fetch the referenced line pointer. We expect
- * it to be LP_NORMAL; if not, that's corruption.
+ * pointer between FirstOffsetNumber and maxoff, it should now
+ * be safe to fetch the referenced line pointer. We expect it
+ * to be LP_NORMAL; if not, that's corruption.
*/
rditem = PageGetItemId(ctx.page, rdoffnum);
if (!ItemIdIsUsed(rditem))
{
/*
* We should not have set successor[ctx.offnum] to a value
- * other than InvalidOffsetNumber unless that line pointer
- * is LP_NORMAL.
+ * other than InvalidOffsetNumber unless that line pointer is
+ * LP_NORMAL.
*/
Assert(ItemIdIsNormal(next_lp));
}
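/*
 * A minimal sketch of the redirect validation described above (hypothetical
 * helper, not part of the patch): an LP_REDIRECT entry is sane only if the
 * line pointer it targets is used and LP_NORMAL.
 */
static inline bool
redirect_target_ok(Page page, OffsetNumber rdoffnum)
{
	ItemId		rditem = PageGetItemId(page, rdoffnum);

	return ItemIdIsUsed(rditem) && ItemIdIsNormal(rditem);
}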
/*
- * If the next line pointer is a redirect, or if it's a tuple
- * but the XMAX of this tuple doesn't match the XMIN of the next
+ * If the next line pointer is a redirect, or if it's a tuple but
+ * the XMAX of this tuple doesn't match the XMIN of the next
* tuple, then the two aren't part of the same update chain and
* there is nothing more to do.
*/
}
/*
- * This tuple and the tuple to which it points seem to be part
- * of an update chain.
+ * This tuple and the tuple to which it points seem to be part of
+ * an update chain.
*/
predecessor[nextoffnum] = ctx.offnum;
}
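/*
 * A minimal sketch of the chain test described above (hypothetical helper,
 * not part of the patch): two tuples belong to the same update chain only
 * when the older tuple's updater xid equals the newer tuple's xmin.
 */
static inline bool
same_update_chain(HeapTupleHeader curr, HeapTupleHeader next)
{
	return TransactionIdEquals(HeapTupleHeaderGetUpdateXid(curr),
							   HeapTupleHeaderGetXmin(next));
}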
/*
- * If the current tuple's xmin is aborted but the successor tuple's
- * xmin is in-progress or committed, that's corruption.
+ * If the current tuple's xmin is aborted but the successor
+ * tuple's xmin is in-progress or committed, that's corruption.
*/
if (xmin_commit_status_ok[ctx.offnum] &&
xmin_commit_status[ctx.offnum] == XID_ABORTED &&
HeapTupleHeader tuphdr = ctx->tuphdr;
ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */
- *xmin_commit_status_ok = false; /* have not yet proven otherwise */
+ *xmin_commit_status_ok = false; /* have not yet proven otherwise */
/* If xmin is normal, it should be within valid range */
xmin = HeapTupleHeaderGetXmin(tuphdr);
* therefore cannot check it.
*/
if (!check_tuple_visibility(ctx, xmin_commit_status_ok,
- xmin_commit_status))
+ xmin_commit_status))
return;
/*
diff = (int32) (ctx->next_xid - xid);
/*
- * In cases of corruption we might see a 32bit xid that is before epoch
- * 0. We can't represent that as a 64bit xid, due to 64bit xids being
+ * In cases of corruption we might see a 32bit xid that is before epoch 0.
+ * We can't represent that as a 64bit xid, due to 64bit xids being
* unsigned integers, without the modulo arithmetic of 32bit xid. There's
* no really nice way to deal with that, but it works ok enough to use
* FirstNormalFullTransactionId in that case, as a freshly initdb'd
local $ENV{PGOPTIONS} = join " ",
map { "-c $_=$params->{$_}" } keys %$params;
- my $log = $node->logfile();
+ my $log = $node->logfile();
my $offset = -s $log;
$node->safe_psql("postgres", $sql);
"SELECT * FROM pg_class;",
{
"auto_explain.log_verbose" => "on",
- "compute_query_id" => "on"
+ "compute_query_id" => "on"
});
like(
"SELECT * FROM pg_class;",
{
"auto_explain.log_verbose" => "on",
- "compute_query_id" => "regress"
+ "compute_query_id" => "regress"
});
unlike(
# This is only needed on Windows machines that don't use UNIX sockets.
$node->init(
'allows_streaming' => 1,
- 'auth_extra' => [ '--create-role', 'backupuser' ]);
+ 'auth_extra' => [ '--create-role', 'backupuser' ]);
$node->append_conf('postgresql.conf',
"shared_preload_libraries = 'basebackup_to_shell'");
'fails if basebackup_to_shell.command is not set');
# Configure basebackup_to_shell.command and reload the configuration file.
-my $backup_path = PostgreSQL::Test::Utils::tempdir;
+my $backup_path = PostgreSQL::Test::Utils::tempdir;
my $escaped_backup_path = $backup_path;
$escaped_backup_path =~ s{\\}{\\\\}g
if ($PostgreSQL::Test::Utils::windows_os);
MemoryContext basic_archive_context;
/*
- * If we didn't get to storing the pointer to our allocated state, we don't
- * have anything to clean up.
+ * If we didn't get to storing the pointer to our allocated state, we
+ * don't have anything to clean up.
*/
if (data == NULL)
return;
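/*
 * A minimal sketch of the guard pattern above (hypothetical names, plain C,
 * not part of the patch): cleanup of lazily allocated state must tolerate
 * the case where nothing was ever stored.  Assumes <stdlib.h>.
 */
static char *lazy_state = NULL;

static void
lazy_state_cleanup(void)
{
	if (lazy_state == NULL)		/* allocation never happened */
		return;
	free(lazy_state);
	lazy_state = NULL;
}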
if (astate)
PG_RETURN_DATUM(makeArrayResult(astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
else
PG_RETURN_NULL();
}
$outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
}
my $sql =
- "select $outf from "
+ "select $outf from "
. join(', ', keys %table)
. " where "
. join(' AND ', @where) . ';';
print @plan;
}
-my $t0 = [gettimeofday];
+my $t0 = [gettimeofday];
my $count = 0;
-my $b = $opt{b};
+my $b = $opt{b};
$b ||= 1;
my @a;
foreach (1 .. $b)
EOT
-open(my $msg, '>', "message.tmp") || die;
+open(my $msg, '>', "message.tmp") || die;
open(my $map, '>', "message_section_map.tmp") || die;
srand(1);
ltree *left, ltree *right)
{
int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
- (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
+ (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
ltree_gist *result = palloc(size);
SET_VARSIZE(result, size);
ltree_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- ltree *res;
+ ltree *res;
if ((res = parse_ltree(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
*/
static bool
finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos,
- struct Node *escontext)
+ struct Node *escontext)
{
if (is_lquery)
{
lquery_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- lquery *res;
+ lquery *res;
if ((res = parse_lquery(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("word is too long")));
- if (! pushquery(state, type, ltree_crc32_sz(strval, lenval),
- state->curop - state->op, lenval, flag))
+ if (!pushquery(state, type, ltree_crc32_sz(strval, lenval),
+ state->curop - state->op, lenval, flag))
return false;
while (state->curop - state->op + lenval + 1 >= state->lenop)
Datum
ltxtq_in(PG_FUNCTION_ARGS)
{
- ltxtquery *res;
+ ltxtquery *res;
if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL)
PG_RETURN_NULL();
# setup
$node->safe_psql("postgres",
- "CREATE EXTENSION pg_prewarm;\n"
+ "CREATE EXTENSION pg_prewarm;\n"
. "CREATE TABLE test(c1 int);\n"
. "INSERT INTO test SELECT generate_series(1, 100);");
int block_id;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
RmgrData desc;
- const char *record_type;
- StringInfoData rec_desc;
+ const char *record_type;
+ StringInfoData rec_desc;
Assert(XLogRecHasAnyBlockRefs(record));
bool have_error; /* have any subxacts aborted in this xact? */
bool changing_xact_state; /* xact state change in process */
bool parallel_commit; /* do we commit (sub)xacts in parallel? */
- bool parallel_abort; /* do we abort (sub)xacts in parallel? */
+ bool parallel_abort; /* do we abort (sub)xacts in parallel? */
bool invalidated; /* true if reconnect is pending */
bool keep_connections; /* setting value of keep_connections
* server option */
/*
* Should never get called when the insert is being performed on a table
- * that is also among the target relations of an UPDATE operation,
- * because postgresBeginForeignInsert() currently rejects such insert
- * attempts.
+ * that is also among the target relations of an UPDATE operation, because
+ * postgresBeginForeignInsert() currently rejects such insert attempts.
*/
Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
*/
if (method != ANALYZE_SAMPLE_OFF)
{
- bool can_tablesample;
+ bool can_tablesample;
reltuples = postgresGetAnalyzeInfoForForeignTable(relation,
&can_tablesample);
/*
- * Make sure we're not choosing TABLESAMPLE when the remote relation does
- * not support that. But only do this for "auto" - if the user explicitly
- * requested BERNOULLI/SYSTEM, it's better to fail.
+ * Make sure we're not choosing TABLESAMPLE when the remote relation
+ * does not support that. But only do this for "auto" - if the user
+ * explicitly requested BERNOULLI/SYSTEM, it's better to fail.
*/
if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO))
method = ANALYZE_SAMPLE_RANDOM;
else
{
/*
- * All supported sampling methods require sampling rate,
- * not target rows directly, so we calculate that using
- * the remote reltuples value. That's imperfect, because
- * it might be off a good deal, but that's not something
- * we can (or should) address here.
+ * All supported sampling methods require sampling rate, not
+ * target rows directly, so we calculate that using the remote
+ * reltuples value. That's imperfect, because it might be off a
+ * good deal, but that's not something we can (or should) address
+ * here.
*
- * If reltuples is too low (i.e. when table grew), we'll
- * end up sampling more rows - but then we'll apply the
- * local sampling, so we get the expected sample size.
- * This is the same outcome as without remote sampling.
+ * If reltuples is too low (i.e. when table grew), we'll end up
+ * sampling more rows - but then we'll apply the local sampling,
+ * so we get the expected sample size. This is the same outcome as
+ * without remote sampling.
*
- * If reltuples is too high (e.g. after bulk DELETE), we
- * will end up sampling too few rows.
+ * If reltuples is too high (e.g. after bulk DELETE), we will end
+ * up sampling too few rows.
*
- * We can't really do much better here - we could try
- * sampling a bit more rows, but we don't know how off
- * the reltuples value is so how much is "a bit more"?
+ * We can't really do much better here - we could try sampling a
+ * bit more rows, but we don't know how off the reltuples value is
+ * so how much is "a bit more"?
*
- * Furthermore, the targrows value for partitions is
- * determined based on table size (relpages), which can
- * be off in different ways too. Adjusting the sampling
- * rate here might make the issue worse.
+ * Furthermore, the targrows value for partitions is determined
+ * based on table size (relpages), which can be off in different
+ * ways too. Adjusting the sampling rate here might make the issue
+ * worse.
*/
sample_frac = targrows / reltuples;
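/*
 * Worked example (hypothetical numbers, not from the patch): with
 * targrows = 30000 and remote reltuples = 1000000, sample_frac is 0.03.
 * If the table meanwhile grew to 2000000 rows, the remote sample returns
 * roughly 60000 rows and local sampling trims it back to 30000; if rows
 * were bulk-deleted down to 500000, only about 15000 come back and the
 * sample falls short of the target, as described above.
 */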
/*
* We should never get sampling rate outside the valid range
- * (between 0.0 and 1.0), because those cases should be covered
- * by the previous branch that sets ANALYZE_SAMPLE_OFF.
+ * (between 0.0 and 1.0), because those cases should be covered by
+ * the previous branch that sets ANALYZE_SAMPLE_OFF.
*/
Assert(sample_frac >= 0.0 && sample_frac <= 1.0);
}
/* See if we already cached the result. */
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
if (!entry)
{
* cache invalidation.
*/
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
entry->shippable = shippable;
}
use warnings;
my $integer = '[+-]?[0-9]+';
-my $real = '[+-]?[0-9]+\.[0-9]+';
+my $real = '[+-]?[0-9]+\.[0-9]+';
-my $RANGE = '(\.\.)(\.)?';
-my $PLUMIN = q(\'\+\-\');
-my $FLOAT = "(($integer)|($real))([eE]($integer))?";
+my $RANGE = '(\.\.)(\.)?';
+my $PLUMIN = q(\'\+\-\');
+my $FLOAT = "(($integer)|($real))([eE]($integer))?";
my $EXTENSION = '<|>|~';
-my $boundary = "($EXTENSION)?$FLOAT";
+my $boundary = "($EXTENSION)?$FLOAT";
my $deviation = $FLOAT;
my $rule_1 = $boundary . $PLUMIN . $deviation;
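# A quick sanity check of rule_1 (input string assumed from seg's syntax):
# a boundary, the literal '+-', then a deviation.
my $example = q(5.0'+-'0.3);
print "rule_1 matches: $example\n" if $example =~ /^$rule_1$/;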
# replication statistics data is fine after restart.
$node->stop;
-my $datadir = $node->data_dir;
+my $datadir = $node->data_dir;
my $slot3_replslotdir = "$datadir/pg_replslot/regression_slot3";
rmtree($slot3_replslotdir);
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;