-
+
Performs an incremental
The -n / --dry-run option instructs
- pg_cominebackup to figure out what would be done
+ pg_combinebackup to figure out what would be done
without actually creating the target directory or any output files.
It is particularly useful in combination with --debug.
/*
* Before taking an incremental backup, the caller must supply the backup
- * manifest from a prior backup. Each chunk of manifest data recieved
+ * manifest from a prior backup. Each chunk of manifest data received
* from the client should be passed to this function.
*/
void
++deadcycles;
/*
- * If we've managed to wait for an entire minute withot the WAL
+ * If we've managed to wait for an entire minute without the WAL
* summarizer absorbing a single WAL record, error out; probably
* something is wrong.
*
* likely to catch a reasonable number of the things that can go wrong
* in practice (e.g. the summarizer process is completely hung, say
* because somebody hooked up a debugger to it or something) without
- * giving up too quickly when the sytem is just slow.
+ * giving up too quickly when the system is just slow.
*/
if (deadcycles >= 6)
ereport(ERROR,
errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid timeline %lld", (long long) raw_tli));
- /* Prepare to read the specified WAL summry file. */
+ /* Prepare to read the specified WAL summary file. */
ws.tli = (TimeLineID) raw_tli;
ws.start_lsn = PG_GETARG_LSN(1);
ws.end_lsn = PG_GETARG_LSN(2);
}
/*
- * If the limit block is not InvalidBlockNumber, emit an exta row
+ * If the limit block is not InvalidBlockNumber, emit an extra row
* with that block number and limit_block = true.
*
* There is no point in doing this when the limit_block is
XLogRecPtr pending_lsn;
/*
- * This field handles its own synchronizaton.
+ * This field handles its own synchronization.
*/
ConditionVariable summary_file_cv;
} WalSummarizerData;
/*
* The sleep time will always be a multiple of 200ms and will not exceed
* thirty seconds (150 * 200 = 30 * 1000). Note that the timeout here needs
- * to be substntially less than the maximum amount of time for which an
+ * to be substantially less than the maximum amount of time for which an
* incremental backup will wait for this process to catch up. Otherwise, an
* incremental backup might time out on an idle system just because we sleep
* for too long.
/*
* Within this function, 'current_lsn' and 'current_tli' refer to the
* point from which the next WAL summary file should start. 'exact' is
- * true if 'current_lsn' is known to be the start of a WAL recod or WAL
+ * true if 'current_lsn' is known to be the start of a WAL record or WAL
* segment, and false if it might be in the middle of a record someplace.
*
* 'switch_lsn' and 'switch_tli', if set, are the LSN at which we need to
/*
* Sleep for 10 seconds before attempting to resume operations in
- * order to avoid excessing logging.
+ * order to avoid excessive logging.
*
* Many of the likely error conditions are things that will repeat
* every time. For example, if the WAL can't be read or the summary
return InvalidXLogRecPtr;
/*
- * Unless we need to reset the pending_lsn, we initally acquire the lock
+ * Unless we need to reset the pending_lsn, we initially acquire the lock
* in shared mode and try to fetch the required information. If we acquire
* in shared mode and find that the data structure hasn't been
* initialized, we reacquire the lock in exclusive mode so that we can
*
* 'start_lsn' is the point at which we should start summarizing. If this
* value comes from the end LSN of the previous record as returned by the
- * xlograder machinery, 'exact' should be true; otherwise, 'exact' should
+ * xlogreader machinery, 'exact' should be true; otherwise, 'exact' should
* be false, and this function will search forward for the start of a valid
* WAL record.
*
xlogreader->ReadRecPtr >= switch_lsn)
{
/*
- * Woops! We've read a record that *starts* after the switch LSN,
+ * Whoops! We've read a record that *starts* after the switch LSN,
* contrary to our goal of reading only until we hit the first
* record that ends at or after the switch LSN. Pretend we didn't
* read it after all by bailing out of this loop right here,
}
/*
- * Special handling for WAL recods with RM_XACT_ID.
+ * Special handling for WAL records with RM_XACT_ID.
*/
static void
SummarizeXactRecord(XLogReaderState *xlogreader, BlockRefTable *brtab)
}
/*
- * Special handling for WAL recods with RM_XLOG_ID.
+ * Special handling for WAL records with RM_XLOG_ID.
*/
static bool
SummarizeXlogRecord(XLogReaderState *xlogreader)
* sleep time to the minimum, but we don't want a handful of extra WAL
* records to provoke a strong reaction. We choose to reduce the sleep
* time by 1 quantum for each page read beyond the first, which is a
- * fairly arbitrary way of trying to be reactive without
- * overrreacting.
+ * fairly arbitrary way of trying to be reactive without overreacting.
*/
if (pages_read_since_last_sleep > sleep_quanta - 1)
sleep_quanta = 1;
pq_endmessage_reuse(&buf);
pq_flush();
- /* Recieve packets from client until done. */
+ /* Receive packets from client until done. */
while (HandleUploadManifestPacket(&buf, &offset, ib))
;
*
* We assume that MemoryContextDelete and MemoryContextSetParent won't
* fail, and thus we shouldn't end up bailing out of here in such a way as
- * to leave dangling pointrs.
+ * to leave dangling pointers.
*/
if (uploaded_manifest_mcxt != NULL)
MemoryContextDelete(uploaded_manifest_mcxt);
* The exact size limit that we impose here doesn't really matter --
* most of what's supposed to be in the file is fixed size and quite
* short. However, the length of the backup_label is limited (at least
- * by some parts of the code) to MAXGPATH, so include that value in
+ * by some parts of the code) to MAXPGPATH, so include that value in
* the maximum length that we tolerate.
*/
slurp_file(fd, pathbuf, buf, 10000 + MAXPGPATH);
if (!is_absolute_path(link_target))
pg_fatal("symbolic link \"%s\" is relative", tblspcdir);
- /* Caonicalize the link target. */
+ /* Canonicalize the link target. */
canonicalize_path(link_target);
/*
* we just record the paths within the data directories.
*/
snprintf(ts->old_dir, MAXPGPATH, "%s/%s", pg_tblspc, de->d_name);
- snprintf(ts->new_dir, MAXPGPATH, "%s/pg_tblpc/%s", opt->output,
+ snprintf(ts->new_dir, MAXPGPATH, "%s/pg_tblspc/%s", opt->output,
de->d_name);
ts->in_place = true;
}
# It would be much nicer if we could physically compare the data files, but
# that doesn't really work. The contents of the page hole aren't guaranteed to
# be identical, and there can be other discrepancies as well. To make this work
-# we'd need the equivalent of each AM's rm_mask functon written or at least
+# we'd need the equivalent of each AM's rm_mask function written or at least
# callable from Perl, and that doesn't seem practical.
#
# NB: We're just using the primary's backup directory for scratch space here.
slurp_file($node->backup_dir . '/csum_none/backup_manifest');
my $nocsum_count = (() = $nocsum_manifest =~ /Checksum-Algorithm/mig);
is($nocsum_count, 0,
- "Checksum_Algorithm is not mentioned in no-checksum manifest");
+ "Checksum-Algorithm is not mentioned in no-checksum manifest");
# OK, that's all.
done_testing();
}
/*
- * Encode bytes using two hexademical digits for each one.
+ * Encode bytes using two hexadecimal digits for each one.
*/
static size_t
hex_encode(const uint8 *src, size_t len, char *dst)
* 'chunk_size' is an array storing the allocated size of each chunk.
*
* 'chunk_usage' is an array storing the number of elements used in each
- * chunk. If that value is less than MAX_ENTRIES_PER_CHUNK, the corresonding
+ * chunk. If that value is less than MAX_ENTRIES_PER_CHUNK, the corresponding
* chunk is used as an array; else the corresponding chunk is used as a bitmap.
* When used as a bitmap, the least significant bit of the first array element
* is the status of the lowest-numbered block covered by this chunk.
* table reference file from disk.
*
* total_chunks means the number of chunks for the RelFileLocator/ForkNumber
- * combination that is curently being read, and consumed_chunks is the number
+ * combination that is currently being read, and consumed_chunks is the number
* of those that have been read. (We always read all the information for
* a single chunk at one time, so we don't need to be able to represent the
* state where a chunk has been partially read.)
* malformed. This is not used for I/O errors, which must be handled internally
* by read_callback.
*
- * 'error_callback_arg' is an opaque arguent to be passed to error_callback.
+ * 'error_callback_arg' is an opaque argument to be passed to error_callback.
*/
BlockRefTableReader *
CreateBlockRefTableReader(io_callback_fn read_callback,
/*
* Next, we need to discard any offsets within the chunk that would
- * contain the limit_block. We must handle this differenly depending on
+ * contain the limit_block. We must handle this differently depending on
* whether the chunk that would contain limit_block is a bitmap or an
* array of offsets.
*/
}
/*
- * Mark a block in a given BlkRefTableEntry as known to have been modified.
+ * Mark a block in a given BlockRefTableEntry as known to have been modified.
*/
void
BlockRefTableEntryMarkBlockModified(BlockRefTableEntry *entry,
}
/*
- * Release memory for a BlockRefTablEntry that was created by
+ * Release memory for a BlockRefTableEntry that was created by
* CreateBlockRefTableEntry.
*/
void
*/
/* yyyymmddN */
-#define CATALOG_VERSION_NO 202312071
+#define CATALOG_VERSION_NO 202312211
#endif
proargnames => '{tli,start_lsn,end_lsn}',
prosrc => 'pg_available_wal_summaries' },
{ oid => '8437',
- descr => 'contents of a WAL sumamry file',
+ descr => 'contents of a WAL summary file',
proname => 'pg_wal_summary_contents', prorows => '100',
proretset => 't', provolatile => 'v', proparallel => 's',
prorettype => 'record', proargtypes => 'int8 pg_lsn pg_lsn',