if (ndeletable > 0)
{
- TransactionId latestRemovedXid = InvalidTransactionId;
+ TransactionId snapshotConflictHorizon = InvalidTransactionId;
if (XLogStandbyInfoActive() && RelationNeedsWAL(rel))
- latestRemovedXid =
+ snapshotConflictHorizon =
index_compute_xid_horizon_for_tuples(rel, heapRel, buffer,
deletable, ndeletable);
recptr = gistXLogDelete(buffer,
deletable, ndeletable,
- latestRemovedXid);
+ snapshotConflictHorizon);
PageSetLSN(page, recptr);
}
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid,
+ ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon,
rlocator);
}
* PAGE_REUSE records exist to provide a conflict point when we reuse
* pages in the index via the FSM. That's all they do though.
*
- * latestRemovedXid was the page's deleteXid. The
+ * snapshotConflictHorizon was the page's deleteXid. The
* GlobalVisCheckRemovableFullXid(deleteXid) test in gistPageRecyclable()
* conceptually mirrors the PGPROC->xmin > limitXmin test in
* GetConflictingVirtualXIDs(). Consequently, one XID value achieves the
* same exclusion effect on primary and standby.
*/
if (InHotStandby)
- ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid,
+ ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon,
xlrec->locator);
}
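To make the conflict point concrete, here is a sketch of the primary-side caller, loosely modeled on gistNewBuffer() (illustrative only, not part of this patch):

	/* Sketch: reusing a recyclable deleted page on the primary */
	if (gistPageRecyclable(page))
	{
		/*
		 * No snapshot on the primary can see deleteXid anymore, but a
		 * lagging standby still might, so emit a PAGE_REUSE record
		 * carrying the page's deleteXid before handing the page out.
		 */
		if (XLogStandbyInfoActive() && RelationNeedsWAL(rel))
			gistXLogPageReuse(rel, blkno, GistPageGetDeleteXid(page));
	}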
* Write XLOG record about reuse of a deleted page.
*/
void
-gistXLogPageReuse(Relation rel, BlockNumber blkno, FullTransactionId latestRemovedXid)
+gistXLogPageReuse(Relation rel, BlockNumber blkno, FullTransactionId deleteXid)
{
gistxlogPageReuse xlrec_reuse;
/* XLOG stuff */
xlrec_reuse.locator = rel->rd_locator;
xlrec_reuse.block = blkno;
- xlrec_reuse.latestRemovedFullXid = latestRemovedXid;
+ xlrec_reuse.snapshotConflictHorizon = deleteXid;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec_reuse, SizeOfGistxlogPageReuse);
*/
XLogRecPtr
gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete,
- TransactionId latestRemovedXid)
+ TransactionId snapshotConflictHorizon)
{
gistxlogDelete xlrec;
XLogRecPtr recptr;
- xlrec.latestRemovedXid = latestRemovedXid;
+ xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
xlrec.ntodelete = ntodelete;
XLogBeginInsert();
/*
* We need the target-offsets array whether or not we store the whole
- * buffer, to allow us to find the latestRemovedXid on a standby server.
+ * buffer, to allow us to find the snapshotConflictHorizon on a standby
+ * server.
*/
XLogRegisterData((char *) todelete, ntodelete * sizeof(OffsetNumber));
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rlocator);
+ ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon,
+ rlocator);
}
action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer);
if (ndeletable > 0)
{
- TransactionId latestRemovedXid;
+ TransactionId snapshotConflictHorizon;
- latestRemovedXid =
+ snapshotConflictHorizon =
index_compute_xid_horizon_for_tuples(rel, hrel, buf,
deletable, ndeletable);
xl_hash_vacuum_one_page xlrec;
XLogRecPtr recptr;
- xlrec.latestRemovedXid = latestRemovedXid;
+ xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
xlrec.ntuples = ndeletable;
XLogBeginInsert();
/*
* We need the target-offsets array whether or not we store the
- * whole buffer, to allow us to find the latestRemovedXid on a
- * standby server.
+ * whole buffer, to allow us to find the snapshotConflictHorizon
+ * on a standby server.
*/
XLogRegisterData((char *) deletable,
ndeletable * sizeof(OffsetNumber));
Page page = BufferGetPage(buffer);
Assert(ntuples > 0);
- Assert(TransactionIdIsValid(FreezeLimit));
+ Assert(TransactionIdIsNormal(FreezeLimit));
START_CRIT_SECTION();
int nplans;
xl_heap_freeze_page xlrec;
XLogRecPtr recptr;
- TransactionId latestRemovedXid;
+ TransactionId snapshotConflictHorizon;
/* Prepare deduplicated representation for use in WAL record */
nplans = heap_log_freeze_plan(tuples, ntuples, plans, offsets);
/*
- * latestRemovedXid describes the latest processed XID, whereas
* FreezeLimit is (approximately) the first XID not frozen by VACUUM.
* Back up caller's FreezeLimit to avoid false conflicts when
* FreezeLimit is precisely equal to VACUUM's OldestXmin cutoff.
*/
- latestRemovedXid = FreezeLimit;
- TransactionIdRetreat(latestRemovedXid);
+ snapshotConflictHorizon = FreezeLimit;
+ TransactionIdRetreat(snapshotConflictHorizon);
- xlrec.latestRemovedXid = latestRemovedXid;
+ xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
xlrec.nplans = nplans;
XLogBeginInsert();
}
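A worked sketch of the retreat, using hypothetical numbers:

	/*
	 * Suppose OldestXmin == FreezeLimit == 1000.  Freezing only touches
	 * tuples with xmin < 1000, so a standby snapshot whose xmin is
	 * exactly 1000 loses nothing.  A horizon of 1000 would cancel it
	 * anyway; retreating to 999 conflicts only with snapshots that
	 * could actually see a frozen xmin.
	 */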
/*
- * If 'tuple' contains any visible XID greater than latestRemovedXid,
- * ratchet forwards latestRemovedXid to the greatest one found.
- * This is used as the basis for generating Hot Standby conflicts, so
- * if a tuple was never visible then removing it should not conflict
- * with queries.
+ * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
+ * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
+ * that caller is in the process of physically removing, e.g. via HOT pruning
+ * or index deletion.
+ *
+ * Caller must initialize its value to InvalidTransactionId, which is
+ * generally interpreted as "definitely no need for a recovery conflict".
+ * Final value must reflect all heap tuples that caller will physically remove
+ * (or remove TID references to) via its ongoing pruning/deletion operation.
+ * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
+ * caller's WAL record) by REDO routine when it replays caller's operation.
*/
void
-HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
- TransactionId *latestRemovedXid)
+HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
+ TransactionId *snapshotConflictHorizon)
{
TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
if (tuple->t_infomask & HEAP_MOVED)
{
- if (TransactionIdPrecedes(*latestRemovedXid, xvac))
- *latestRemovedXid = xvac;
+ if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
+ *snapshotConflictHorizon = xvac;
}
/*
(!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
{
if (xmax != xmin &&
- TransactionIdFollows(xmax, *latestRemovedXid))
- *latestRemovedXid = xmax;
+ TransactionIdFollows(xmax, *snapshotConflictHorizon))
+ *snapshotConflictHorizon = xmax;
}
-
- /* *latestRemovedXid may still be invalid at end */
}
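Illustrative caller pattern (a sketch; htups and nremove are hypothetical locals, not from this patch):

	TransactionId conflictHorizon = InvalidTransactionId;	/* "no conflict" */

	for (int i = 0; i < nremove; i++)
		HeapTupleHeaderAdvanceConflictHorizon(htups[i], &conflictHorizon);

	/* may legitimately still be InvalidTransactionId at this point */
	xlrec.snapshotConflictHorizon = conflictHorizon;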
#ifdef USE_PREFETCH
heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
{
/* Initial assumption is that earlier pruning took care of conflict */
- TransactionId latestRemovedXid = InvalidTransactionId;
+ TransactionId snapshotConflictHorizon = InvalidTransactionId;
BlockNumber blkno = InvalidBlockNumber;
Buffer buf = InvalidBuffer;
Page page = NULL;
}
/*
- * Maintain latestRemovedXid value for deletion operation as a whole
- * by advancing current value using heap tuple headers. This is
+ * Maintain snapshotConflictHorizon value for deletion operation as a
+ * whole by advancing current value using heap tuple headers. This is
* loosely based on the logic for pruning a HOT chain.
*/
offnum = ItemPointerGetOffsetNumber(htid);
* LP_DEAD item. This is okay because the earlier pruning
* operation that made the line pointer LP_DEAD in the first place
* must have considered the original tuple header as part of
- * generating its own latestRemovedXid value.
+ * generating its own snapshotConflictHorizon value.
*
* Relying on XLOG_HEAP2_PRUNE records like this is the same
* strategy that index vacuuming uses in all cases. Index VACUUM
- * WAL records don't even have a latestRemovedXid field of their
- * own for this reason.
+ * WAL records don't even have a snapshotConflictHorizon field of
+ * their own for this reason.
*/
if (!ItemIdIsNormal(lp))
break;
!TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
break;
- HeapTupleHeaderAdvanceLatestRemovedXid(htup, &latestRemovedXid);
+ HeapTupleHeaderAdvanceConflictHorizon(htup,
+ &snapshotConflictHorizon);
/*
* If the tuple is not HOT-updated, then we are at the end of this
Assert(finalndeltids > 0 || delstate->bottomup);
delstate->ndeltids = finalndeltids;
- return latestRemovedXid;
+ return snapshotConflictHorizon;
}
/*
* corresponding visibility map block. Both should have already been modified
* and dirtied.
*
+ * snapshotConflictHorizon comes from the largest xmin on the page being
+ * marked all-visible. REDO routine uses it to generate recovery conflicts.
+ *
* If checksums or wal_log_hints are enabled, we may also generate a full-page
* image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
* REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
*/
XLogRecPtr
log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer,
- TransactionId cutoff_xid, uint8 vmflags)
+ TransactionId snapshotConflictHorizon, uint8 vmflags)
{
xl_heap_visible xlrec;
XLogRecPtr recptr;
Assert(BufferIsValid(heap_buffer));
Assert(BufferIsValid(vm_buffer));
- xlrec.cutoff_xid = cutoff_xid;
+ xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
xlrec.flags = vmflags;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
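For reference, a sketch of how a caller can compute such a horizon, loosely based on the visibility cutoff tracking in vacuumlazy.c (assumes page is the heap page being marked all-visible):

	TransactionId cutoff = InvalidTransactionId;
	OffsetNumber offnum,
				maxoff = PageGetMaxOffsetNumber(page);

	for (offnum = FirstOffsetNumber; offnum <= maxoff;
		 offnum = OffsetNumberNext(offnum))
	{
		ItemId		itemid = PageGetItemId(page, offnum);
		HeapTupleHeader htup;
		TransactionId xmin;

		if (!ItemIdIsNormal(itemid))
			continue;

		htup = (HeapTupleHeader) PageGetItem(page, itemid);
		xmin = HeapTupleHeaderGetXmin(htup);

		/* track the newest xmin on the page; it bounds standby conflicts */
		if (TransactionIdIsNormal(xmin) && TransactionIdFollows(xmin, cutoff))
			cutoff = xmin;
	}

	/* cutoff becomes log_heap_visible()'s snapshotConflictHorizon */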
* no queries running for which the removed tuples are still visible.
*/
if (InHotStandby)
- ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
+ ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+ rlocator);
/*
* If we have a full-page image, restore it (using a cleanup lock) and
* rather than killing the transaction outright.
*/
if (InHotStandby)
- ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rlocator);
+ ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+ rlocator);
/*
* Read the heap page, if it still exists. If the heap file has dropped or
visibilitymap_pin(reln, blkno, &vmbuffer);
visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
- xlrec->cutoff_xid, xlrec->flags);
+ xlrec->snapshotConflictHorizon, xlrec->flags);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
+ ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+ rlocator);
}
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
bool old_snap_used;
TransactionId new_prune_xid; /* new prune hint value for page */
- TransactionId latestRemovedXid; /* latest xid to be removed by this prune */
+ TransactionId snapshotConflictHorizon; /* latest xid removed */
int nredirected; /* numbers of entries in arrays below */
int ndead;
int nunused;
prstate.old_snap_xmin = old_snap_xmin;
prstate.old_snap_ts = old_snap_ts;
prstate.old_snap_used = false;
- prstate.latestRemovedXid = InvalidTransactionId;
+ prstate.snapshotConflictHorizon = InvalidTransactionId;
prstate.nredirected = prstate.ndead = prstate.nunused = 0;
memset(prstate.marked, 0, sizeof(prstate.marked));
xl_heap_prune xlrec;
XLogRecPtr recptr;
- xlrec.latestRemovedXid = prstate.latestRemovedXid;
+ xlrec.snapshotConflictHorizon = prstate.snapshotConflictHorizon;
xlrec.nredirected = prstate.nredirected;
xlrec.ndead = prstate.ndead;
!HeapTupleHeaderIsHotUpdated(htup))
{
heap_prune_record_unused(prstate, rootoffnum);
- HeapTupleHeaderAdvanceLatestRemovedXid(htup,
- &prstate->latestRemovedXid);
+ HeapTupleHeaderAdvanceConflictHorizon(htup,
+ &prstate->snapshotConflictHorizon);
ndeleted++;
}
if (tupdead)
{
latestdead = offnum;
- HeapTupleHeaderAdvanceLatestRemovedXid(htup,
- &prstate->latestRemovedXid);
+ HeapTupleHeaderAdvanceConflictHorizon(htup,
+ &prstate->snapshotConflictHorizon);
}
else if (!recent_dead)
break;
}
/*
- * Get the latestRemovedXid from the table entries pointed at by the index
- * tuples being deleted using an AM-generic approach.
+ * Get the snapshotConflictHorizon from the table entries pointed to by the
+ * index tuples being deleted using an AM-generic approach.
*
- * This is a table_index_delete_tuples() shim used by index AMs that have
- * simple requirements. These callers only need to consult the tableam to get
- * a latestRemovedXid value, and only expect to delete tuples that are already
- * known deletable. When a latestRemovedXid value isn't needed in index AM's
- * deletion WAL record, it is safe for it to skip calling here entirely.
+ * This is a table_index_delete_tuples() shim used by index AMs that only need
+ * to consult the tableam to get a snapshotConflictHorizon value, and only
+ * expect to delete index tuples that are already known deletable (typically
+ * due to having LP_DEAD bits set). When a snapshotConflictHorizon value
+ * isn't needed in index AM's deletion WAL record, it is safe for it to skip
+ * calling here entirely.
*
* We assume that caller index AM uses the standard IndexTuple representation,
* with table TIDs stored in the t_tid field. We also expect (and assert)
int nitems)
{
TM_IndexDeleteOp delstate;
- TransactionId latestRemovedXid = InvalidTransactionId;
+ TransactionId snapshotConflictHorizon = InvalidTransactionId;
Page ipage = BufferGetPage(ibuf);
IndexTuple itup;
}
/* determine the actual xid horizon */
- latestRemovedXid = table_index_delete_tuples(hrel, &delstate);
+ snapshotConflictHorizon = table_index_delete_tuples(hrel, &delstate);
/* assert tableam agrees that all items are deletable */
Assert(delstate.ndeltids == nitems);
pfree(delstate.deltids);
pfree(delstate.status);
- return latestRemovedXid;
+ return snapshotConflictHorizon;
}
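A minimal caller sketch matching the contract above (irel, hrel, ibuf, deletable, and ndeletable are hypothetical index AM locals):

	TransactionId horizon = InvalidTransactionId;

	/* only compute a horizon when it will actually be WAL-logged */
	if (XLogStandbyInfoActive() && RelationNeedsWAL(irel))
		horizon = index_compute_xid_horizon_for_tuples(irel, hrel, ibuf,
													   deletable, ndeletable);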
no scan can lose its place from such a deletion. We separate the steps
because we allow LP_DEAD to be set with only a share lock (it's like a
hint bit for a heap tuple), but physically deleting tuples requires an
-exclusive lock. We also need to generate a latestRemovedXid value for
+exclusive lock. We also need to generate a snapshotConflictHorizon for
each deletion operation's WAL record, which requires additional
coordinating with the tableam when the deletion actually takes place.
-(This latestRemovedXid value may be used to generate a recovery conflict
-during subsequent REDO of the record by a standby.)
+(The snapshotConflictHorizon value may be used to generate a recovery
+conflict during subsequent REDO of the record by a standby.)
Delaying and batching index tuple deletion like this enables a further
optimization: opportunistic checking of "extra" nearby index tuples
(tuples that are not LP_DEAD-set) when they happen to be very cheap to
check in passing (because we already know that the tableam will be
-visiting their table block to generate a latestRemovedXid value). Any
+visiting their table block to generate a snapshotConflictHorizon value).
+Any
index tuples that turn out to be safe to delete will also be deleted.
Simple deletion will behave as if the extra tuples that actually turn
out to be delete-safe had their LP_DEAD bits set right from the start.
static void _bt_log_reuse_page(Relation rel, BlockNumber blkno,
FullTransactionId safexid);
static void _bt_delitems_delete(Relation rel, Buffer buf,
- TransactionId latestRemovedXid,
+ TransactionId snapshotConflictHorizon,
OffsetNumber *deletable, int ndeletable,
BTVacuumPosting *updatable, int nupdatable);
static char *_bt_delitems_update(BTVacuumPosting *updatable, int nupdatable,
/* XLOG stuff */
xlrec_reuse.locator = rel->rd_locator;
xlrec_reuse.block = blkno;
- xlrec_reuse.latestRemovedFullXid = safexid;
+ xlrec_reuse.snapshotConflictHorizon = safexid;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec_reuse, SizeOfBtreeReusePage);
* (a version that lacks the TIDs that are to be deleted).
*
* We record VACUUMs and b-tree deletes differently in WAL. Deletes must
- * generate their own latestRemovedXid by accessing the table directly,
+ * generate their own snapshotConflictHorizon directly from the tableam,
* whereas VACUUMs rely on the initial VACUUM table scan performing
* WAL-logging that takes care of the issue for the table's indexes
* indirectly. Also, we remove the VACUUM cycle ID from pages, which b-tree
* (a version that lacks the TIDs that are to be deleted).
*
* This is nearly the same as _bt_delitems_vacuum as far as what it does to
- * the page, but it needs its own latestRemovedXid from caller (caller gets
- * this from tableam). This is used by the REDO routine to generate recovery
+ * the page, but it needs its own snapshotConflictHorizon (caller gets this
+ * from tableam). This is used by the REDO routine to generate recovery
* conflicts. The other difference is that only _bt_delitems_vacuum will
* clear page's VACUUM cycle ID.
*/
static void
-_bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestRemovedXid,
+_bt_delitems_delete(Relation rel, Buffer buf,
+ TransactionId snapshotConflictHorizon,
OffsetNumber *deletable, int ndeletable,
BTVacuumPosting *updatable, int nupdatable)
{
XLogRecPtr recptr;
xl_btree_delete xlrec_delete;
- xlrec_delete.latestRemovedXid = latestRemovedXid;
+ xlrec_delete.snapshotConflictHorizon = snapshotConflictHorizon;
xlrec_delete.ndeleted = ndeletable;
xlrec_delete.nupdated = nupdatable;
TM_IndexDeleteOp *delstate)
{
Page page = BufferGetPage(buf);
- TransactionId latestRemovedXid;
+ TransactionId snapshotConflictHorizon;
OffsetNumber postingidxoffnum = InvalidOffsetNumber;
int ndeletable = 0,
nupdatable = 0;
BTVacuumPosting updatable[MaxIndexTuplesPerPage];
/* Use tableam interface to determine which tuples to delete first */
- latestRemovedXid = table_index_delete_tuples(heapRel, delstate);
+ snapshotConflictHorizon = table_index_delete_tuples(heapRel, delstate);
- /* Should not WAL-log latestRemovedXid unless it's required */
- if (!XLogStandbyInfoActive() || !RelationNeedsWAL(rel))
- latestRemovedXid = InvalidTransactionId;
+ /* Should not WAL-log snapshotConflictHorizon unless it's required */
+ if (!XLogStandbyInfoActive())
+ snapshotConflictHorizon = InvalidTransactionId;
/*
* Construct a leaf-page-wise description of what _bt_delitems_delete()
}
/* Physically delete tuples (or TIDs) using deletable (or updatable) */
- _bt_delitems_delete(rel, buf, latestRemovedXid, deletable, ndeletable,
- updatable, nupdatable);
+ _bt_delitems_delete(rel, buf, snapshotConflictHorizon,
+ deletable, ndeletable, updatable, nupdatable);
/* be tidy */
for (int i = 0; i < nupdatable; i++)
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
+ ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
+ rlocator);
}
/*
* xl_btree_reuse_page record at the point that a page is actually recycled
* and reused for an entirely unrelated page inside _bt_split(). These
* records include the same safexid value from the original deleted page,
- * stored in the record's latestRemovedFullXid field.
+ * stored in the record's snapshotConflictHorizon field.
*
* The GlobalVisCheckRemovableFullXid() test in BTPageIsRecyclable() is used
* to determine if it's safe to recycle a page. This mirrors our own test:
xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) XLogRecGetData(record);
if (InHotStandby)
- ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid,
+ ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon,
xlrec->locator);
}
static void
out_gistxlogPageReuse(StringInfo buf, gistxlogPageReuse *xlrec)
{
- appendStringInfo(buf, "rel %u/%u/%u; blk %u; latestRemovedXid %u:%u",
+ appendStringInfo(buf, "rel %u/%u/%u; blk %u; snapshotConflictHorizon %u:%u",
xlrec->locator.spcOid, xlrec->locator.dbOid,
xlrec->locator.relNumber, xlrec->block,
- EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
- XidFromFullTransactionId(xlrec->latestRemovedFullXid));
+ EpochFromFullTransactionId(xlrec->snapshotConflictHorizon),
+ XidFromFullTransactionId(xlrec->snapshotConflictHorizon));
}
static void
out_gistxlogDelete(StringInfo buf, gistxlogDelete *xlrec)
{
- appendStringInfo(buf, "delete: latestRemovedXid %u, nitems: %u",
- xlrec->latestRemovedXid, xlrec->ntodelete);
+ appendStringInfo(buf, "delete: snapshotConflictHorizon %u, nitems: %u",
+ xlrec->snapshotConflictHorizon, xlrec->ntodelete);
}
static void
{
xl_hash_vacuum_one_page *xlrec = (xl_hash_vacuum_one_page *) rec;
- appendStringInfo(buf, "ntuples %d, latestRemovedXid %u",
+ appendStringInfo(buf, "ntuples %d, snapshotConflictHorizon %u",
xlrec->ntuples,
- xlrec->latestRemovedXid);
+ xlrec->snapshotConflictHorizon);
break;
}
}
{
xl_heap_prune *xlrec = (xl_heap_prune *) rec;
- appendStringInfo(buf, "latestRemovedXid %u nredirected %u ndead %u",
- xlrec->latestRemovedXid,
+ appendStringInfo(buf, "snapshotConflictHorizon %u nredirected %u ndead %u",
+ xlrec->snapshotConflictHorizon,
xlrec->nredirected,
xlrec->ndead);
}
{
xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) rec;
- appendStringInfo(buf, "latestRemovedXid %u nplans %u",
- xlrec->latestRemovedXid, xlrec->nplans);
+ appendStringInfo(buf, "snapshotConflictHorizon %u nplans %u",
+ xlrec->snapshotConflictHorizon, xlrec->nplans);
}
else if (info == XLOG_HEAP2_VISIBLE)
{
xl_heap_visible *xlrec = (xl_heap_visible *) rec;
- appendStringInfo(buf, "cutoff xid %u flags 0x%02X",
- xlrec->cutoff_xid, xlrec->flags);
+ appendStringInfo(buf, "snapshotConflictHorizon %u flags 0x%02X",
+ xlrec->snapshotConflictHorizon, xlrec->flags);
}
else if (info == XLOG_HEAP2_MULTI_INSERT)
{
{
xl_btree_delete *xlrec = (xl_btree_delete *) rec;
- appendStringInfo(buf, "latestRemovedXid %u; ndeleted %u; nupdated %u",
- xlrec->latestRemovedXid, xlrec->ndeleted, xlrec->nupdated);
+ appendStringInfo(buf, "snapshotConflictHorizon %u; ndeleted %u; nupdated %u",
+ xlrec->snapshotConflictHorizon,
+ xlrec->ndeleted, xlrec->nupdated);
break;
}
case XLOG_BTREE_MARK_PAGE_HALFDEAD:
{
xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) rec;
- appendStringInfo(buf, "rel %u/%u/%u; latestRemovedXid %u:%u",
+ appendStringInfo(buf, "rel %u/%u/%u; snapshotConflictHorizon %u:%u",
xlrec->locator.spcOid, xlrec->locator.dbOid,
xlrec->locator.relNumber,
- EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
- XidFromFullTransactionId(xlrec->latestRemovedFullXid));
+ EpochFromFullTransactionId(xlrec->snapshotConflictHorizon),
+ XidFromFullTransactionId(xlrec->snapshotConflictHorizon));
break;
}
case XLOG_BTREE_META_CLEANUP:
{
spgxlogVacuumRedirect *xlrec = (spgxlogVacuumRedirect *) rec;
- appendStringInfo(buf, "ntoplaceholder: %u, firstplaceholder: %u, newestredirectxid: %u",
+ appendStringInfo(buf, "ntoplaceholder: %u, firstplaceholder: %u, snapshotConflictHorizon: %u",
xlrec->nToPlaceholder,
xlrec->firstPlaceholder,
- xlrec->newestRedirectXid);
+ xlrec->snapshotConflictHorizon);
}
break;
}
GlobalVisState *vistest;
xlrec.nToPlaceholder = 0;
- xlrec.newestRedirectXid = InvalidTransactionId;
+ xlrec.snapshotConflictHorizon = InvalidTransactionId;
/* XXX: providing heap relation would allow more pruning */
vistest = GlobalVisTestFor(NULL);
opaque->nPlaceholder++;
/* remember newest XID among the removed redirects */
- if (!TransactionIdIsValid(xlrec.newestRedirectXid) ||
- TransactionIdPrecedes(xlrec.newestRedirectXid, dt->xid))
- xlrec.newestRedirectXid = dt->xid;
+ if (!TransactionIdIsValid(xlrec.snapshotConflictHorizon) ||
+ TransactionIdPrecedes(xlrec.snapshotConflictHorizon, dt->xid))
+ xlrec.snapshotConflictHorizon = dt->xid;
ItemPointerSetInvalid(&dt->pointer);
*/
if (InHotStandby)
{
- if (TransactionIdIsValid(xldata->newestRedirectXid))
- {
- RelFileLocator locator;
+ RelFileLocator locator;
- XLogRecGetBlockTag(record, 0, &locator, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(xldata->newestRedirectXid,
- locator);
- }
+ XLogRecGetBlockTag(record, 0, &locator, NULL, NULL);
+ ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon,
+ locator);
}
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
* GetConflictingVirtualXIDs -- returns an array of currently active VXIDs.
*
* Usage is limited to conflict resolution during recovery on standby servers.
- * limitXmin is supplied as either latestRemovedXid, or InvalidTransactionId
- * in cases where we cannot accurately determine a value for latestRemovedXid.
+ * limitXmin is supplied as either a cutoff with snapshotConflictHorizon
+ * semantics, or InvalidTransactionId in cases where caller cannot accurately
+ * determine a safe snapshotConflictHorizon value.
*
* If limitXmin is InvalidTransactionId then we want to kill everybody,
* so we're not worried if they have a snapshot or not, nor does it really
- * matter what type of lock we hold.
+ * matter what type of lock we hold. Caller must avoid calling here with
+ * snapshotConflictHorizon style cutoffs that were set to InvalidTransactionId
+ * during original execution, since that actually indicates that there is
+ * definitely no need for a recovery conflict (the snapshotConflictHorizon
+ * convention for InvalidTransactionId values is the opposite of our own!).
*
* All callers that are checking xmins always now supply a valid and useful
* value for limitXmin. The limitXmin is always lower than the lowest
}
}
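The two InvalidTransactionId conventions, side by side (sketch; the second call is loosely based on ResolveRecoveryConflictWithTablespace()):

	/* snapshotConflictHorizon convention: Invalid means "no conflict" */
	ResolveRecoveryConflictWithSnapshot(InvalidTransactionId, locator); /* no-op */

	/* limitXmin convention: Invalid means "conflict with everybody" */
	backends = GetConflictingVirtualXIDs(InvalidTransactionId, InvalidOid);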
+/*
+ * Generate whatever recovery conflicts are needed to eliminate snapshots that
+ * might see XIDs <= snapshotConflictHorizon as still running.
+ *
+ * snapshotConflictHorizon cutoffs are our standard approach to generating
+ * granular recovery conflicts. Note that InvalidTransactionId values are
+ * interpreted as "definitely don't need any conflicts" here, which is a
+ * general convention that WAL records can (and often do) depend on.
+ */
void
-ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocator locator)
+ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon,
+ RelFileLocator locator)
{
VirtualTransactionId *backends;
* which is sufficient for the deletion operation must take place before
* replay of the deletion record itself).
*/
- if (!TransactionIdIsValid(latestRemovedXid))
+ if (!TransactionIdIsValid(snapshotConflictHorizon))
return;
- backends = GetConflictingVirtualXIDs(latestRemovedXid,
+ backends = GetConflictingVirtualXIDs(snapshotConflictHorizon,
locator.dbOid);
-
ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_SNAPSHOT,
WAIT_EVENT_RECOVERY_CONFLICT_SNAPSHOT,
* FullTransactionId values
*/
void
-ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXid,
+ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId snapshotConflictHorizon,
RelFileLocator locator)
{
/*
uint64 diff;
diff = U64FromFullTransactionId(nextXid) -
- U64FromFullTransactionId(latestRemovedFullXid);
+ U64FromFullTransactionId(snapshotConflictHorizon);
if (diff < MaxTransactionId / 2)
{
- TransactionId latestRemovedXid;
+ TransactionId truncated;
- latestRemovedXid = XidFromFullTransactionId(latestRemovedFullXid);
- ResolveRecoveryConflictWithSnapshot(latestRemovedXid, locator);
+ truncated = XidFromFullTransactionId(snapshotConflictHorizon);
+ ResolveRecoveryConflictWithSnapshot(truncated, locator);
}
}
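A worked sketch of the wraparound guard, using hypothetical epoch/xid values:

	FullTransactionId next = FullTransactionIdFromEpochAndXid(3, 1000);
	FullTransactionId horizon = FullTransactionIdFromEpochAndXid(1, 900);
	uint64		diff = U64FromFullTransactionId(next) -
		U64FromFullTransactionId(horizon);

	/* ~2 * 2^32 XIDs in the past: a truncated xid of 900 would look recent */
	Assert(diff >= MaxTransactionId / 2);	/* so the conflict is just skipped */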
OffsetNumber downlinkOffset);
extern void gistXLogPageReuse(Relation rel, BlockNumber blkno,
- FullTransactionId latestRemovedXid);
+ FullTransactionId deleteXid);
extern XLogRecPtr gistXLogUpdate(Buffer buffer,
OffsetNumber *todelete, int ntodelete,
Buffer leftchildbuf);
extern XLogRecPtr gistXLogDelete(Buffer buffer, OffsetNumber *todelete,
- int ntodelete, TransactionId latestRemovedXid);
+ int ntodelete, TransactionId snapshotConflictHorizon);
extern XLogRecPtr gistXLogSplit(bool page_is_leaf,
SplitedPageLayout *dist,
*/
typedef struct gistxlogDelete
{
- TransactionId latestRemovedXid;
+ TransactionId snapshotConflictHorizon;
uint16 ntodelete; /* number of deleted offsets */
/*
{
RelFileLocator locator;
BlockNumber block;
- FullTransactionId latestRemovedFullXid;
+ FullTransactionId snapshotConflictHorizon;
} gistxlogPageReuse;
-#define SizeOfGistxlogPageReuse (offsetof(gistxlogPageReuse, latestRemovedFullXid) + sizeof(FullTransactionId))
+#define SizeOfGistxlogPageReuse (offsetof(gistxlogPageReuse, snapshotConflictHorizon) + sizeof(FullTransactionId))
extern void gist_redo(XLogReaderState *record);
extern void gist_desc(StringInfo buf, XLogReaderState *record);
*/
typedef struct xl_hash_vacuum_one_page
{
- TransactionId latestRemovedXid;
+ TransactionId snapshotConflictHorizon;
int ntuples;
/* TARGET OFFSET NUMBERS FOLLOW AT THE END */
*/
typedef struct xl_heap_prune
{
- TransactionId latestRemovedXid;
+ TransactionId snapshotConflictHorizon;
uint16 nredirected;
uint16 ndead;
/* OFFSET NUMBERS are in the block reference 0 */
*/
typedef struct xl_heap_freeze_page
{
- TransactionId latestRemovedXid;
+ TransactionId snapshotConflictHorizon;
uint16 nplans;
/* FREEZE PLANS FOLLOW */
*/
typedef struct xl_heap_visible
{
- TransactionId cutoff_xid;
+ TransactionId snapshotConflictHorizon;
uint8 flags;
} xl_heap_visible;
XLogRecPtr start_lsn; /* Insert LSN at begin of rewrite */
} xl_heap_rewrite_mapping;
-extern void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
- TransactionId *latestRemovedXid);
+extern void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
+ TransactionId *snapshotConflictHorizon);
extern void heap_redo(XLogReaderState *record);
extern void heap_desc(StringInfo buf, XLogReaderState *record);
extern void heap_xlog_logical_rewrite(XLogReaderState *r);
extern XLogRecPtr log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer,
- Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags);
+ Buffer vm_buffer,
+ TransactionId snapshotConflictHorizon,
+ uint8 vmflags);
#endif /* HEAPAM_XLOG_H */
{
RelFileLocator locator;
BlockNumber block;
- FullTransactionId latestRemovedFullXid;
+ FullTransactionId snapshotConflictHorizon;
} xl_btree_reuse_page;
#define SizeOfBtreeReusePage (sizeof(xl_btree_reuse_page))
* when btinsert() is called.
*
* The records are very similar. The only difference is that xl_btree_delete
- * has to include a latestRemovedXid field to generate recovery conflicts.
+ * has a snapshotConflictHorizon field to generate recovery conflicts.
* (VACUUM operations can just rely on earlier conflicts generated during
* pruning of the table whose TIDs the to-be-deleted index tuples point to.
* There are also small differences between each REDO routine that we don't go
typedef struct xl_btree_delete
{
- TransactionId latestRemovedXid;
+ TransactionId snapshotConflictHorizon;
uint16 ndeleted;
uint16 nupdated;
{
uint16 nToPlaceholder; /* number of redirects to make placeholders */
OffsetNumber firstPlaceholder; /* first placeholder tuple to remove */
- TransactionId newestRedirectXid; /* newest XID of removed redirects */
+ TransactionId snapshotConflictHorizon; /* newest XID of removed redirects */
/* offsets of redirect tuples to make placeholders follow */
OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER];
* marked as deletable. See comments above TM_IndexDelete and comments above
* TM_IndexDeleteOp for full details.
*
- * Returns a latestRemovedXid transaction ID that caller generally places in
+ * Returns a snapshotConflictHorizon transaction ID that caller places in
* its index deletion WAL record. This might be used during subsequent REDO
* of the WAL record when in Hot Standby mode -- a recovery conflict for the
* index deletion operation might be required on the standby.
extern void InitRecoveryTransactionEnvironment(void);
extern void ShutdownRecoveryTransactionEnvironment(void);
-extern void ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid,
+extern void ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon,
RelFileLocator locator);
-extern void ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXid,
+extern void ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId snapshotConflictHorizon,
RelFileLocator locator);
extern void ResolveRecoveryConflictWithTablespace(Oid tsid);
extern void ResolveRecoveryConflictWithDatabase(Oid dbid);