-
- vacuum_cleanup_index_scale_factor (floating point)
-
-
- vacuum_cleanup_index_scale_factor
- configuration parameter
-
-
-
- Specifies the fraction of the total number of heap tuples counted in
- the previous statistics collection that can be inserted without
- incurring an index scan at the VACUUM cleanup stage.
- This setting currently applies to B-tree indexes only.
-
-
- If no tuples were deleted from the heap, B-tree indexes are still
- scanned at the VACUUM cleanup stage when the
- index's statistics are stale. Index statistics are considered
- stale if the number of newly inserted tuples exceeds the
- vacuum_cleanup_index_scale_factor
- fraction of the total number of heap tuples detected by the previous
- statistics collection. The total number of heap tuples is stored in
- the index meta-page. Note that the meta-page does not include this data
- until VACUUM finds no dead tuples, so B-tree index
- scan at the cleanup stage can only be skipped if the second and
- subsequent VACUUM cycles detect no dead tuples.
-
-
- The value can range from 0 to
- 10000000000.
- When vacuum_cleanup_index_scale_factor is set to
- 0, index scans are never skipped during
- VACUUM cleanup. The default value is 0.1.
-
-
-
-
-
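To make the staleness rule above concrete, here is a hedged C sketch that restates the test this patch removes later from _bt_vacuum_needs_cleanup(); the helper name and standalone parameters are invented for illustration, but the conditions mirror the removed code:

    #include <stdbool.h>

    /*
     * Sketch only: before this patch, B-tree cleanup considered the index
     * statistics stale (and so scanned the index) when the number of tuples
     * inserted since the previous cleanup exceeded scale_factor times the
     * heap tuple count remembered in the index meta-page.  scale_factor is
     * the per-index reloption when set (>= 0), otherwise the GUC.
     */
    static bool
    btree_index_stats_are_stale(double num_heap_tuples,      /* current reltuples */
                                double prev_num_heap_tuples, /* from meta-page */
                                double scale_factor)
    {
        if (scale_factor <= 0)          /* 0 means never skip the cleanup scan */
            return true;
        if (num_heap_tuples < 0)        /* current tuple count unknown */
            return true;
        if (prev_num_heap_tuples <= 0)  /* meta-page has no usable count yet */
            return true;

        return (num_heap_tuples - prev_num_heap_tuples) / prev_num_heap_tuples
            >= scale_factor;
    }

With the default of 0.1, an index whose meta-page remembered 1,000,000 heap tuples would be scanned at the cleanup stage again once roughly 100,000 more tuples had been inserted.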
bytea_output (enum)
-
-
- vacuum_cleanup_index_scale_factor (floating point)
-
-
- vacuum_cleanup_index_scale_factor
- storage parameter
-
-
-
- Per-index value for vacuum_cleanup_index_scale_factor.
-
-
-
},
0, -1.0, DBL_MAX
},
- {
- {
- "vacuum_cleanup_index_scale_factor",
- "Number of tuple inserts prior to index cleanup as a fraction of reltuples.",
- RELOPT_KIND_BTREE,
- ShareUpdateExclusiveLock
- },
- -1, 0.0, 1e10
- },
/* list terminator */
{{NULL}}
};
xlmeta.fastroot = metad->btm_fastroot;
xlmeta.fastlevel = metad->btm_fastlevel;
xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
- xlmeta.last_cleanup_num_heap_tuples =
- metad->btm_last_cleanup_num_heap_tuples;
xlmeta.allequalimage = metad->btm_allequalimage;
XLogRegisterBuffer(2, metabuf,
md.fastroot = rootblknum;
md.fastlevel = metad->btm_level;
md.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
- md.last_cleanup_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples;
md.allequalimage = metad->btm_allequalimage;
XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata));
* _bt_vacuum_needs_cleanup() to decide whether or not a btvacuumscan()
* call should go ahead for an entire VACUUM operation.
*
- * See btvacuumcleanup() and _bt_vacuum_needs_cleanup() for details of
- * the two fields that we maintain here.
- *
- * The information that we maintain for btvacuumcleanup() describes the
- * state of the index (as well as the table it indexes) just _after_ the
- * ongoing VACUUM operation. The next _bt_vacuum_needs_cleanup() call
- * will consider the information we saved for it during the next VACUUM
- * operation (assuming that there will be no btbulkdelete() call during
- * the next VACUUM operation -- if there is then the question of skipping
- * btvacuumscan() doesn't even arise).
+ * See btvacuumcleanup() and _bt_vacuum_needs_cleanup() for the
+ * definition of num_delpages.
*/
void
-_bt_set_cleanup_info(Relation rel, BlockNumber num_delpages,
- float8 num_heap_tuples)
+_bt_set_cleanup_info(Relation rel, BlockNumber num_delpages)
{
Buffer metabuf;
Page metapg;
BTMetaPageData *metad;
- bool rewrite = false;
- XLogRecPtr recptr;
/*
* On-disk compatibility note: The btm_last_cleanup_num_delpages metapage
* in reality there are only one or two. The worst that can happen is
* that there will be a call to btvacuumscan a little earlier, which will
* set btm_last_cleanup_num_delpages to a sane value when we're called.
+ *
+ * Note also that the metapage's btm_last_cleanup_num_heap_tuples field is
+ * no longer used as of PostgreSQL 14. We set it to -1.0 on rewrite, just
+ * to be consistent.
*/
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
metapg = BufferGetPage(metabuf);
metad = BTPageGetMeta(metapg);
- /* Always dynamically upgrade index/metapage when BTREE_MIN_VERSION */
- if (metad->btm_version < BTREE_NOVAC_VERSION)
- rewrite = true;
- else if (metad->btm_last_cleanup_num_delpages != num_delpages)
- rewrite = true;
- else if (metad->btm_last_cleanup_num_heap_tuples != num_heap_tuples)
- rewrite = true;
-
- if (!rewrite)
+ /* Don't miss chance to upgrade index/metapage when BTREE_MIN_VERSION */
+ if (metad->btm_version >= BTREE_NOVAC_VERSION &&
+ metad->btm_last_cleanup_num_delpages == num_delpages)
{
+ /* Usually means index continues to have num_delpages of 0 */
_bt_relbuf(rel, metabuf);
return;
}
/* update cleanup-related information */
metad->btm_last_cleanup_num_delpages = num_delpages;
- metad->btm_last_cleanup_num_heap_tuples = num_heap_tuples;
+ metad->btm_last_cleanup_num_heap_tuples = -1.0;
MarkBufferDirty(metabuf);
/* write wal record if needed */
if (RelationNeedsWAL(rel))
{
xl_btree_metadata md;
+ XLogRecPtr recptr;
XLogBeginInsert();
XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
md.fastroot = metad->btm_fastroot;
md.fastlevel = metad->btm_fastlevel;
md.last_cleanup_num_delpages = num_delpages;
- md.last_cleanup_num_heap_tuples = num_heap_tuples;
md.allequalimage = metad->btm_allequalimage;
XLogRegisterBufData(0, (char *) &md, sizeof(xl_btree_metadata));
md.fastroot = rootblkno;
md.fastlevel = 0;
md.last_cleanup_num_delpages = 0;
- md.last_cleanup_num_heap_tuples = -1.0;
md.allequalimage = metad->btm_allequalimage;
XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata));
xlmeta.fastroot = metad->btm_fastroot;
xlmeta.fastlevel = metad->btm_fastlevel;
xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
- xlmeta.last_cleanup_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples;
xlmeta.allequalimage = metad->btm_allequalimage;
XLogRegisterBufData(4, (char *) &xlmeta, sizeof(xl_btree_metadata));
Buffer metabuf;
Page metapg;
BTMetaPageData *metad;
- BTOptions *relopts;
- float8 cleanup_scale_factor;
uint32 btm_version;
BlockNumber prev_num_delpages;
- float8 prev_num_heap_tuples;
/*
* Copy details from metapage to local variables quickly.
}
prev_num_delpages = metad->btm_last_cleanup_num_delpages;
- prev_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples;
_bt_relbuf(info->index, metabuf);
- /*
- * If the underlying table has received a sufficiently high number of
- * insertions since the last VACUUM operation that called btvacuumscan(),
- * then have the current VACUUM operation call btvacuumscan() now. This
- * happens when the statistics are deemed stale.
- *
- * XXX: We should have a more principled way of determining what
- * "staleness" means. The vacuum_cleanup_index_scale_factor GUC (and the
- * index-level storage param) seem hard to tune in a principled way.
- */
- relopts = (BTOptions *) info->index->rd_options;
- cleanup_scale_factor = (relopts &&
- relopts->vacuum_cleanup_index_scale_factor >= 0)
- ? relopts->vacuum_cleanup_index_scale_factor
- : vacuum_cleanup_index_scale_factor;
-
- if (cleanup_scale_factor <= 0 ||
- info->num_heap_tuples < 0 ||
- prev_num_heap_tuples <= 0 ||
- (info->num_heap_tuples - prev_num_heap_tuples) /
- prev_num_heap_tuples >= cleanup_scale_factor)
- return true;
-
/*
* Trigger cleanup in rare cases where prev_num_delpages exceeds 5% of the
* total size of the index. We can reasonably expect (though are not
/*
* Since we aren't going to actually delete any leaf items, there's no
- * need to go through all the vacuum-cycle-ID pushups here
+ * need to go through all the vacuum-cycle-ID pushups here.
+ *
+ * Posting list tuples are a source of inaccuracy for cleanup-only
+ * scans. btvacuumscan() will assume that the number of index tuples
+ * from each page can be used as num_index_tuples, even though
+ * num_index_tuples is supposed to represent the number of TIDs in the
+ * index. This naive approach can underestimate the number of tuples
+ * in the index significantly.
+ *
+ * We handle the problem by making num_index_tuples an estimate in the
+ * cleanup-only case.
*/
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
btvacuumscan(info, stats, NULL, NULL, 0);
+ stats->estimated_count = true;
}
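To give a feel for the size of that inaccuracy, here is a hedged arithmetic sketch; the page contents are hypothetical, and only the counting rule (one posting list tuple adds one to num_index_tuples) comes from the code above:

    /*
     * Illustration only: with deduplication, one posting list tuple can carry
     * many heap TIDs, but a cleanup-only btvacuumscan() adds just 1 per tuple
     * to num_index_tuples.
     */
    static double
    cleanup_only_undercount_factor(int posting_tuples_on_page, int tids_per_tuple)
    {
        int counted = posting_tuples_on_page;                  /* what the scan adds */
        int actual  = posting_tuples_on_page * tids_per_tuple; /* TIDs represented */

        return (double) actual / counted;   /* e.g. 10 tuples x 50 TIDs => 50x low */
    }

Marking the result with estimated_count = true keeps such a gap from being taken at face value.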
/*
* By here, we know for sure that this VACUUM operation won't be skipping
- * its btvacuumscan() call. Maintain the count of the current number of
- * heap tuples in the metapage. Also maintain the num_delpages value.
+ * its btvacuumscan() call. Maintain the num_delpages value in the metapage.
* This information will be used by _bt_vacuum_needs_cleanup() during
* future VACUUM operations that don't need to call btbulkdelete().
*
* num_delpages is the number of deleted pages now in the index that were
* not safe to place in the FSM to be recycled just yet. We expect that
* it will almost certainly be possible to place all of these pages in the
- * FSM during the next VACUUM operation. That factor alone might cause
- * _bt_vacuum_needs_cleanup() to force the next VACUUM to proceed with a
- * btvacuumscan() call.
- *
- * Note: We must delay the _bt_set_cleanup_info() call until this late
- * stage of VACUUM (the btvacuumcleanup() phase), to keep num_heap_tuples
- * accurate. The btbulkdelete()-time num_heap_tuples value is generally
- * just pg_class.reltuples for the heap relation _before_ VACUUM began.
- * In general cleanup info should describe the state of the index/table
- * _after_ VACUUM finishes.
+ * FSM during the next VACUUM operation. _bt_vacuum_needs_cleanup() will
+ * force the next VACUUM to consider this before allowing btvacuumscan()
+ * to be skipped entirely.
*/
Assert(stats->pages_deleted >= stats->pages_free);
num_delpages = stats->pages_deleted - stats->pages_free;
- _bt_set_cleanup_info(info->index, num_delpages, info->num_heap_tuples);
+ _bt_set_cleanup_info(info->index, num_delpages);
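For context, a hedged sketch of the surviving delpages-based trigger in _bt_vacuum_needs_cleanup() follows; the exact code falls outside the hunks shown here, the 5% threshold is taken from the comment earlier in this patch, and the standalone helper form is invented:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Sketch only: with the scale-factor test gone, deleted-but-unrecycled
     * pages recorded by _bt_set_cleanup_info() can still force a cleanup-only
     * btvacuumscan(), once they exceed 5% of the index's current size in
     * blocks.  prev_num_delpages corresponds to btm_last_cleanup_num_delpages
     * read from the metapage.
     */
    static bool
    delpages_force_cleanup(uint32_t prev_num_delpages, uint32_t index_blocks)
    {
        return prev_num_delpages > 0 &&
               prev_num_delpages > index_blocks / 20;   /* 1/20 == 5% */
    }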
/*
* It's quite possible for us to be fooled by concurrent page splits into
* double-counting some index tuples, so disbelieve any total that exceeds
* the underlying heap's count ... if we know that accurately. Otherwise
* this might just make matters worse.
- *
- * Posting list tuples are another source of inaccuracy. Cleanup-only
- * btvacuumscan calls assume that the number of index tuples can be used
- * as num_index_tuples, even though num_index_tuples is supposed to
- * represent the number of TIDs in the index. This naive approach can
- * underestimate the number of tuples in the index.
*/
if (!info->estimated_count)
{
* pages in the index at the end of the VACUUM command.)
*/
stats->num_pages = 0;
- stats->estimated_count = false;
stats->num_index_tuples = 0;
stats->pages_deleted = 0;
stats->pages_free = 0;
* We don't count the number of live TIDs during cleanup-only calls to
* btvacuumscan (i.e. when callback is not set). We count the number
* of index tuples directly instead. This avoids the expense of
- * directly examining all of the tuples on each page.
+ * directly examining all of the tuples on each page. VACUUM will
+ * treat num_index_tuples as an estimate in the cleanup-only case, so it
+ * doesn't matter that this underestimates num_index_tuples
+ * significantly in some cases.
*/
if (minoff > maxoff)
attempt_pagedel = (blkno == scanblkno);
{
static const relopt_parse_elt tab[] = {
{"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
- {"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
- offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
{"deduplicate_items", RELOPT_TYPE_BOOL,
offsetof(BTOptions, deduplicate_items)}
/* Cannot log BTREE_MIN_VERSION index metapage without upgrade */
Assert(md->btm_version >= BTREE_NOVAC_VERSION);
md->btm_last_cleanup_num_delpages = xlrec->last_cleanup_num_delpages;
- md->btm_last_cleanup_num_heap_tuples = xlrec->last_cleanup_num_heap_tuples;
+ md->btm_last_cleanup_num_heap_tuples = -1.0;
md->btm_allequalimage = xlrec->allequalimage;
pageop = (BTPageOpaque) PageGetSpecialPointer(metapg);
xlrec = (xl_btree_metadata *) XLogRecGetBlockData(record, 0,
NULL);
- appendStringInfo(buf, "last_cleanup_num_delpages %u; last_cleanup_num_heap_tuples: %f",
- xlrec->last_cleanup_num_delpages,
- xlrec->last_cleanup_num_heap_tuples);
+ appendStringInfo(buf, "last_cleanup_num_delpages %u",
+ xlrec->last_cleanup_num_delpages);
break;
}
}
int VacuumCostBalance = 0; /* working state for vacuum */
bool VacuumCostActive = false;
-
-double vacuum_cleanup_index_scale_factor;
NULL, NULL, NULL
},
- {
- {"vacuum_cleanup_index_scale_factor", PGC_USERSET, CLIENT_CONN_STATEMENT,
- gettext_noop("Number of tuple inserts prior to index cleanup as a fraction of reltuples."),
- NULL
- },
- &vacuum_cleanup_index_scale_factor,
- 0.1, 0.0, 1e10,
- NULL, NULL, NULL
- },
-
{
{"log_statement_sample_rate", PGC_SUSET, LOGGING_WHEN,
gettext_noop("Fraction of statements exceeding log_min_duration_sample to be logged."),
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
-#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
- # before index cleanup, 0 always performs
- # index cleanup
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
/* ALTER INDEX SET|RESET ( */
else if (Matches("ALTER", "INDEX", MatchAny, "RESET", "("))
COMPLETE_WITH("fillfactor",
- "vacuum_cleanup_index_scale_factor", "deduplicate_items", /* BTREE */
+ "deduplicate_items", /* BTREE */
"fastupdate", "gin_pending_list_limit", /* GIN */
"buffering", /* GiST */
"pages_per_range", "autosummarize" /* BRIN */
);
else if (Matches("ALTER", "INDEX", MatchAny, "SET", "("))
COMPLETE_WITH("fillfactor =",
- "vacuum_cleanup_index_scale_factor =", "deduplicate_items =", /* BTREE */
+ "deduplicate_items =", /* BTREE */
"fastupdate =", "gin_pending_list_limit =", /* GIN */
"buffering =", /* GiST */
"pages_per_range =", "autosummarize =" /* BRIN */
/* number of deleted, non-recyclable pages during last cleanup */
uint32 btm_last_cleanup_num_delpages;
- /* number of heap tuples during last cleanup */
+ /* number of heap tuples during last cleanup (deprecated) */
float8 btm_last_cleanup_num_heap_tuples;
bool btm_allequalimage; /* are all columns "equalimage"? */
{
int32 varlena_header_; /* varlena header (do not touch directly!) */
int fillfactor; /* page fill factor in percent (0..100) */
- /* fraction of newly inserted tuples needed to trigger index cleanup */
- float8 vacuum_cleanup_index_scale_factor;
bool deduplicate_items; /* Try to deduplicate items? */
} BTOptions;
*/
extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
bool allequalimage);
-extern void _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages,
- float8 num_heap_tuples);
+extern void _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages);
extern void _bt_upgrademetapage(Page page);
extern Buffer _bt_getroot(Relation rel, int access);
extern Buffer _bt_gettrueroot(Relation rel);
BlockNumber fastroot;
uint32 fastlevel;
uint32 last_cleanup_num_delpages;
- float8 last_cleanup_num_heap_tuples;
bool allequalimage;
} xl_btree_metadata;
/*
* Each page of XLOG file has a header like this:
*/
-#define XLOG_PAGE_MAGIC 0xD10A /* can be used as WAL version indicator */
+#define XLOG_PAGE_MAGIC 0xD10B /* can be used as WAL version indicator */
typedef struct XLogPageHeaderData
{
extern int VacuumCostBalance;
extern bool VacuumCostActive;
-extern double vacuum_cleanup_index_scale_factor;
-
/* in tcop/postgres.c */
create index btree_tall_idx on btree_tall_tbl (t, id) with (fillfactor = 10);
insert into btree_tall_tbl select g, repeat('x', 250)
from generate_series(1, 130) g;
---
--- Test vacuum_cleanup_index_scale_factor
---
--- Simple create
-create table btree_test(a int);
-create index btree_idx1 on btree_test(a) with (vacuum_cleanup_index_scale_factor = 40.0);
-select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass;
- reloptions
-------------------------------------------
- {vacuum_cleanup_index_scale_factor=40.0}
-(1 row)
-
--- Fail while setting improper values
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = -10.0);
-ERROR: value -10.0 out of bounds for option "vacuum_cleanup_index_scale_factor"
-DETAIL: Valid values are between "0.000000" and "10000000000.000000".
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 100.0);
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 'string');
-ERROR: invalid value for floating point option "vacuum_cleanup_index_scale_factor": string
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = true);
-ERROR: invalid value for floating point option "vacuum_cleanup_index_scale_factor": true
--- Simple ALTER INDEX
-alter index btree_idx1 set (vacuum_cleanup_index_scale_factor = 70.0);
-select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass;
- reloptions
-------------------------------------------
- {vacuum_cleanup_index_scale_factor=70.0}
-(1 row)
-
--
-- Test for multilevel page deletion
--
insert into btree_tall_tbl select g, repeat('x', 250)
from generate_series(1, 130) g;
---
--- Test vacuum_cleanup_index_scale_factor
---
-
--- Simple create
-create table btree_test(a int);
-create index btree_idx1 on btree_test(a) with (vacuum_cleanup_index_scale_factor = 40.0);
-select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass;
-
--- Fail while setting improper values
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = -10.0);
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 100.0);
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 'string');
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = true);
-
--- Simple ALTER INDEX
-alter index btree_idx1 set (vacuum_cleanup_index_scale_factor = 70.0);
-select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass;
-
--
-- Test for multilevel page deletion
--