From 9e189f97e9b447e20ad60dbd9e8e183c6f4f19ff Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Sun, 28 Mar 2021 20:55:55 -0700 Subject: [PATCH v11 2/2] Bypass index vacuuming in some cases. Bypass index vacuuming in two cases: The case where there are almost no dead tuples in indexes, as an optimization, and the case where a table's relfrozenxid is dangerously far in the past, as a failsafe to avoid wraparound failure. The failsafe is controlled by two new GUCs: vacuum_failsafe_age, and vacuum_multixact_failsafe_age. These specify the age at which VACUUM should take extraordinary measures in order to advance relfrozenxid and/or relminmxid before a system-wide wraparound failure takes place. Note also that the failsafe has VACUUM stop applying any cost-based delay that may be in effect. Author: Masahiko Sawada Author: Peter Geoghegan Discussion: https://postgr.es/m/CAD21AoD0SkE11fMw4jD4RENAwBMcw1wasVnwpJVw3tVqPOQgAw@mail.gmail.com Discussion: https://postgr.es/m/CAH2-WzmkebqPd4MVGuPTOS9bMFvp9MDs5cRTCOsv1rQJ3jCbXw@mail.gmail.com --- src/include/commands/vacuum.h | 4 + src/backend/access/heap/vacuumlazy.c | 306 +++++++++++++++++- src/backend/commands/vacuum.c | 64 ++++ src/backend/utils/misc/guc.c | 25 +- src/backend/utils/misc/postgresql.conf.sample | 2 + doc/src/sgml/config.sgml | 66 ++++ 6 files changed, 449 insertions(+), 18 deletions(-) diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index d029da5ac0..9179ad223f 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -235,6 +235,8 @@ extern int vacuum_freeze_min_age; extern int vacuum_freeze_table_age; extern int vacuum_multixact_freeze_min_age; extern int vacuum_multixact_freeze_table_age; +extern int vacuum_failsafe_age; +extern int vacuum_multixact_failsafe_age; /* Variables for cost-based parallel vacuum */ extern pg_atomic_uint32 *VacuumSharedCostBalance; @@ -270,6 +272,8 @@ extern void vacuum_set_xid_limits(Relation rel, TransactionId 
*xidFullScanLimit, MultiXactId *multiXactCutoff, MultiXactId *mxactFullScanLimit); +extern bool vacuum_xid_limit_emergency(TransactionId relfrozenxid, + MultiXactId relminmxid); extern void vac_update_datfrozenxid(void); extern void vacuum_delay_point(void); extern bool vacuum_is_relation_owner(Oid relid, Form_pg_class reltuple, diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 1d55d0ecf9..a27cdf1eb0 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -103,6 +103,19 @@ #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */ #define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */ +/* + * Threshold that controls whether we bypass index vacuuming and heap + * vacuuming as an optimization + */ +#define BYPASS_THRESHOLD_PAGES 0.02 /* i.e. 2% of rel_pages */ + +/* + * When a table is small (i.e. smaller than this), save cycles by avoiding + * repeated emergency fail safe checks + */ +#define BYPASS_EMERGENCY_MIN_PAGES \ + ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ)) + /* * When a table has no indexes, vacuum the FSM after every 8GB, approximately * (it won't be exact because we only vacuum FSM after processing a heap page @@ -299,6 +312,7 @@ typedef struct LVRelState /* Do index vacuuming/cleanup? 
*/ bool do_index_vacuuming; bool do_index_cleanup; + bool do_failsafe_speedup; /* Buffer access strategy and parallel state */ BufferAccessStrategy bstrategy; @@ -392,13 +406,14 @@ static void lazy_scan_prune(LVRelState *vacrel, Buffer buf, BlockNumber blkno, Page page, GlobalVisState *vistest, LVPagePruneState *prunestate); -static void lazy_vacuum(LVRelState *vacrel); -static void lazy_vacuum_all_indexes(LVRelState *vacrel); +static void lazy_vacuum(LVRelState *vacrel, bool onecall); +static bool lazy_vacuum_all_indexes(LVRelState *vacrel); static void lazy_vacuum_heap_rel(LVRelState *vacrel); static int lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int tupindex, Buffer *vmbuffer); static bool lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel); +static bool should_speedup_failsafe(LVRelState *vacrel); static void do_parallel_lazy_vacuum_all_indexes(LVRelState *vacrel); static void do_parallel_lazy_cleanup_all_indexes(LVRelState *vacrel); static void do_parallel_vacuum_or_cleanup(LVRelState *vacrel, int nworkers); @@ -544,6 +559,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, &vacrel->indrels); vacrel->do_index_vacuuming = true; vacrel->do_index_cleanup = true; + vacrel->do_failsafe_speedup = false; if (params->index_cleanup == VACOPT_TERNARY_DISABLED) { vacrel->do_index_vacuuming = false; @@ -749,6 +765,29 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, (long long) VacuumPageHit, (long long) VacuumPageMiss, (long long) VacuumPageDirty); + if (vacrel->rel_pages > 0) + { + msgfmt = _(" %u pages from table (%.2f%% of total) had %lld dead item identifiers removed\n"); + + if (vacrel->nindexes == 0 || (vacrel->do_index_vacuuming && + vacrel->num_index_scans == 0)) + appendStringInfo(&buf, _("index scan not needed:")); + else if (vacrel->do_index_vacuuming && vacrel->num_index_scans > 0) + appendStringInfo(&buf, _("index scan needed:")); + else + { + msgfmt = _(" %u pages from table (%.2f%% of total) have 
%lld dead item identifiers\n"); + + if (!vacrel->do_failsafe_speedup) + appendStringInfo(&buf, _("index scan bypassed:")); + else + appendStringInfo(&buf, _("index scan bypassed due to emergency:")); + } + appendStringInfo(&buf, msgfmt, + vacrel->lpdead_item_pages, + 100.0 * vacrel->lpdead_item_pages / vacrel->rel_pages, + (long long) vacrel->lpdead_items); + } for (int i = 0; i < vacrel->nindexes; i++) { IndexBulkDeleteResult *istat = vacrel->indstats[i]; @@ -839,7 +878,8 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) next_fsm_block_to_vacuum; PGRUsage ru0; Buffer vmbuffer = InvalidBuffer; - bool skipping_blocks; + bool skipping_blocks, + have_vacuumed_indexes = false; StringInfoData buf; const int initprog_index[] = { PROGRESS_VACUUM_PHASE, @@ -975,6 +1015,12 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) else skipping_blocks = false; + /* + * Before beginning heap scan, check if it's already necessary to apply + * fail safe speedup + */ + should_speedup_failsafe(vacrel); + for (blkno = 0; blkno < nblocks; blkno++) { Buffer buf; @@ -1091,7 +1137,8 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) } /* Remove the collected garbage tuples from table and indexes */ - lazy_vacuum(vacrel); + lazy_vacuum(vacrel, false); + have_vacuumed_indexes = true; /* * Vacuum the Free Space Map to make newly-freed space visible on @@ -1311,12 +1358,17 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) * Periodically perform FSM vacuuming to make newly-freed * space visible on upper FSM pages. Note we have not yet * performed FSM processing for blkno. + * + * This is also a good time to call should_speedup_failsafe(), + * since we also don't want to do that too frequently or too + * infrequently. 
*/ if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES) { FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno); next_fsm_block_to_vacuum = blkno; + should_speedup_failsafe(vacrel); } /* @@ -1450,6 +1502,14 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) * make available in cases where it's possible to truncate the * page's line pointer array. * + * Note: It's not in fact 100% certain that we really will call + * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip + * index vacuuming (and so must skip heap vacuuming). This is + * deemed okay because it only happens in emergencies, or when + * there is very little free space anyway. (Besides, we start + * recording free space in the FSM once index vacuuming has been + * abandoned.) + * * Note: The one-pass (no indexes) case is only supposed to make * it this far when there were no LP_DEAD items during pruning. */ @@ -1493,13 +1553,12 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) } /* If any tuples need to be deleted, perform final vacuum cycle */ - /* XXX put a threshold on min number of tuples here? */ if (dead_tuples->num_tuples > 0) - lazy_vacuum(vacrel); + lazy_vacuum(vacrel, !have_vacuumed_indexes); /* * Vacuum the remainder of the Free Space Map. We must do this whether or - * not there were indexes. + * not there were indexes, and whether or not we bypassed index vacuuming. */ if (blkno > next_fsm_block_to_vacuum) FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno); @@ -1526,6 +1585,16 @@ lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive) * If table has no indexes and at least one heap pages was vacuumed, make * log report that lazy_vacuum_heap_rel would've made had there been * indexes (having indexes implies using the two pass strategy). + * + * We deliberately don't do this in the case where there are indexes but + * index vacuuming was bypassed. 
We make a similar report at the point + * that index vacuuming is bypassed, but that's actually quite different + * in one important sense: it shows information about work we _haven't_ + * done. + * + * log_autovacuum output does things differently; it consistently presents + * information about LP_DEAD items for the VACUUM as a whole. We always + * report on each round of index and heap vacuuming separately, though. */ if (vacrel->nindexes == 0 && vacrel->lpdead_item_pages > 0) ereport(elevel, @@ -1953,10 +2022,19 @@ retry: /* * Remove the collected garbage tuples from the table and its indexes. + * + * We may choose to bypass index vacuuming at this point. + * + * In rare emergencies, the ongoing VACUUM operation can be made to skip both + * index vacuuming and index cleanup at the point we're called. This avoids + * having the whole system refuse to allocate further XIDs/MultiXactIds due to + * wraparound. */ static void -lazy_vacuum(LVRelState *vacrel) +lazy_vacuum(LVRelState *vacrel, bool onecall) { + bool do_bypass_optimization; + /* Should not end up here with no indexes */ Assert(vacrel->nindexes > 0); Assert(!IsParallelWorker()); @@ -1964,16 +2042,102 @@ lazy_vacuum(LVRelState *vacrel) if (!vacrel->do_index_vacuuming) { - Assert(!vacrel->do_index_cleanup); vacrel->dead_tuples->num_tuples = 0; return; } - /* Okay, we're going to do index vacuuming */ - lazy_vacuum_all_indexes(vacrel); + /* + * Consider bypassing index vacuuming (and heap vacuuming) entirely. + * + * We currently only do this in cases where the number of LP_DEAD items + * for the entire VACUUM operation is close to zero. This avoids sharp + * discontinuities in the duration and overhead of successive VACUUM + * operations that run against the same table with a fixed workload. + * Ideally, successive VACUUM operations will behave as if there are + * exactly zero LP_DEAD items in cases where there are close to zero. 
+ * + * This is likely to be helpful with a table that is continually affected + * by UPDATEs that can mostly apply the HOT optimization, but occasionally + * have small aberrations that lead to just a few heap pages retaining + * only one or two LP_DEAD items. This is pretty common; even when the + * DBA goes out of their way to make UPDATEs use HOT, it is practically + * impossible to predict whether HOT will be applied in 100% of cases. + * It's far easier to ensure that 99%+ of all UPDATEs against a table use + * HOT through careful tuning. + */ + do_bypass_optimization = false; + if (onecall && vacrel->rel_pages > 0) + { + BlockNumber threshold; - /* Remove tuples from heap */ - lazy_vacuum_heap_rel(vacrel); + Assert(vacrel->num_index_scans == 0); + Assert(vacrel->lpdead_items == vacrel->dead_tuples->num_tuples); + Assert(vacrel->do_index_vacuuming); + Assert(vacrel->do_index_cleanup); + + /* + * This crossover point at which we'll start to do index vacuuming is + * expressed as a percentage of the total number of heap pages in the + * table that are known to have at least one LP_DEAD item. This is + * much more important than the total number of LP_DEAD items, since + * it's a proxy for the number of heap pages whose visibility map bits + * cannot be set on account of bypassing index and heap vacuuming. + * + * We apply one further precautionary test: the space currently used + * to store the TIDs (TIDs that now all point to LP_DEAD items) must + * not exceed 32MB. This limits the risk that we will bypass index + * vacuuming again and again until eventually there is a VACUUM whose + * dead_tuples space is not CPU cache resident. + */ + threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES; + do_bypass_optimization = + (vacrel->lpdead_item_pages < threshold && + vacrel->lpdead_items < MAXDEADTUPLES(32L * 1024L * 1024L)); + } + + if (do_bypass_optimization) + { + /* + * There are almost zero TIDs. 
Behave as if there were precisely + * zero: bypass index vacuuming, but do index cleanup. + * + * We expect that the ongoing VACUUM operation will finish very + * quickly, so there is no point in considering speeding up as a + * failsafe against wraparound failure. (Index cleanup is expected to + * finish very quickly in cases where there were no ambulkdelete() + * calls.) + */ + vacrel->do_index_vacuuming = false; + ereport(elevel, + (errmsg("\"%s\": index scan bypassed: %u pages from table (%.2f%% of total) have %lld dead item identifiers", + vacrel->relname, vacrel->rel_pages, + 100.0 * vacrel->lpdead_item_pages / vacrel->rel_pages, + (long long) vacrel->lpdead_items))); + } + else if (lazy_vacuum_all_indexes(vacrel)) + { + /* + * We successfully completed a round of index vacuuming. Do related + * heap vacuuming now. + */ + lazy_vacuum_heap_rel(vacrel); + } + else + { + /* + * Emergency case. + * + * we attempted index vacuuming, but didn't finish a full round/full + * index scan. This happens when relfrozenxid or relminmxid is too + * far in the past. + * + * From this point on the VACUUM operation will do no further index + * vacuuming or heap vacuuming. It will do any remaining pruning that + * may be required, plus other heap-related and relation-level + * maintenance tasks. But that's it. + */ + Assert(vacrel->do_failsafe_speedup); + } /* * Forget the now-vacuumed tuples -- just press on @@ -1983,10 +2147,17 @@ lazy_vacuum(LVRelState *vacrel) /* * lazy_vacuum_all_indexes() -- Main entry for index vacuuming + * + * Returns true in the common case when all indexes were successfully + * vacuumed. Returns false in rare cases where we determined that the ongoing + * VACUUM operation is at risk of taking too long to finish, leading to + * wraparound failure. 
*/ -static void +static bool lazy_vacuum_all_indexes(LVRelState *vacrel) { + bool allindexes = true; + Assert(!IsParallelWorker()); Assert(vacrel->nindexes > 0); Assert(vacrel->do_index_vacuuming); @@ -1994,6 +2165,13 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) Assert(TransactionIdIsNormal(vacrel->relfrozenxid)); Assert(MultiXactIdIsValid(vacrel->relminmxid)); + /* Precheck for XID wraparound emergencies */ + if (should_speedup_failsafe(vacrel)) + { + /* Wraparound emergency -- don't even start an index scan */ + return false; + } + /* Report that we are now vacuuming indexes */ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_VACUUM_INDEX); @@ -2008,26 +2186,50 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) vacrel->indstats[idx] = lazy_vacuum_one_index(indrel, istat, vacrel->old_live_tuples, vacrel); + + if (should_speedup_failsafe(vacrel)) + { + /* Wraparound emergency -- end current index scan */ + allindexes = false; + break; + } } } else { /* Outsource everything to parallel variant */ do_parallel_lazy_vacuum_all_indexes(vacrel); + + /* + * Do a postcheck to consider applying wraparound failsafe now. Note + * that parallel VACUUM only gets the precheck and this postcheck. + */ + if (should_speedup_failsafe(vacrel)) + allindexes = false; } /* * We delete all LP_DEAD items from the first heap pass in all indexes on - * each call here. This makes the next call to lazy_vacuum_heap_rel() - * safe. + * each call here (except calls where we choose to do the fail safe). + * This makes the next call to lazy_vacuum_heap_rel() safe (except in the + * event of the fail safe triggering, which prevents the next call from + * taking place). */ Assert(vacrel->num_index_scans > 0 || vacrel->dead_tuples->num_tuples == vacrel->lpdead_items); + Assert(allindexes || vacrel->do_failsafe_speedup); - /* Increase and report the number of index scans */ + /* + * Increase and report the number of index scans. 
+ * + * We deliberately include the case where we started a round of bulk + * deletes that we weren't able to finish due to the fail safe triggering. + */ vacrel->num_index_scans++; pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS, vacrel->num_index_scans); + + return allindexes; } /* @@ -2320,6 +2522,76 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel) return (offnum <= maxoff); } +/* + * Determine if there is an unacceptable risk of wraparound failure due to the + * fact that the ongoing VACUUM is taking too long -- the table that is being + * vacuumed should not have a relfrozenxid or relminmxid that is too far in + * the past. + * + * Note that we deliberately don't vary our behavior based on factors like + * whether or not the ongoing VACUUM is aggressive. If it's not aggressive we + * probably won't be able to advance relfrozenxid during this VACUUM. If we + * can't, then an anti-wraparound VACUUM should take place immediately after + * we finish up. We should be able to bypass all index vacuuming for the + * later anti-wraparound VACUUM. + * + * If the user-configurable threshold has been crossed then hurry things up: + * Stop applying any VACUUM cost delay going forward, and remember to skip any + * further index vacuuming (and heap vacuuming, at least in the common case + * where table has indexes). + * + * Return true to inform caller of the emergency. Otherwise return false. + * + * Caller is expected to call here before and after vacuuming each index in + * the case of two-pass VACUUM, or every VACUUM_FSM_EVERY_PAGES blocks in the + * case of no-indexes/one-pass VACUUM. 
+ */ +static bool +should_speedup_failsafe(LVRelState *vacrel) +{ + /* Avoid calling vacuum_xid_limit_emergency() very frequently */ + if (vacrel->num_index_scans == 0 && + vacrel->rel_pages <= BYPASS_EMERGENCY_MIN_PAGES) + return false; + + /* Don't warn more than once per VACUUM */ + if (vacrel->do_failsafe_speedup) + return true; + + if (unlikely(vacuum_xid_limit_emergency(vacrel->relfrozenxid, + vacrel->relminmxid))) + { + /* + * Wraparound emergency -- the table's relfrozenxid or relminmxid is + * too far in the past + */ + Assert(vacrel->do_index_vacuuming); + Assert(vacrel->do_index_cleanup); + + vacrel->do_index_vacuuming = false; + vacrel->do_index_cleanup = false; + vacrel->do_failsafe_speedup = true; + + ereport(WARNING, + (errmsg("abandoned index vacuuming of table \"%s.%s.%s\" as a fail safe after %d index scans", + get_database_name(MyDatabaseId), + vacrel->relnamespace, + vacrel->relname, + vacrel->num_index_scans), + errdetail("table's relfrozenxid or relminmxid is too far in the past"), + errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n" + "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs."))); + + /* Stop applying cost limits from this point on */ + VacuumCostActive = false; + VacuumCostBalance = 0; + + return true; + } + + return false; +} + /* * Perform lazy_vacuum_all_indexes() steps in parallel */ diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 25465b05dd..43eb84f538 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -62,6 +62,8 @@ int vacuum_freeze_min_age; int vacuum_freeze_table_age; int vacuum_multixact_freeze_min_age; int vacuum_multixact_freeze_table_age; +int vacuum_failsafe_age; +int vacuum_multixact_failsafe_age; /* A few variables that don't seem worth passing around as parameters */ @@ -1134,6 +1136,68 @@ vacuum_set_xid_limits(Relation rel, } } +/* + * 
vacuum_xid_limit_emergency() -- Used by VACUUM's fail safe emergency + * wraparound mechanism to determine if its table's relfrozenxid and + * relminmxid now are dangerously far in the past. + * + * When we return true, VACUUM caller will take extraordinary measures to + * avoid wraparound failure. + * + * Input parameters are the target relation's relfrozenxid and relminmxid. + */ +bool +vacuum_xid_limit_emergency(TransactionId relfrozenxid, MultiXactId relminmxid) +{ + TransactionId xid_skip_limit; + MultiXactId multi_skip_limit; + int skip_index_vacuum; + + Assert(TransactionIdIsNormal(relfrozenxid)); + Assert(MultiXactIdIsValid(relminmxid)); + + /* + * Determine the index skipping age to use. In any case not less than + * autovacuum_freeze_max_age * 1.05, so that VACUUM always does an + * aggressive scan. + */ + skip_index_vacuum = Max(vacuum_failsafe_age, autovacuum_freeze_max_age * 1.05); + + xid_skip_limit = ReadNextTransactionId() - skip_index_vacuum; + if (!TransactionIdIsNormal(xid_skip_limit)) + xid_skip_limit = FirstNormalTransactionId; + + if (TransactionIdPrecedes(relfrozenxid, xid_skip_limit)) + { + /* The table's relfrozenxid is too old */ + return true; + } + + /* + * Similar to above, determine the index skipping age to use for multixact. + * In any case not less than autovacuum_multixact_freeze_max_age * 1.05. + */ + skip_index_vacuum = Max(vacuum_multixact_failsafe_age, + autovacuum_multixact_freeze_max_age * 1.05); + + /* + * Compute the multixact age for which freezing is urgent. This is + * normally autovacuum_multixact_freeze_max_age, but may be less if we are + * short of multixact member space. 
+ */ + multi_skip_limit = ReadNextMultiXactId() - skip_index_vacuum; + if (multi_skip_limit < FirstMultiXactId) + multi_skip_limit = FirstMultiXactId; + + if (MultiXactIdPrecedes(relminmxid, multi_skip_limit)) + { + /* The table's relminmxid is too old */ + return true; + } + + return false; +} + /* * vac_estimate_reltuples() -- estimate the new value for pg_class.reltuples * diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index c9c9da85f3..46a48ecbe1 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -2647,6 +2647,26 @@ static struct config_int ConfigureNamesInt[] = 0, 0, 1000000, /* see ComputeXidHorizons */ NULL, NULL, NULL }, + { + {"vacuum_failsafe_age", PGC_USERSET, CLIENT_CONN_STATEMENT, + gettext_noop("Age at which VACUUM should trigger failsafe to avoid a wraparound outage."), + NULL + }, + &vacuum_failsafe_age, + /* This upper-limit can be 1.05 of autovacuum_freeze_max_age */ + 1800000000, 0, 2100000000, + NULL, NULL, NULL + }, + { + {"vacuum_multixact_failsafe_age", PGC_USERSET, CLIENT_CONN_STATEMENT, + gettext_noop("Multixact age at which VACUUM should trigger failsafe to avoid a wraparound outage."), + NULL + }, + &vacuum_multixact_failsafe_age, + /* This upper-limit can be 1.05 of autovacuum_multixact_freeze_max_age */ + 1800000000, 0, 2100000000, + NULL, NULL, NULL + }, /* * See also CheckRequiredParameterValues() if this parameter changes @@ -3247,7 +3267,10 @@ static struct config_int ConfigureNamesInt[] = NULL }, &autovacuum_freeze_max_age, - /* see pg_resetwal if you change the upper-limit value */ + /* + * see pg_resetwal and vacuum_failsafe_age if you change the + * upper-limit value. 
+ */ 200000000, 100000, 2000000000, NULL, NULL, NULL }, diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index 39da7cc942..445f696826 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -675,6 +675,8 @@ #vacuum_freeze_table_age = 150000000 #vacuum_multixact_freeze_min_age = 5000000 #vacuum_multixact_freeze_table_age = 150000000 +#vacuum_failsafe_age = 1800000000 +#vacuum_multixact_failsafe_age = 1800000000 #bytea_output = 'hex' # hex, escape #xmlbinary = 'base64' #xmloption = 'content' diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index effc60c07b..a772a5cda9 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -8605,6 +8605,39 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; + + vacuum_failsafe_age (integer) + + vacuum_failsafe_age configuration parameter + + + + + Specifies the maximum age (in transactions) that a table's + pg_class.relfrozenxid + field can attain before VACUUM takes + extraordinary measures to avoid system-wide transaction ID + wraparound failure. This is VACUUM's + strategy of last resort. The fail safe typically triggers + when an autovacuum to prevent transaction ID wraparound has + already been running for some time, though it's possible for + the fail safe to trigger during any VACUUM. + + + When the fail safe is triggered, any cost-based delay that is + in effect will no longer be applied, and further non-essential + maintenance tasks (such as index vacuuming) are bypassed. + + + The default is 1.8 billion transactions. Although users can + set this value anywhere from zero to 2.1 billion, + VACUUM will silently adjust the effective + value to no less than 105% of . 
+ + + + vacuum_multixact_freeze_table_age (integer) @@ -8651,6 +8684,39 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; + + vacuum_multixact_failsafe_age (integer) + + vacuum_multixact_failsafe_age configuration parameter + + + + + Specifies the maximum age (in transactions) that a table's + pg_class.relminmxid + field can attain before VACUUM takes + extraordinary measures to avoid system-wide multixact ID + wraparound failure. This is VACUUM's + strategy of last resort. The fail safe typically triggers + when an autovacuum to prevent transaction ID wraparound has + already been running for some time, though it's possible for + the fail safe to trigger during any VACUUM. + + + When the fail safe is triggered, any cost-based delay that is + in effect will no longer be applied, and further non-essential + maintenance tasks (such as index vacuuming) are bypassed. + + + The default is 1.8 billion multixacts. Although users can set + this value anywhere from zero to 2.1 billion, + VACUUM will silently adjust the effective + value to no less than 105% of . + + + + bytea_output (enum) -- 2.27.0