From 543cade27fcc4913f53fbc8df5df87936f7d8294 Mon Sep 17 00:00:00 2001 From: Bertrand Drouvot Date: Mon, 19 Jan 2026 06:27:55 +0000 Subject: [PATCH v11 5/5] Change RELATION and DATABASE stats to anytime flush This commit allows mixing fields with different transaction behavior within the same RELATION or DATABASE statistics kind: some fields are transactional (e.g., tuple inserts/updates/deletes) while others are non-transactional (e.g., sequential scans, blocks read). It modifies the relation flush callback to handle the anytime_only parameter introduced in the preceding commit. Implementation details: - Change RELATION from FLUSH_AT_TXN_BOUNDARY to FLUSH_ANYTIME - Change DATABASE from FLUSH_AT_TXN_BOUNDARY to FLUSH_ANYTIME - Add an is_partial parameter to flush_pending_cb() to be able to distinguish partial flushes in pgstat_flush_pending_entries() - Modify pgstat_relation_flush_cb() to handle the anytime_only parameter: when true, flush only the non-transactional stats; when false, flush all the stats. When set to true, it clears the flushed fields from the pending stats to prevent double-counting at transaction boundary DATABASE stats inherit the anytime flush behavior so that relation-derived stats (tuples_returned, tuples_fetched, blocks_fetched, blocks_hit) are visible while transactions are in progress. Tests are added to verify the anytime flush behavior for mixed fields. 
--- doc/src/sgml/monitoring.sgml | 37 ++++++- src/backend/utils/activity/pgstat.c | 15 +-- src/backend/utils/activity/pgstat_database.c | 6 +- src/backend/utils/activity/pgstat_function.c | 6 +- src/backend/utils/activity/pgstat_relation.c | 92 ++++++++++++---- .../utils/activity/pgstat_subscription.c | 6 +- src/include/pgstat.h | 27 ++++- src/include/utils/pgstat_internal.h | 16 ++- src/test/isolation/expected/stats.out | 102 ++++++++++++++++++ src/test/isolation/expected/stats_1.out | 102 ++++++++++++++++++ src/test/isolation/specs/stats.spec | 27 ++++- .../test_custom_stats/test_custom_var_stats.c | 9 +- 12 files changed, 404 insertions(+), 41 deletions(-) 11.7% doc/src/sgml/ 26.8% src/backend/utils/activity/ 4.2% src/include/utils/ 5.4% src/include/ 45.1% src/test/isolation/expected/ 4.7% src/test/isolation/specs/ diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index b77d189a500..f2321b631b0 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -3767,6 +3767,19 @@ description | Waiting for a newly initialized WAL file to reach durable storage + + + Some statistics are updated while a transaction is in progress (for example, + blks_read, blks_hit, + tup_returned and tup_fetched). + Statistics that either do not depend on transactions or require transactional + consistency are updated only when the transaction ends. Statistics that require + transactional consistency include xact_commit, + xact_rollback, tup_inserted, + tup_updated and tup_deleted. 
+ + + @@ -3956,8 +3969,8 @@ description | Waiting for a newly initialized WAL file to reach durable storage last_seq_scan timestamp with time zone - The time of the last sequential scan on this table, based on the - most recent transaction stop time + The approximate time of the last sequential scan on this table, updated + at least every stats_flush_interval @@ -3984,8 +3997,8 @@ description | Waiting for a newly initialized WAL file to reach durable storage last_idx_scan timestamp with time zone - The time of the last index scan on this table, based on the - most recent transaction stop time + The approximate time of the last index scan on this table, updated + at least every stats_flush_interval @@ -4223,6 +4236,15 @@ description | Waiting for a newly initialized WAL file to reach durable storage + + + The seq_scan, last_seq_scan, + seq_tup_read, idx_scan, + last_idx_scan and idx_tup_fetch + are updated while transactions are in progress. + + + @@ -4404,6 +4426,13 @@ description | Waiting for a newly initialized WAL file to reach durable storage tuples (see ). + + + The idx_scan, last_idx_scan, + idx_tup_read and idx_tup_fetch + are updated while transactions are in progress. 
+ + EXPLAIN ANALYZE outputs the total number of index diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c index fd6ab0db16f..a8a905640d0 100644 --- a/src/backend/utils/activity/pgstat.c +++ b/src/backend/utils/activity/pgstat.c @@ -298,7 +298,7 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE] .fixed_amount = false, .write_to_file = true, - .flush_mode = FLUSH_AT_TXN_BOUNDARY, + .flush_mode = FLUSH_ANYTIME, /* so pg_stat_database entries can be seen in all databases */ .accessed_across_databases = true, @@ -316,7 +316,7 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE] .fixed_amount = false, .write_to_file = true, - .flush_mode = FLUSH_AT_TXN_BOUNDARY, + .flush_mode = FLUSH_ANYTIME, .shared_size = sizeof(PgStatShared_Relation), .shared_data_off = offsetof(PgStatShared_Relation, stats), @@ -1354,7 +1354,8 @@ pgstat_delete_pending_entry(PgStat_EntryRef *entry_ref) /* * Flush out pending variable-numbered stats. * - * If anytime_only is true, only flushes FLUSH_ANYTIME entries. + * If anytime_only is true, only flushes FLUSH_ANYTIME entries. For entries + * that support it, the callback may flush only non-transactional fields. * This is safe to call inside transactions. * * If anytime_only is false, flushes all entries. 
@@ -1385,6 +1386,7 @@ pgstat_flush_pending_entries(bool nowait, bool anytime_only) PgStat_Kind kind = key.kind; const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind); bool did_flush; + bool is_partial_flush = false; dlist_node *next; Assert(!kind_info->fixed_amount); @@ -1405,7 +1407,8 @@ pgstat_flush_pending_entries(bool nowait, bool anytime_only) } /* flush the stats, if possible */ - did_flush = kind_info->flush_pending_cb(entry_ref, nowait, anytime_only); + did_flush = kind_info->flush_pending_cb(entry_ref, nowait, + anytime_only, &is_partial_flush); Assert(did_flush || nowait); @@ -1415,8 +1418,8 @@ pgstat_flush_pending_entries(bool nowait, bool anytime_only) else next = NULL; - /* if successfully flushed, remove entry */ - if (did_flush) + /* if successful non-partial flush, remove entry */ + if (did_flush && !is_partial_flush) pgstat_delete_pending_entry(entry_ref); else have_pending = true; diff --git a/src/backend/utils/activity/pgstat_database.c b/src/backend/utils/activity/pgstat_database.c index 8e86df60461..59dd0790fd7 100644 --- a/src/backend/utils/activity/pgstat_database.c +++ b/src/backend/utils/activity/pgstat_database.c @@ -435,7 +435,8 @@ pgstat_reset_database_timestamp(Oid dboid, TimestampTz ts) * false without flushing the entry. Otherwise returns true. 
*/ bool -pgstat_database_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only) +pgstat_database_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, + bool anytime_only, bool *is_partial) { PgStatShared_Database *sharedent; PgStat_StatDBEntry *pendingent; @@ -443,6 +444,9 @@ pgstat_database_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o pendingent = (PgStat_StatDBEntry *) entry_ref->pending; sharedent = (PgStatShared_Database *) entry_ref->shared_stats; + /* this is not a partial flush */ + *is_partial = false; + if (!pgstat_lock_entry(entry_ref, nowait)) return false; diff --git a/src/backend/utils/activity/pgstat_function.c b/src/backend/utils/activity/pgstat_function.c index 5ba4958382f..44193c93fc7 100644 --- a/src/backend/utils/activity/pgstat_function.c +++ b/src/backend/utils/activity/pgstat_function.c @@ -190,7 +190,8 @@ pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu, bool finalize) * false without flushing the entry. Otherwise returns true. 
*/ bool -pgstat_function_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only) +pgstat_function_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, + bool anytime_only, bool *is_partial) { PgStat_FunctionCounts *localent; PgStatShared_Function *shfuncent; @@ -200,6 +201,9 @@ pgstat_function_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o localent = (PgStat_FunctionCounts *) entry_ref->pending; shfuncent = (PgStatShared_Function *) entry_ref->shared_stats; + /* this is not a partial flush */ + *is_partial = false; + /* localent always has non-zero content */ if (!pgstat_lock_entry(entry_ref, nowait)) diff --git a/src/backend/utils/activity/pgstat_relation.c b/src/backend/utils/activity/pgstat_relation.c index ae2952cae89..62363dacfe1 100644 --- a/src/backend/utils/activity/pgstat_relation.c +++ b/src/backend/utils/activity/pgstat_relation.c @@ -47,7 +47,19 @@ static void add_tabstat_xact_level(PgStat_TableStatus *pgstat_info, int nest_lev static void ensure_tabstat_xact_level(PgStat_TableStatus *pgstat_info); static void save_truncdrop_counters(PgStat_TableXactStatus *trans, bool is_drop); static void restore_truncdrop_counters(PgStat_TableXactStatus *trans); +static void flush_relation_anytime_stats(PgStat_StatTabEntry *tabentry, + PgStat_TableCounts *counts, bool anytime_only); +/* + * Update database statistics with non-transactional stats. + */ +#define UPDATE_DATABASE_ANYTIME_STATS(dbentry, counts) \ + do { \ + (dbentry)->tuples_returned += (counts)->tuples_returned; \ + (dbentry)->tuples_fetched += (counts)->tuples_fetched; \ + (dbentry)->blocks_fetched += (counts)->blocks_fetched; \ + (dbentry)->blocks_hit += (counts)->blocks_hit; \ + } while (0) /* * Copy stats between relations. This is used for things like REINDEX @@ -789,6 +801,29 @@ pgstat_twophase_postabort(FullTransactionId fxid, uint16 info, rec->tuples_inserted + rec->tuples_updated; } +/* + * Helper function to flush non-transactional statistics. 
+ */ +static void +flush_relation_anytime_stats(PgStat_StatTabEntry *tabentry, PgStat_TableCounts *counts, + bool anytime_only) +{ + TimestampTz t; + + tabentry->numscans += counts->numscans; + if (counts->numscans) + { + t = anytime_only ? GetCurrentTimestamp() : GetCurrentTransactionStopTimestamp(); + if (t > tabentry->lastscan) + tabentry->lastscan = t; + } + + tabentry->tuples_returned += counts->tuples_returned; + tabentry->tuples_fetched += counts->tuples_fetched; + tabentry->blocks_fetched += counts->blocks_fetched; + tabentry->blocks_hit += counts->blocks_hit; +} + /* * Flush out pending stats for the entry * @@ -797,9 +832,17 @@ pgstat_twophase_postabort(FullTransactionId fxid, uint16 info, * * Some of the stats are copied to the corresponding pending database stats * entry when successfully flushing. + * + * If anytime_only is true, only non-transactional fields are flushed + * (numscans, tuples_returned, tuples_fetched, blocks_fetched, blocks_hit). + * Transactional fields remain pending until transaction boundary. + * + * In that case, the flushed fields are cleared from the pending entry to + * prevent double-counting at transaction boundary. 
*/ bool -pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only) +pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, + bool anytime_only, bool *is_partial) { Oid dboid; PgStat_TableStatus *lstats; /* pending stats entry */ @@ -807,12 +850,13 @@ pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o PgStat_StatTabEntry *tabentry; /* table entry of shared stats */ PgStat_StatDBEntry *dbentry; /* pending database entry */ - Assert(!anytime_only); - dboid = entry_ref->shared_entry->key.dboid; lstats = (PgStat_TableStatus *) entry_ref->pending; shtabstats = (PgStatShared_Relation *) entry_ref->shared_stats; + /* this is a partial flush if in anytime only mode */ + *is_partial = anytime_only; + /* * Ignore entries that didn't accumulate any actual counts, such as * indexes that were opened by the planner but not used. @@ -824,19 +868,36 @@ pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o if (!pgstat_lock_entry(entry_ref, nowait)) return false; - /* add the values to the shared entry. */ tabentry = &shtabstats->stats; - tabentry->numscans += lstats->counts.numscans; - if (lstats->counts.numscans) + if (anytime_only) { - TimestampTz t = GetCurrentTransactionStopTimestamp(); - if (t > tabentry->lastscan) - tabentry->lastscan = t; + /* Flush non-transactional statistics */ + flush_relation_anytime_stats(tabentry, &lstats->counts, true); + + pgstat_unlock_entry(entry_ref); + + /* Also update the corresponding fields in database stats */ + dbentry = pgstat_prep_database_pending(dboid); + UPDATE_DATABASE_ANYTIME_STATS(dbentry, &lstats->counts); + + /* + * Clear the flushed fields from pending stats to prevent + * double-counting when we flush all fields at transaction boundary. 
+ */ + lstats->counts.numscans = 0; + lstats->counts.tuples_returned = 0; + lstats->counts.tuples_fetched = 0; + lstats->counts.blocks_fetched = 0; + lstats->counts.blocks_hit = 0; + + return true; } - tabentry->tuples_returned += lstats->counts.tuples_returned; - tabentry->tuples_fetched += lstats->counts.tuples_fetched; + + /* Flush non-transactional statistics */ + flush_relation_anytime_stats(tabentry, &lstats->counts, false); + tabentry->tuples_inserted += lstats->counts.tuples_inserted; tabentry->tuples_updated += lstats->counts.tuples_updated; tabentry->tuples_deleted += lstats->counts.tuples_deleted; @@ -866,9 +927,6 @@ pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o */ tabentry->ins_since_vacuum += lstats->counts.tuples_inserted; - tabentry->blocks_fetched += lstats->counts.blocks_fetched; - tabentry->blocks_hit += lstats->counts.blocks_hit; - /* Clamp live_tuples in case of negative delta_live_tuples */ tabentry->live_tuples = Max(tabentry->live_tuples, 0); /* Likewise for dead_tuples */ @@ -878,13 +936,11 @@ pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_o /* The entry was successfully flushed, add the same to database stats */ dbentry = pgstat_prep_database_pending(dboid); - dbentry->tuples_returned += lstats->counts.tuples_returned; - dbentry->tuples_fetched += lstats->counts.tuples_fetched; + UPDATE_DATABASE_ANYTIME_STATS(dbentry, &lstats->counts); + dbentry->tuples_inserted += lstats->counts.tuples_inserted; dbentry->tuples_updated += lstats->counts.tuples_updated; dbentry->tuples_deleted += lstats->counts.tuples_deleted; - dbentry->blocks_fetched += lstats->counts.blocks_fetched; - dbentry->blocks_hit += lstats->counts.blocks_hit; return true; } diff --git a/src/backend/utils/activity/pgstat_subscription.c b/src/backend/utils/activity/pgstat_subscription.c index 6b6eec7578d..bb32782a9d3 100644 --- a/src/backend/utils/activity/pgstat_subscription.c +++ 
b/src/backend/utils/activity/pgstat_subscription.c @@ -117,7 +117,8 @@ pgstat_fetch_stat_subscription(Oid subid) * false without flushing the entry. Otherwise returns true. */ bool -pgstat_subscription_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only) +pgstat_subscription_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, + bool anytime_only, bool *is_partial) { PgStat_BackendSubEntry *localent; PgStatShared_Subscription *shsubent; @@ -127,6 +128,9 @@ pgstat_subscription_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anyti localent = (PgStat_BackendSubEntry *) entry_ref->pending; shsubent = (PgStatShared_Subscription *) entry_ref->shared_stats; + /* this is not a partial flush */ + *is_partial = false; + /* localent always has non-zero content */ if (!pgstat_lock_entry(entry_ref, nowait)) diff --git a/src/include/pgstat.h b/src/include/pgstat.h index 90237c70829..d26ff26e3e3 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -20,6 +20,7 @@ #include "utils/backend_status.h" /* for backward compatibility */ /* IWYU pragma: export */ #include "utils/pgstat_kind.h" #include "utils/relcache.h" +#include "utils/timeout.h" #include "utils/wait_event.h" /* for backward compatibility */ /* IWYU pragma: export */ @@ -536,10 +537,11 @@ extern void pgstat_report_anytime_stat(bool force); extern void pgstat_force_next_flush(void); /* - * Schedule the next anytime stats update timeout. + * Schedule the next anytime stats update timeout and mark that we have + * mixed anytime stats pending. * * This should be called whenever accumulating statistics that support - * FLUSH_ANYTIME flushing mode. + * FLUSH_ANYTIME or FLUSH_MIXED flushing modes. 
*/ #define pgstat_schedule_anytime_update() \ do { \ @@ -705,37 +707,58 @@ extern void pgstat_report_analyze(Relation rel, #define pgstat_count_heap_scan(rel) \ do { \ if (pgstat_should_count_relation(rel)) \ + { \ (rel)->pgstat_info->counts.numscans++; \ + pgstat_schedule_anytime_update(); \ + } \ } while (0) #define pgstat_count_heap_getnext(rel) \ do { \ if (pgstat_should_count_relation(rel)) \ + { \ (rel)->pgstat_info->counts.tuples_returned++; \ + pgstat_schedule_anytime_update(); \ + } \ } while (0) #define pgstat_count_heap_fetch(rel) \ do { \ if (pgstat_should_count_relation(rel)) \ + { \ (rel)->pgstat_info->counts.tuples_fetched++; \ + pgstat_schedule_anytime_update(); \ + } \ } while (0) #define pgstat_count_index_scan(rel) \ do { \ if (pgstat_should_count_relation(rel)) \ + { \ (rel)->pgstat_info->counts.numscans++; \ + pgstat_schedule_anytime_update(); \ + } \ } while (0) #define pgstat_count_index_tuples(rel, n) \ do { \ if (pgstat_should_count_relation(rel)) \ + { \ (rel)->pgstat_info->counts.tuples_returned += (n); \ + pgstat_schedule_anytime_update(); \ + } \ } while (0) #define pgstat_count_buffer_read(rel) \ do { \ if (pgstat_should_count_relation(rel)) \ + { \ (rel)->pgstat_info->counts.blocks_fetched++; \ + pgstat_schedule_anytime_update(); \ + } \ } while (0) #define pgstat_count_buffer_hit(rel) \ do { \ if (pgstat_should_count_relation(rel)) \ + { \ (rel)->pgstat_info->counts.blocks_hit++; \ + pgstat_schedule_anytime_update(); \ + } \ } while (0) extern void pgstat_count_heap_insert(Relation rel, PgStat_Counter n); diff --git a/src/include/utils/pgstat_internal.h b/src/include/utils/pgstat_internal.h index 607f4255268..1a2114aad8a 100644 --- a/src/include/utils/pgstat_internal.h +++ b/src/include/utils/pgstat_internal.h @@ -322,8 +322,10 @@ typedef struct PgStat_KindInfo * that cannot use PgStat_EntryRef->pending. * * The anytime_only parameter indicates whether this is an anytime flush. 
+ * The is_partial parameter indicates whether this is a partial flush. */ - bool (*flush_pending_cb) (PgStat_EntryRef *sr, bool nowait, bool anytime_only); + bool (*flush_pending_cb) (PgStat_EntryRef *sr, bool nowait, + bool anytime_only, bool *is_partial); /* * For variable-numbered stats: delete pending stats. Optional. @@ -757,7 +759,8 @@ extern void AtEOXact_PgStat_Database(bool isCommit, bool parallel); extern PgStat_StatDBEntry *pgstat_prep_database_pending(Oid dboid); extern void pgstat_reset_database_timestamp(Oid dboid, TimestampTz ts); -extern bool pgstat_database_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only); +extern bool pgstat_database_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, + bool anytime_only, bool *is_partial); extern void pgstat_database_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts); @@ -765,7 +768,8 @@ extern void pgstat_database_reset_timestamp_cb(PgStatShared_Common *header, Time * Functions in pgstat_function.c */ -extern bool pgstat_function_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only); +extern bool pgstat_function_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, + bool anytime_only, bool *is_partial); extern void pgstat_function_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts); @@ -790,7 +794,8 @@ extern void AtEOSubXact_PgStat_Relations(PgStat_SubXactStatus *xact_state, bool extern void AtPrepare_PgStat_Relations(PgStat_SubXactStatus *xact_state); extern void PostPrepare_PgStat_Relations(PgStat_SubXactStatus *xact_state); -extern bool pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only); +extern bool pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, + bool anytime_only, bool *is_partial); extern void pgstat_relation_delete_pending_cb(PgStat_EntryRef *entry_ref); extern void pgstat_relation_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts); @@ -858,7 +863,8 @@ extern void 
pgstat_wal_snapshot_cb(void); * Functions in pgstat_subscription.c */ -extern bool pgstat_subscription_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only); +extern bool pgstat_subscription_flush_cb(PgStat_EntryRef *entry_ref, bool nowait, + bool anytime_only, bool *is_partial); extern void pgstat_subscription_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts); diff --git a/src/test/isolation/expected/stats.out b/src/test/isolation/expected/stats.out index cfad309ccf3..11e3e57806d 100644 --- a/src/test/isolation/expected/stats.out +++ b/src/test/isolation/expected/stats.out @@ -2245,6 +2245,108 @@ seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum (1 row) +starting permutation: s2_begin s2_table_select s1_sleep s1_table_stats s2_track_counts_off s2_table_select s1_sleep s1_table_stats s2_track_counts_on s2_table_select s1_sleep s1_table_stats s2_table_drop s2_commit +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s2_begin: BEGIN; +step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value; +key|value +---+----- +k0 | 1 +(1 row) + +step s1_sleep: SELECT pg_sleep(1.5); +pg_sleep +-------- + +(1 row) + +step s1_table_stats: + SELECT + pg_stat_get_numscans(tso.oid) AS seq_scan, + pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read, + pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins, + pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd, + pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del, + pg_stat_get_live_tuples(tso.oid) AS n_live_tup, + pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup, + pg_stat_get_vacuum_count(tso.oid) AS vacuum_count + FROM test_stat_oid AS tso + WHERE tso.name = 'test_stat_tab' + +seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count +--------+------------+---------+---------+---------+----------+----------+------------ + 1| 1| 1| 0| 0| 1| 0| 0 +(1 row) + +step s2_track_counts_off: SET track_counts = off; +step 
s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value; +key|value +---+----- +k0 | 1 +(1 row) + +step s1_sleep: SELECT pg_sleep(1.5); +pg_sleep +-------- + +(1 row) + +step s1_table_stats: + SELECT + pg_stat_get_numscans(tso.oid) AS seq_scan, + pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read, + pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins, + pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd, + pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del, + pg_stat_get_live_tuples(tso.oid) AS n_live_tup, + pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup, + pg_stat_get_vacuum_count(tso.oid) AS vacuum_count + FROM test_stat_oid AS tso + WHERE tso.name = 'test_stat_tab' + +seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count +--------+------------+---------+---------+---------+----------+----------+------------ + 1| 1| 1| 0| 0| 1| 0| 0 +(1 row) + +step s2_track_counts_on: SET track_counts = on; +step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value; +key|value +---+----- +k0 | 1 +(1 row) + +step s1_sleep: SELECT pg_sleep(1.5); +pg_sleep +-------- + +(1 row) + +step s1_table_stats: + SELECT + pg_stat_get_numscans(tso.oid) AS seq_scan, + pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read, + pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins, + pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd, + pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del, + pg_stat_get_live_tuples(tso.oid) AS n_live_tup, + pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup, + pg_stat_get_vacuum_count(tso.oid) AS vacuum_count + FROM test_stat_oid AS tso + WHERE tso.name = 'test_stat_tab' + +seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count +--------+------------+---------+---------+---------+----------+----------+------------ + 2| 2| 1| 0| 0| 1| 0| 0 +(1 row) + +step s2_table_drop: DROP TABLE test_stat_tab; +step s2_commit: COMMIT; + starting permutation: s1_track_counts_off s1_table_stats s1_track_counts_on 
pg_stat_force_next_flush ------------------------ diff --git a/src/test/isolation/expected/stats_1.out b/src/test/isolation/expected/stats_1.out index e1d937784cb..aef582e7582 100644 --- a/src/test/isolation/expected/stats_1.out +++ b/src/test/isolation/expected/stats_1.out @@ -2253,6 +2253,108 @@ seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum (1 row) +starting permutation: s2_begin s2_table_select s1_sleep s1_table_stats s2_track_counts_off s2_table_select s1_sleep s1_table_stats s2_track_counts_on s2_table_select s1_sleep s1_table_stats s2_table_drop s2_commit +pg_stat_force_next_flush +------------------------ + +(1 row) + +step s2_begin: BEGIN; +step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value; +key|value +---+----- +k0 | 1 +(1 row) + +step s1_sleep: SELECT pg_sleep(1.5); +pg_sleep +-------- + +(1 row) + +step s1_table_stats: + SELECT + pg_stat_get_numscans(tso.oid) AS seq_scan, + pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read, + pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins, + pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd, + pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del, + pg_stat_get_live_tuples(tso.oid) AS n_live_tup, + pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup, + pg_stat_get_vacuum_count(tso.oid) AS vacuum_count + FROM test_stat_oid AS tso + WHERE tso.name = 'test_stat_tab' + +seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count +--------+------------+---------+---------+---------+----------+----------+------------ + 1| 1| 1| 0| 0| 1| 0| 0 +(1 row) + +step s2_track_counts_off: SET track_counts = off; +step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value; +key|value +---+----- +k0 | 1 +(1 row) + +step s1_sleep: SELECT pg_sleep(1.5); +pg_sleep +-------- + +(1 row) + +step s1_table_stats: + SELECT + pg_stat_get_numscans(tso.oid) AS seq_scan, + pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read, + pg_stat_get_tuples_inserted(tso.oid) 
AS n_tup_ins, + pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd, + pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del, + pg_stat_get_live_tuples(tso.oid) AS n_live_tup, + pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup, + pg_stat_get_vacuum_count(tso.oid) AS vacuum_count + FROM test_stat_oid AS tso + WHERE tso.name = 'test_stat_tab' + +seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count +--------+------------+---------+---------+---------+----------+----------+------------ + 1| 1| 1| 0| 0| 1| 0| 0 +(1 row) + +step s2_track_counts_on: SET track_counts = on; +step s2_table_select: SELECT * FROM test_stat_tab ORDER BY key, value; +key|value +---+----- +k0 | 1 +(1 row) + +step s1_sleep: SELECT pg_sleep(1.5); +pg_sleep +-------- + +(1 row) + +step s1_table_stats: + SELECT + pg_stat_get_numscans(tso.oid) AS seq_scan, + pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read, + pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins, + pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd, + pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del, + pg_stat_get_live_tuples(tso.oid) AS n_live_tup, + pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup, + pg_stat_get_vacuum_count(tso.oid) AS vacuum_count + FROM test_stat_oid AS tso + WHERE tso.name = 'test_stat_tab' + +seq_scan|seq_tup_read|n_tup_ins|n_tup_upd|n_tup_del|n_live_tup|n_dead_tup|vacuum_count +--------+------------+---------+---------+---------+----------+----------+------------ + 2| 2| 1| 0| 0| 1| 0| 0 +(1 row) + +step s2_table_drop: DROP TABLE test_stat_tab; +step s2_commit: COMMIT; + starting permutation: s1_track_counts_off s1_table_stats s1_track_counts_on pg_stat_force_next_flush ------------------------ diff --git a/src/test/isolation/specs/stats.spec b/src/test/isolation/specs/stats.spec index da16710da0f..47414eb6009 100644 --- a/src/test/isolation/specs/stats.spec +++ b/src/test/isolation/specs/stats.spec @@ -50,6 +50,8 @@ step s1_rollback { ROLLBACK; } step s1_prepare_a { PREPARE TRANSACTION 
'a'; } step s1_commit_prepared_a { COMMIT PREPARED 'a'; } step s1_rollback_prepared_a { ROLLBACK PREPARED 'a'; } +# Has to be greater than session 2 stats_flush_interval +step s1_sleep { SELECT pg_sleep(1.5); } # Function stats steps step s1_ff { SELECT pg_stat_force_next_flush(); } @@ -132,12 +134,16 @@ step s1_slru_check_stats { session s2 -setup { SET stats_fetch_consistency = 'none'; } +setup { + SET stats_fetch_consistency = 'none'; + SET stats_flush_interval = '1s'; +} step s2_begin { BEGIN; } step s2_commit { COMMIT; } step s2_commit_prepared_a { COMMIT PREPARED 'a'; } step s2_rollback_prepared_a { ROLLBACK PREPARED 'a'; } step s2_ff { SELECT pg_stat_force_next_flush(); } +step s2_table_drop { DROP TABLE test_stat_tab; } # Function stats steps step s2_track_funcs_all { SET track_functions = 'all'; } @@ -156,6 +162,8 @@ step s2_func_stats { } # Relation stats steps +step s2_track_counts_on { SET track_counts = on; } +step s2_track_counts_off { SET track_counts = off; } step s2_table_select { SELECT * FROM test_stat_tab ORDER BY key, value; } step s2_table_update_k1 { UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1';} @@ -435,6 +443,23 @@ permutation s1_table_drop s1_table_stats +### Check that some stats are updated (seq_scan and seq_tup_read) +### while the transaction is still running +permutation + s2_begin + s2_table_select + s1_sleep + s1_table_stats + s2_track_counts_off + s2_table_select + s1_sleep + s1_table_stats + s2_track_counts_on + s2_table_select + s1_sleep + s1_table_stats + s2_table_drop + s2_commit ### Check that we don't count changes with track counts off, but allow access ### to prior stats diff --git a/src/test/modules/test_custom_stats/test_custom_var_stats.c b/src/test/modules/test_custom_stats/test_custom_var_stats.c index e9f1bda6b32..59f531df5f7 100644 --- a/src/test/modules/test_custom_stats/test_custom_var_stats.c +++ b/src/test/modules/test_custom_stats/test_custom_var_stats.c @@ -85,7 +85,8 @@ static dsa_area 
*custom_stats_description_dsa = NULL; /* Flush callback: merge pending stats into shared memory */ static bool test_custom_stats_var_flush_pending_cb(PgStat_EntryRef *entry_ref, - bool nowait, bool anytime_only); + bool nowait, bool anytime_only, + bool *is_partial); /* Serialization callback: write auxiliary entry data */ static void test_custom_stats_var_to_serialized_data(const PgStat_HashKey *key, @@ -153,7 +154,8 @@ _PG_init(void) * Returns false only if nowait=true and lock acquisition fails. */ static bool -test_custom_stats_var_flush_pending_cb(PgStat_EntryRef *entry_ref, bool nowait, bool anytime_only) +test_custom_stats_var_flush_pending_cb(PgStat_EntryRef *entry_ref, bool nowait, + bool anytime_only, bool *is_partial) { PgStat_StatCustomVarEntry *pending_entry; PgStatShared_CustomVarEntry *shared_entry; @@ -161,6 +163,9 @@ test_custom_stats_var_flush_pending_cb(PgStat_EntryRef *entry_ref, bool nowait, pending_entry = (PgStat_StatCustomVarEntry *) entry_ref->pending; shared_entry = (PgStatShared_CustomVarEntry *) entry_ref->shared_stats; + /* this is not a partial flush */ + *is_partial = false; + if (!pgstat_lock_entry(entry_ref, nowait)) return false; -- 2.34.1