From e044bacedab503d1cd732146e1b9947406191bb6 Mon Sep 17 00:00:00 2001 From: Reid Thompson Date: Sat, 4 Jun 2022 22:23:59 -0400 Subject: [PATCH 2/2] Add the ability to limit the amount of memory that can be allocated to backends. This builds on the work that adds backend memory allocated to pg_stat_activity. Add GUC variable max_total_backend_memory. Specifies a limit to the amount of memory (in MB) that may be allocated to backends in total (i.e. this is not a per user or per backend limit). If unset, or set to 0, it is disabled. It is intended as a resource to help avoid the OOM killer on Linux and manage resources in general. A backend request that would push the total over the limit will be denied with an out of memory error causing that backend's current query/transaction to fail. Due to the dynamic nature of memory allocations, this limit is not exact. If the total is within 1.5MB of the limit and two backends request 1MB each at the same time, both may be allocated, exceeding the limit. Further requests will not be allocated until the total drops below the limit. Keep this in mind when setting this value. This limit does not affect auxiliary backend processes. Backend memory allocations are displayed in the pg_stat_activity view. 
--- doc/src/sgml/config.sgml | 26 ++ src/backend/postmaster/autovacuum.c | 8 +- src/backend/postmaster/postmaster.c | 17 +- src/backend/postmaster/syslogger.c | 4 +- src/backend/storage/ipc/dsm_impl.c | 35 ++- src/backend/storage/lmgr/proc.c | 3 + src/backend/utils/activity/backend_status.c | 223 +++++++++++++++++- src/backend/utils/misc/guc_tables.c | 11 + src/backend/utils/misc/postgresql.conf.sample | 3 + src/backend/utils/mmgr/aset.c | 43 +++- src/backend/utils/mmgr/generation.c | 21 +- src/backend/utils/mmgr/slab.c | 21 +- src/include/storage/proc.h | 7 + src/include/utils/backend_status.h | 222 +++++++++++++++-- 14 files changed, 560 insertions(+), 84 deletions(-) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index e5c41cc6c6..1bff68b1ec 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -2113,6 +2113,32 @@ include_dir 'conf.d' + + max_total_backend_memory (integer) + + max_total_backend_memory configuration parameter + + + + + Specifies a limit to the amount of memory (MB) that may be allocated to + backends in total (i.e. this is not a per user or per backend limit). + If unset, or set to 0 it is disabled. A backend request that would + push the total over the limit will be denied with an out of memory + error causing that backend's current query/transaction to fail. Due to + the dynamic nature of memory allocations, this limit is not exact. If + within 1.5MB of the limit and two backends request 1MB each at the same + time both may be allocated, and exceed the limit. Further requests will + not be allocated until dropping below the limit. Keep this in mind when + setting this value. This limit does not affect auxiliary backend + processes . Backend memory + allocations (allocated_bytes) are displayed in the + pg_stat_activity + view. 
+ + + + diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 59c9bcf8c4..ee03d08dd9 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -407,8 +407,8 @@ StartAutoVacLauncher(void) #ifndef EXEC_BACKEND case 0: - /* Zero allocated bytes to avoid double counting parent allocation */ - pgstat_zero_my_allocated_bytes(); + /* Init allocated bytes to avoid double counting parent allocation */ + pgstat_init_allocated_bytes(); /* in postmaster child ... */ InitPostmasterChild(); @@ -1488,8 +1488,8 @@ StartAutoVacWorker(void) #ifndef EXEC_BACKEND case 0: - /* Zero allocated bytes to avoid double counting parent allocation */ - pgstat_zero_my_allocated_bytes(); + /* Init allocated bytes to avoid double counting parent allocation */ + pgstat_init_allocated_bytes(); /* in postmaster child ... */ InitPostmasterChild(); diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 1f09781be8..358a7fa980 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -4167,8 +4167,8 @@ BackendStartup(Port *port) { free(bn); - /* Zero allocated bytes to avoid double counting parent allocation */ - pgstat_zero_my_allocated_bytes(); + /* Init allocated bytes to avoid double counting parent allocation */ + pgstat_init_allocated_bytes(); /* Detangle from postmaster */ InitPostmasterChild(); @@ -5377,10 +5377,11 @@ StartChildProcess(AuxProcType type) MemoryContextDelete(PostmasterContext); PostmasterContext = NULL; - /* Zero allocated bytes to avoid double counting parent allocation. + /* + * Init allocated bytes to avoid double counting parent allocation. * Needs to be after the MemoryContextDelete(PostmasterContext) above. 
*/ - pgstat_zero_my_allocated_bytes(); + pgstat_init_allocated_bytes(); AuxiliaryProcessMain(type); /* does not return */ } @@ -5775,10 +5776,12 @@ do_start_bgworker(RegisteredBgWorker *rw) MemoryContextDelete(PostmasterContext); PostmasterContext = NULL; - /* Zero allocated bytes to avoid double counting parent allocation. - * Needs to be after the MemoryContextDelete(PostmasterContext) above. + /* + * Init allocated bytes to avoid double counting parent + * allocation. Needs to be after the + * MemoryContextDelete(PostmasterContext) above. */ - pgstat_zero_my_allocated_bytes(); + pgstat_init_allocated_bytes(); StartBackgroundWorker(); diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c index 9081ae140f..e8e31ce403 100644 --- a/src/backend/postmaster/syslogger.c +++ b/src/backend/postmaster/syslogger.c @@ -679,8 +679,8 @@ SysLogger_Start(void) #ifndef EXEC_BACKEND case 0: - /* Zero allocated bytes to avoid double counting parent allocation */ - pgstat_zero_my_allocated_bytes(); + /* Init allocated bytes to avoid double counting parent allocation */ + pgstat_init_allocated_bytes(); /* in postmaster child ... */ InitPostmasterChild(); diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c index 22885c7bd2..1131de06c0 100644 --- a/src/backend/storage/ipc/dsm_impl.c +++ b/src/backend/storage/ipc/dsm_impl.c @@ -240,7 +240,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, * allocation. 
*/ if (op == DSM_OP_DESTROY) - pgstat_report_allocated_bytes(*mapped_size, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(*mapped_size, PG_ALLOC_DSM); *mapped_address = NULL; *mapped_size = 0; if (op == DSM_OP_DESTROY && shm_unlink(name) != 0) @@ -254,6 +254,10 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, return true; } + /* Do not exceed maximum allowed memory allocation */ + if (op == DSM_OP_CREATE && exceeds_max_total_bkend_mem(request_size)) + return false; + /* * Create new segment or open an existing one for attach. * @@ -362,13 +366,10 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, * allocation increase. */ if (request_size > *mapped_size) - { - pgstat_report_allocated_bytes(request_size - *mapped_size, - PG_ALLOC_INCREASE); - } + pgstat_report_allocated_bytes_increase(request_size - *mapped_size, PG_ALLOC_DSM); #else - pgstat_report_allocated_bytes(*mapped_size, PG_ALLOC_DECREASE); - pgstat_report_allocated_bytes(request_size, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_decrease(*mapped_size, PG_ALLOC_DSM); + pgstat_report_allocated_bytes_increase(request_size, PG_ALLOC_DSM); #endif } *mapped_address = address; @@ -525,6 +526,10 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, int flags = IPCProtection; size_t segsize; + /* Do not exceed maximum allowed memory allocation */ + if (op == DSM_OP_CREATE && exceeds_max_total_bkend_mem(request_size)) + return false; + /* * Allocate the memory BEFORE acquiring the resource, so that we don't * leak the resource if memory allocation fails. @@ -583,7 +588,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, * allocation. 
*/ if (op == DSM_OP_DESTROY) - pgstat_report_allocated_bytes(*mapped_size, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(*mapped_size, PG_ALLOC_DSM); *mapped_address = NULL; *mapped_size = 0; if (op == DSM_OP_DESTROY && shmctl(ident, IPC_RMID, NULL) < 0) @@ -637,7 +642,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, * allocated in pg_stat_activity for the creator process. */ if (op == DSM_OP_CREATE) - pgstat_report_allocated_bytes(request_size, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(request_size, PG_ALLOC_DSM); *mapped_address = address; *mapped_size = request_size; @@ -712,13 +717,17 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, * allocation. */ if (op == DSM_OP_DESTROY) - pgstat_report_allocated_bytes(*mapped_size, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(*mapped_size, PG_ALLOC_DSM); *impl_private = NULL; *mapped_address = NULL; *mapped_size = 0; return true; } + /* Do not exceed maximum allowed memory allocation */ + if (op == DSM_OP_CREATE && exceeds_max_total_bkend_mem(request_size)) + return false; + /* Create new segment or open an existing one for attach. */ if (op == DSM_OP_CREATE) { @@ -834,7 +843,7 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, * allocated in pg_stat_activity for the creator process. */ if (op == DSM_OP_CREATE) - pgstat_report_allocated_bytes(info.RegionSize, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(info.RegionSize, PG_ALLOC_DSM); *mapped_address = address; *mapped_size = info.RegionSize; *impl_private = hmap; @@ -885,7 +894,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, * shown allocated in pg_stat_activity when the creator destroys the * allocation. 
*/ - pgstat_report_allocated_bytes(*mapped_size, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(*mapped_size, PG_ALLOC_DSM); *mapped_address = NULL; *mapped_size = 0; if (op == DSM_OP_DESTROY && unlink(name) != 0) @@ -1013,7 +1022,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, * allocated in pg_stat_activity for the creator process. */ if (op == DSM_OP_CREATE) - pgstat_report_allocated_bytes(request_size, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(request_size, PG_ALLOC_DSM); *mapped_address = address; *mapped_size = request_size; diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 22b4278610..2f43bbb4c4 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -180,6 +180,9 @@ InitProcGlobal(void) ProcGlobal->checkpointerLatch = NULL; pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PGPROCNO); pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PGPROCNO); + /* Convert max_total_bkend_mem to bytes and store */ + if (max_total_bkend_mem > 0) + pg_atomic_init_u64(&ProcGlobal->max_total_bkend_mem_bytes, max_total_bkend_mem * 1024 * 1024); /* * Create and initialize all the PGPROC structures we'll need. There are diff --git a/src/backend/utils/activity/backend_status.c b/src/backend/utils/activity/backend_status.c index 7baf2db57d..ba5c92b573 100644 --- a/src/backend/utils/activity/backend_status.c +++ b/src/backend/utils/activity/backend_status.c @@ -45,13 +45,57 @@ bool pgstat_track_activities = false; int pgstat_track_activity_query_size = 1024; +/* + * Max backend memory allocation allowed (MB). 0 = disabled. + * Centralized bucket ProcGlobal->max_total_bkend_mem is initialized + * as a byte representation of this value in InitProcGlobal(). 
+ */ +int max_total_bkend_mem = 0; /* exposed so that backend_progress.c can access it */ PgBackendStatus *MyBEEntry = NULL; -/* Memory allocated to this backend prior to pgstats initialization */ -uint64 local_my_allocated_bytes = 0; -uint64 *my_allocated_bytes = &local_my_allocated_bytes; +/* + * Define initial allocation allowance for a backend. + * + * NOTE: initial_allocation_allowance && allocation_allowance_refill_qty + * may be candidates for future GUC variables. Arbitrary 1MB selected initially. + */ +uint64 initial_allocation_allowance = 1024 * 1024; +uint64 allocation_allowance_refill_qty = 1024 * 1024; + +/* + * Local counter to manage shared memory allocations. At backend startup, set to + * initial_allocation_allowance via pgstat_init_allocated_bytes(). Decrease as + * memory is malloc'd. When exhausted, atomically refill if available from + * ProcGlobal->max_total_bkend_mem via exceeds_max_total_bkend_mem(). + */ +uint64 allocation_allowance = 0; + +/* + * Local counter of free'd shared memory. Return to global + * max_total_bkend_mem when return threshold is met. Arbitrary 1MB bytes + * selected initially. + */ +uint64 allocation_return = 0; +uint64 allocation_return_threshold = 1024 * 1024; + +/* + * Memory allocated to this backend prior to pgstats initialization. Migrated to + * shared memory on pgstats initialization. 
+ */ +uint64 local_my_allocated_bytes = 0; +uint64 *my_allocated_bytes = &local_my_allocated_bytes; + +/* Memory allocated to this backend by type */ +/* + * TODO: add code to present these along with the global shared counter via a + * new system view + */ +uint64 aset_allocated_bytes = 0; +uint64 dsm_allocated_bytes = 0; +uint64 generation_allocated_bytes = 0; +uint64 slab_allocated_bytes = 0; static PgBackendStatus *BackendStatusArray = NULL; static char *BackendAppnameBuffer = NULL; @@ -403,11 +447,18 @@ pgstat_bestart(void) lbeentry.st_progress_command_target = InvalidOid; lbeentry.st_query_id = UINT64CONST(0); - /* Alter allocation reporting from local_my_allocated_bytes to shared memory */ + /* + * Alter allocation reporting from local_my_allocated_bytes to shared + * memory + */ pgstat_set_allocated_bytes_storage(&MyBEEntry->allocated_bytes); - /* Populate sum of memory allocated prior to pgstats initialization to pgstats - * and zero the local variable. + /* + * Populate sum of memory allocated prior to pgstats initialization to + * pgstats and zero the local variable. This is a += assignment because + * InitPostgres allocates memory after pgstat_beinit but prior to + * pgstat_bestart so we have allocations to both local and shared memory + * to combine. */ lbeentry.allocated_bytes += local_my_allocated_bytes; local_my_allocated_bytes = 0; @@ -472,7 +523,8 @@ pgstat_beshutdown_hook(int code, Datum arg) volatile PgBackendStatus *beentry = MyBEEntry; /* - * Stop reporting memory allocation changes to &MyBEEntry->allocated_bytes + * Stop reporting memory allocation changes to shared memory + * &MyBEEntry->allocated_bytes */ pgstat_reset_allocated_bytes_storage(); @@ -1221,21 +1273,170 @@ pgstat_clip_activity(const char *raw_activity) * my_allocated_bytes into shared memory. 
*/ void -pgstat_set_allocated_bytes_storage(uint64 *new_allocated_bytes) +pgstat_set_allocated_bytes_storage(uint64 *allocated_bytes) { - my_allocated_bytes = new_allocated_bytes; - *new_allocated_bytes = local_my_allocated_bytes; + my_allocated_bytes = allocated_bytes; + *allocated_bytes = local_my_allocated_bytes; + + return; } /* * Reset allocated bytes storage location. * * Expected to be called during backend shutdown, before the location set up - * by pgstat_set_allocated_bytes_storage() becomes invalid. + * by pgstat_set_allocated_bytes_storage becomes invalid. */ void pgstat_reset_allocated_bytes_storage(void) { + /* + * When limiting maximum backend memory, return this backend's memory + * allocations to global. + */ + if (max_total_bkend_mem) + { + volatile PROC_HDR *procglobal = ProcGlobal; + + pg_atomic_add_fetch_u64(&procglobal->max_total_bkend_mem_bytes, + *my_allocated_bytes + allocation_allowance + + allocation_return); + + /* Reset memory allocation variables */ + allocation_allowance = 0; + allocation_return = 0; + aset_allocated_bytes = 0; + dsm_allocated_bytes = 0; + generation_allocated_bytes = 0; + slab_allocated_bytes = 0; + } + + /* Reset memory allocation variables */ + *my_allocated_bytes = local_my_allocated_bytes = 0; + + /* Point my_allocated_bytes from shared memory back to local */ my_allocated_bytes = &local_my_allocated_bytes; + + return; } +/* + * Determine if allocation request will exceed max backend memory allowed. + * Do not apply to auxiliary processes. + * Refill allocation request bucket when needed/possible. + */ +bool +exceeds_max_total_bkend_mem(uint64 allocation_request) +{ + bool result = false; + + /* + * When limiting maximum backend memory, attempt to refill allocation + * request bucket if needed. 
+ */ + if (max_total_bkend_mem && allocation_request > allocation_allowance) + { + volatile PROC_HDR *procglobal = ProcGlobal; + uint64 available_max_total_bkend_mem = 0; + bool sts = false; + + /* + * If allocation request is larger than memory refill quantity then + * attempt to increase allocation allowance with requested amount, + * otherwise fall through. If this refill fails we do not have enough + * memory to meet the request. + */ + if (allocation_request >= allocation_allowance_refill_qty) + { + while ((available_max_total_bkend_mem = pg_atomic_read_u64(&procglobal->max_total_bkend_mem_bytes)) >= allocation_request) + { + if ((result = pg_atomic_compare_exchange_u64(&procglobal->max_total_bkend_mem_bytes, + &available_max_total_bkend_mem, + available_max_total_bkend_mem - allocation_request))) + { + allocation_allowance = allocation_allowance + allocation_request; + break; + } + } + + /* + * If the atomic exchange fails, we do not have enough reserve + * memory to meet the request. Negate result to return the proper + * value. + */ + return !result; + } + + /* + * Attempt to increase allocation allowance by memory refill quantity. + * If available memory is/becomes less than memory refill quantity, + * fall through to attempt to allocate remaining available memory. 
+ */ + while ((available_max_total_bkend_mem = pg_atomic_read_u64(&procglobal->max_total_bkend_mem_bytes)) >= allocation_allowance_refill_qty) + { + if ((sts = pg_atomic_compare_exchange_u64(&procglobal->max_total_bkend_mem_bytes, + &available_max_total_bkend_mem, + available_max_total_bkend_mem - allocation_allowance_refill_qty))) + { + allocation_allowance = allocation_allowance + allocation_allowance_refill_qty; + break; + } + } + + if (!sts) + { + /* + * If available_max_total_bkend_mem is 0, no memory is currently + * available to refill with, otherwise attempt to allocate + * remaining memory available if it exceeds the requested amount + * or the requested amount if more than requested amount gets + * returned while looping. + */ + while ((available_max_total_bkend_mem = (int64) pg_atomic_read_u64(&procglobal->max_total_bkend_mem_bytes)) > 0) + { + uint64 newval = 0; + + /* + * If available memory is less than requested allocation we + * cannot fulfil request. + */ + if (available_max_total_bkend_mem < allocation_request) + break; + + /* + * If we happen to loop and a large chunk of memory has been + * returned to global, allocate request amount only. + */ + if (available_max_total_bkend_mem > allocation_request) + newval = available_max_total_bkend_mem - allocation_request; + + /* Allocate memory */ + if ((sts = pg_atomic_compare_exchange_u64(&procglobal->max_total_bkend_mem_bytes, + &available_max_total_bkend_mem, + newval))) + { + allocation_allowance = allocation_allowance + + newval == 0 ? available_max_total_bkend_mem : allocation_request; + + break; + } + } + } + + /* + * If refill is not successful, we return true, memory limit exceeded + */ + if (!sts) + result = true; + } + + /* + * Exclude auxiliary processes from the check. Return false. While we want + * to exclude them from the check, we do not want to exclude them from the + * above allocation handling. 
+ */ + if (MyAuxProcType != NotAnAuxProcess) + result = false; + + return result; +} diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index 1c0583fe26..639b63138b 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -3468,6 +3468,17 @@ struct config_int ConfigureNamesInt[] = NULL, NULL, NULL }, + { + {"max_total_backend_memory", PGC_SU_BACKEND, RESOURCES_MEM, + gettext_noop("Restrict total backend memory allocations to this max."), + gettext_noop("0 turns this feature off."), + GUC_UNIT_MB + }, + &max_total_bkend_mem, + 0, 0, INT_MAX, + NULL, NULL, NULL + }, + /* End-of-list marker */ { {NULL, 0, 0, NULL, NULL}, NULL, 0, 0, 0, NULL, NULL, NULL diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index d06074b86f..bc2d449c87 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -156,6 +156,9 @@ # mmap # (change requires restart) #min_dynamic_shared_memory = 0MB # (change requires restart) +#max_total_backend_memory = 0MB # Restrict total backend memory allocations + # to this max (in MB). 0 turns this feature + # off. # - Disk - diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c index 1a2d86239c..4d2dead51f 100644 --- a/src/backend/utils/mmgr/aset.c +++ b/src/backend/utils/mmgr/aset.c @@ -440,6 +440,10 @@ AllocSetContextCreateInternal(MemoryContext parent, else firstBlockSize = Max(firstBlockSize, initBlockSize); + /* Do not exceed maximum allowed memory allocation */ + if (exceeds_max_total_bkend_mem(firstBlockSize)) + return NULL; + /* * Allocate the initial block. Unlike other aset.c blocks, it starts with * the context header and its block header follows that. 
@@ -522,7 +526,7 @@ AllocSetContextCreateInternal(MemoryContext parent, name); ((MemoryContext) set)->mem_allocated = firstBlockSize; - pgstat_report_allocated_bytes(firstBlockSize, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(firstBlockSize, PG_ALLOC_ASET); return (MemoryContext) set; } @@ -599,7 +603,7 @@ AllocSetReset(MemoryContext context) } Assert(context->mem_allocated == keepersize); - pgstat_report_allocated_bytes(deallocation, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(deallocation, PG_ALLOC_ASET); /* Reset block size allocation sequence, too */ set->nextBlockSize = set->initBlockSize; @@ -663,7 +667,7 @@ AllocSetDelete(MemoryContext context) free(oldset); } Assert(freelist->num_free == 0); - pgstat_report_allocated_bytes(deallocation, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(deallocation, PG_ALLOC_ASET); } /* Now add the just-deleted context to the freelist. */ @@ -696,7 +700,7 @@ AllocSetDelete(MemoryContext context) } Assert(context->mem_allocated == keepersize); - pgstat_report_allocated_bytes(deallocation + context->mem_allocated, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(deallocation + context->mem_allocated, PG_ALLOC_ASET); /* Finally, free the context header, including the keeper block */ free(set); @@ -741,12 +745,17 @@ AllocSetAlloc(MemoryContext context, Size size) #endif blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ; + + /* Do not exceed maximum allowed memory allocation */ + if (exceeds_max_total_bkend_mem(blksize)) + return NULL; + block = (AllocBlock) malloc(blksize); if (block == NULL) return NULL; context->mem_allocated += blksize; - pgstat_report_allocated_bytes(blksize, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(blksize, PG_ALLOC_ASET); block->aset = set; block->freeptr = block->endptr = ((char *) block) + blksize; @@ -938,6 +947,10 @@ AllocSetAlloc(MemoryContext context, Size size) while (blksize < required_size) blksize <<= 1; + /* 
Do not exceed maximum allowed memory allocation */ + if (exceeds_max_total_bkend_mem(blksize)) + return NULL; + /* Try to allocate it */ block = (AllocBlock) malloc(blksize); @@ -957,7 +970,7 @@ AllocSetAlloc(MemoryContext context, Size size) return NULL; context->mem_allocated += blksize; - pgstat_report_allocated_bytes(blksize, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(blksize, PG_ALLOC_ASET); block->aset = set; block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ; @@ -1055,7 +1068,7 @@ AllocSetFree(void *pointer) block->next->prev = block->prev; set->header.mem_allocated -= block->endptr - ((char *) block); - pgstat_report_allocated_bytes(block->endptr - ((char *) block), PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(block->endptr - ((char *) block), PG_ALLOC_ASET); #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, block->freeptr - ((char *) block)); @@ -1176,6 +1189,18 @@ AllocSetRealloc(void *pointer, Size size) blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ; oldblksize = block->endptr - ((char *) block); + /* + * Do not exceed maximum allowed memory allocation. NOTE: checking for + * the full size here rather than just the amount of increased + * allocation to prevent a potential underflow of *my_allocation + * allowance in cases where blksize - oldblksize does not trigger a + * refill but blksize is greater than *my_allocation_allowance. 
+ * Underflow would occur with the call below to + * pgstat_report_allocated_bytes_increase() + */ + if (blksize > oldblksize && exceeds_max_total_bkend_mem(blksize)) + return NULL; + block = (AllocBlock) realloc(block, blksize); if (block == NULL) { @@ -1186,9 +1211,9 @@ AllocSetRealloc(void *pointer, Size size) /* updated separately, not to underflow when (oldblksize > blksize) */ set->header.mem_allocated -= oldblksize; - pgstat_report_allocated_bytes(oldblksize, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(oldblksize, PG_ALLOC_ASET); set->header.mem_allocated += blksize; - pgstat_report_allocated_bytes(blksize, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(blksize, PG_ALLOC_ASET); block->freeptr = block->endptr = ((char *) block) + blksize; diff --git a/src/backend/utils/mmgr/generation.c b/src/backend/utils/mmgr/generation.c index b06fb0c6a4..8f9d56eb0f 100644 --- a/src/backend/utils/mmgr/generation.c +++ b/src/backend/utils/mmgr/generation.c @@ -201,6 +201,9 @@ GenerationContextCreate(MemoryContext parent, else allocSize = Max(allocSize, initBlockSize); + if (exceeds_max_total_bkend_mem(allocSize)) + return NULL; + /* * Allocate the initial block. Unlike other generation.c blocks, it * starts with the context header and its block header follows that. 
@@ -268,7 +271,7 @@ GenerationContextCreate(MemoryContext parent, name); ((MemoryContext) set)->mem_allocated = firstBlockSize; - pgstat_report_allocated_bytes(firstBlockSize, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(firstBlockSize, PG_ALLOC_GENERATION); return (MemoryContext) set; } @@ -314,7 +317,7 @@ GenerationReset(MemoryContext context) } } - pgstat_report_allocated_bytes(deallocation, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(deallocation, PG_ALLOC_GENERATION); /* set it so new allocations to make use of the keeper block */ set->block = set->keeper; @@ -337,7 +340,7 @@ GenerationDelete(MemoryContext context) /* Reset to release all releasable GenerationBlocks */ GenerationReset(context); - pgstat_report_allocated_bytes(context->mem_allocated, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(context->mem_allocated, PG_ALLOC_GENERATION); /* And free the context header and keeper block */ free(context); @@ -380,12 +383,15 @@ GenerationAlloc(MemoryContext context, Size size) { Size blksize = required_size + Generation_BLOCKHDRSZ; + if (exceeds_max_total_bkend_mem(blksize)) + return NULL; + block = (GenerationBlock *) malloc(blksize); if (block == NULL) return NULL; context->mem_allocated += blksize; - pgstat_report_allocated_bytes(blksize, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(blksize, PG_ALLOC_GENERATION); /* block with a single (used) chunk */ block->context = set; @@ -483,13 +489,16 @@ GenerationAlloc(MemoryContext context, Size size) if (blksize < required_size) blksize = pg_nextpower2_size_t(required_size); + if (exceeds_max_total_bkend_mem(blksize)) + return NULL; + block = (GenerationBlock *) malloc(blksize); if (block == NULL) return NULL; context->mem_allocated += blksize; - pgstat_report_allocated_bytes(blksize, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(blksize, PG_ALLOC_GENERATION); /* initialize the new block */ GenerationBlockInit(set, block, blksize); @@ 
-742,7 +751,7 @@ GenerationFree(void *pointer) dlist_delete(&block->node); set->header.mem_allocated -= block->blksize; - pgstat_report_allocated_bytes(block->blksize, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(block->blksize, PG_ALLOC_GENERATION); free(block); } diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c index 15d3380640..de85781479 100644 --- a/src/backend/utils/mmgr/slab.c +++ b/src/backend/utils/mmgr/slab.c @@ -356,9 +356,12 @@ SlabContextCreate(MemoryContext parent, elog(ERROR, "block size %zu for slab is too small for %zu-byte chunks", blockSize, chunkSize); - + /* Do not exceed maximum allowed memory allocation */ + if (exceeds_max_total_bkend_mem(Slab_CONTEXT_HDRSZ(chunksPerBlock))) + return NULL; slab = (SlabContext *) malloc(Slab_CONTEXT_HDRSZ(chunksPerBlock)); + if (slab == NULL) { MemoryContextStats(TopMemoryContext); @@ -418,8 +421,7 @@ SlabContextCreate(MemoryContext parent, * If SlabContextCreate is updated to add context header size to * context->mem_allocated, then update here and SlabDelete appropriately */ - pgstat_report_allocated_bytes(Slab_CONTEXT_HDRSZ(slab->chunksPerBlock), - PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(Slab_CONTEXT_HDRSZ(slab->chunksPerBlock), PG_ALLOC_SLAB); return (MemoryContext) slab; } @@ -479,7 +481,7 @@ SlabReset(MemoryContext context) } } - pgstat_report_allocated_bytes(deallocation, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(deallocation, PG_ALLOC_SLAB); slab->curBlocklistIndex = 0; Assert(context->mem_allocated == 0); @@ -500,8 +502,7 @@ SlabDelete(MemoryContext context) * Until context header allocation is included in context->mem_allocated, * cast to slab and decrement the header allocation */ - pgstat_report_allocated_bytes(Slab_CONTEXT_HDRSZ(((SlabContext *)context)->chunksPerBlock), - PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(Slab_CONTEXT_HDRSZ(((SlabContext *) context)->chunksPerBlock), PG_ALLOC_SLAB); /* 
And free the context header */ free(context); @@ -560,6 +561,10 @@ SlabAlloc(MemoryContext context, Size size) } else { + /* Do not exceed maximum allowed memory allocation */ + if (exceeds_max_total_bkend_mem(slab->blockSize)) + return NULL; + block = (SlabBlock *) malloc(slab->blockSize); if (unlikely(block == NULL)) @@ -567,7 +572,7 @@ SlabAlloc(MemoryContext context, Size size) block->slab = slab; context->mem_allocated += slab->blockSize; - pgstat_report_allocated_bytes(slab->blockSize, PG_ALLOC_INCREASE); + pgstat_report_allocated_bytes_increase(slab->blockSize, PG_ALLOC_SLAB); /* use the first chunk in the new block */ chunk = SlabBlockGetChunk(slab, block, 0); @@ -754,7 +759,7 @@ SlabFree(void *pointer) #endif free(block); slab->header.mem_allocated -= slab->blockSize; - pgstat_report_allocated_bytes(slab->blockSize, PG_ALLOC_DECREASE); + pgstat_report_allocated_bytes_decrease(slab->blockSize, PG_ALLOC_SLAB); } /* diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 4258cd92c9..bacf879294 100644 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -404,6 +404,13 @@ typedef struct PROC_HDR int spins_per_delay; /* Buffer id of the buffer that Startup process waits for pin on, or -1 */ int startupBufferPinWaitBufId; + + /* + * Max backend memory allocation tracker. Used/Initialized when + * max_total_bkend_mem > 0 as max_total_bkend_mem (MB) converted to bytes. + * Decreases/increases with free/malloc of backend memory. 
+ */ + pg_atomic_uint64 max_total_bkend_mem_bytes; } PROC_HDR; extern PGDLLIMPORT PROC_HDR *ProcGlobal; diff --git a/src/include/utils/backend_status.h b/src/include/utils/backend_status.h index 754ff0dc62..32a1149007 100644 --- a/src/include/utils/backend_status.h +++ b/src/include/utils/backend_status.h @@ -14,6 +14,7 @@ #include "libpq/pqcomm.h" #include "miscadmin.h" /* for BackendType */ #include "storage/backendid.h" +#include "storage/proc.h" #include "utils/backend_progress.h" #include "common/int.h" @@ -33,12 +34,14 @@ typedef enum BackendState STATE_DISABLED } BackendState; -/* Enum helper for reporting memory allocated bytes */ -enum allocation_direction +/* Enum helper for reporting memory allocator type */ +enum pg_allocator_type { - PG_ALLOC_DECREASE = -1, - PG_ALLOC_IGNORE, - PG_ALLOC_INCREASE, + PG_ALLOC_ASET = 1, + PG_ALLOC_DSM, + PG_ALLOC_GENERATION, + PG_ALLOC_SLAB, + PG_ALLOC_ONSHUTDOWN, }; /* ---------- @@ -297,6 +300,7 @@ typedef struct LocalPgBackendStatus */ extern PGDLLIMPORT bool pgstat_track_activities; extern PGDLLIMPORT int pgstat_track_activity_query_size; +extern PGDLLIMPORT int max_total_bkend_mem; /* ---------- @@ -305,7 +309,15 @@ extern PGDLLIMPORT int pgstat_track_activity_query_size; */ extern PGDLLIMPORT PgBackendStatus *MyBEEntry; extern PGDLLIMPORT uint64 *my_allocated_bytes; +extern PGDLLIMPORT uint64 allocation_allowance; +extern PGDLLIMPORT uint64 initial_allocation_allowance; +extern PGDLLIMPORT uint64 allocation_return; +extern PGDLLIMPORT uint64 allocation_return_threshold; +extern PGDLLIMPORT uint64 aset_allocated_bytes; +extern PGDLLIMPORT uint64 dsm_allocated_bytes; +extern PGDLLIMPORT uint64 generation_allocated_bytes; +extern PGDLLIMPORT uint64 slab_allocated_bytes; /* ---------- * Functions called from postmaster @@ -338,6 +350,7 @@ extern const char *pgstat_get_crashed_backend_activity(int pid, char *buffer, extern uint64 pgstat_get_my_query_id(void); extern void pgstat_set_allocated_bytes_storage(uint64 
*allocated_bytes); extern void pgstat_reset_allocated_bytes_storage(void); +extern void decrease_max_total_bkend_mem(int64 decrease); /* ---------- * Support functions for the SQL-callable functions to @@ -348,53 +361,214 @@ extern int pgstat_fetch_stat_numbackends(void); extern PgBackendStatus *pgstat_fetch_stat_beentry(BackendId beid); extern LocalPgBackendStatus *pgstat_fetch_stat_local_beentry(int beid); extern char *pgstat_clip_activity(const char *raw_activity); +extern bool exceeds_max_total_bkend_mem(uint64 allocation_request); /* ---------- - * pgstat_report_allocated_bytes() - - * - * Called to report change in memory allocated for this backend. + * pgstat_report_allocated_bytes_decrease() - + * Called to report decrease in memory allocated for this backend. * * my_allocated_bytes initially points to local memory, making it safe to call - * this before pgstats has been initialized. allocation_direction is a - * positive/negative multiplier enum defined above. + * this before pgstats has been initialized. * ---------- */ static inline void -pgstat_report_allocated_bytes(int64 allocated_bytes, int allocation_direction) +pgstat_report_allocated_bytes_decrease(int64 proc_allocated_bytes, int pg_allocator_type) { uint64 temp; /* - * Avoid *my_allocated_bytes unsigned integer overflow on - * PG_ALLOC_DECREASE + * Avoid allocated_bytes unsigned integer overflow on decrease. */ - if (allocation_direction == PG_ALLOC_DECREASE && - pg_sub_u64_overflow(*my_allocated_bytes, allocated_bytes, &temp)) + if (pg_sub_u64_overflow(*my_allocated_bytes, proc_allocated_bytes, &temp)) { + /* Add free'd memory to allocation return counter. */ + allocation_return += proc_allocated_bytes; + + /* On overflow, reset pgstat count of allocated bytes to zero */ *my_allocated_bytes = 0; - ereport(LOG, - errmsg("Backend %d deallocated %lld bytes, exceeding the %llu bytes it is currently reporting allocated. 
Setting reported to 0.", - MyProcPid, (long long) allocated_bytes, - (unsigned long long) *my_allocated_bytes)); + + /* Reset allocator type allocated bytes */ + switch (pg_allocator_type) + { + case PG_ALLOC_ASET: + aset_allocated_bytes = 0; + break; + case PG_ALLOC_DSM: + dsm_allocated_bytes = 0; + break; + case PG_ALLOC_GENERATION: + generation_allocated_bytes = 0; + break; + case PG_ALLOC_SLAB: + slab_allocated_bytes = 0; + break; + case PG_ALLOC_ONSHUTDOWN: + break; + } + + /* + * Return free'd memory to the global counter when return threshold is + * met or process end. + */ + if (max_total_bkend_mem && allocation_return >= allocation_return_threshold) + { + if (ProcGlobal) + { + volatile PROC_HDR *procglobal = ProcGlobal; + + /* Add to global */ + pg_atomic_add_fetch_u64(&procglobal->max_total_bkend_mem_bytes, + allocation_return); + + /* Restart count */ + allocation_return = 0; + } + } } else - *my_allocated_bytes += (allocated_bytes) * allocation_direction; + { + /* Add free'd memory to allocation return counter. */ + allocation_return += proc_allocated_bytes; + + /* Decrease pgstat count of allocated bytes */ + *my_allocated_bytes -= (proc_allocated_bytes); + + /* + * Decrease allocator type allocated bytes. NOTE: per hackers dsm + * memory allocations lifespan may exceed process lifespan, so we may + * implement a long lived tracker for it ala max_total_bkend_mem_bytes + */ + switch (pg_allocator_type) + { + case PG_ALLOC_ASET: + aset_allocated_bytes -= (proc_allocated_bytes); + break; + case PG_ALLOC_DSM: + dsm_allocated_bytes -= (proc_allocated_bytes); + break; + case PG_ALLOC_GENERATION: + generation_allocated_bytes -= (proc_allocated_bytes); + break; + case PG_ALLOC_SLAB: + slab_allocated_bytes -= (proc_allocated_bytes); + break; + case PG_ALLOC_ONSHUTDOWN: + break; + } + + /* + * Return free'd memory to the global counter when return threshold is + * met or process end. 
+ */ + if (max_total_bkend_mem && allocation_return >= allocation_return_threshold) + { + if (ProcGlobal) + { + volatile PROC_HDR *procglobal = ProcGlobal; + + /* Add to global */ + pg_atomic_add_fetch_u64(&procglobal->max_total_bkend_mem_bytes, + allocation_return); + + /* Restart count */ + allocation_return = 0; + } + } + } + + return; +} + +/* ---------- + * pgstat_report_allocated_bytes_increase() - + * Called to report increase in memory allocated for this backend. + * + * my_allocated_bytes initially points to local memory, making it safe to call + * this before pgstats has been initialized. + * ---------- + */ +static inline void +pgstat_report_allocated_bytes_increase(int64 proc_allocated_bytes, + int pg_allocator_type) +{ + /* Remove allocated memory from local allocation allowance */ + allocation_allowance -= proc_allocated_bytes; + + /* Increase pgstat count of allocated bytes */ + *my_allocated_bytes += (proc_allocated_bytes); + + /* + * Increase allocator type allocated bytes. NOTE: per hackers dsm memory + * allocations lifespan may exceed process lifespan, so we may implement a + * long lived tracker for it ala max_total_bkend_mem_bytes + */ + switch (pg_allocator_type) + { + case PG_ALLOC_ASET: + aset_allocated_bytes += (proc_allocated_bytes); + break; + case PG_ALLOC_DSM: + dsm_allocated_bytes += (proc_allocated_bytes); + break; + case PG_ALLOC_GENERATION: + generation_allocated_bytes += (proc_allocated_bytes); + break; + case PG_ALLOC_SLAB: + slab_allocated_bytes += (proc_allocated_bytes); + break; + case PG_ALLOC_ONSHUTDOWN: + break; + } return; } /* --------- - * pgstat_zero_my_allocated_bytes() - + * pgstat_init_allocated_bytes() - * - * Called to zero out local allocated bytes variable after fork to avoid double - * counting allocations. + * Called to initialize allocated bytes variables after fork and to + * avoid double counting allocations. 
* --------- */ static inline void -pgstat_zero_my_allocated_bytes(void) +pgstat_init_allocated_bytes(void) { *my_allocated_bytes = 0; + /* If we're limiting backend memory */ + if (max_total_bkend_mem) + { + volatile PROC_HDR *procglobal = ProcGlobal; + uint64 available_max_total_bkend_mem = 0; + + allocation_return = 0; + aset_allocated_bytes = 0; + dsm_allocated_bytes = 0; + generation_allocated_bytes = 0; + slab_allocated_bytes = 0; + allocation_allowance = 0; + + /* Account for the initial allocation allowance */ + while ((available_max_total_bkend_mem = pg_atomic_read_u64(&procglobal->max_total_bkend_mem_bytes)) >= allocation_allowance) + { + if (pg_atomic_compare_exchange_u64(&procglobal->max_total_bkend_mem_bytes, + &available_max_total_bkend_mem, + available_max_total_bkend_mem - + initial_allocation_allowance)) + { + /* + * On success populate allocation_allowance. Failure here will + * result in the backend's first invocation of + * exceeds_max_total_bkend_mem allocating requested, default, + * or available memory or result in an out of memory error. + */ + allocation_allowance = initial_allocation_allowance; + + break; + } + } + } + return; } -- 2.25.1