From ba55020bd3e68e1d7a3c786b3f5aa692750ee38f Mon Sep 17 00:00:00 2001 From: David Christensen Date: Tue, 29 Aug 2023 16:09:37 -0400 Subject: [PATCH v2 3/3] Capture rename of symbols --- contrib/amcheck/expected/check_btree.out | 2 +- contrib/amcheck/sql/check_btree.sql | 2 +- contrib/amcheck/verify_heapam.c | 12 ++-- contrib/amcheck/verify_nbtree.c | 6 +- contrib/bloom/blinsert.c | 2 +- contrib/bloom/bloom.h | 4 +- contrib/bloom/blutils.c | 2 +- contrib/file_fdw/file_fdw.c | 6 +- contrib/pageinspect/btreefuncs.c | 6 +- contrib/pageinspect/expected/checksum.out | 2 +- contrib/pageinspect/expected/checksum_1.out | 2 +- contrib/pageinspect/rawpage.c | 10 ++-- contrib/pageinspect/sql/checksum.sql | 2 +- contrib/pg_walinspect/pg_walinspect.c | 6 +- contrib/pgstattuple/pgstatapprox.c | 6 +- contrib/pgstattuple/pgstatindex.c | 4 +- contrib/pgstattuple/pgstattuple.c | 10 ++-- contrib/postgres_fdw/deparse.c | 2 +- contrib/postgres_fdw/postgres_fdw.c | 2 +- doc/src/sgml/catalogs.sgml | 12 ++-- doc/src/sgml/config.sgml | 32 +++++------ doc/src/sgml/limits.sgml | 2 +- doc/src/sgml/monitoring.sgml | 2 +- doc/src/sgml/pgfreespacemap.sgml | 2 +- doc/src/sgml/storage.sgml | 8 +-- src/backend/access/brin/brin_bloom.c | 2 +- src/backend/access/brin/brin_pageops.c | 4 +- src/backend/access/common/bufmask.c | 4 +- src/backend/access/common/reloptions.c | 2 +- src/backend/access/common/syncscan.c | 2 +- src/backend/access/common/toast_internals.c | 4 +- src/backend/access/gin/ginbtree.c | 12 ++-- src/backend/access/gin/gindatapage.c | 18 +++--- src/backend/access/gin/ginfast.c | 8 +-- src/backend/access/gin/ginget.c | 6 +- src/backend/access/gin/ginvacuum.c | 2 +- src/backend/access/gin/ginxlog.c | 4 +- src/backend/access/gist/gistbuild.c | 16 +++--- src/backend/access/gist/gistbuildbuffers.c | 10 ++-- src/backend/access/gist/gistutil.c | 2 +- src/backend/access/hash/hash.c | 2 +- src/backend/access/hash/hashpage.c | 2 +- src/backend/access/heap/README.HOT | 2 +- 
src/backend/access/heap/heapam.c | 20 +++---- src/backend/access/heap/heapam_handler.c | 10 ++-- src/backend/access/heap/heaptoast.c | 20 +++---- src/backend/access/heap/pruneheap.c | 2 +- src/backend/access/heap/rewriteheap.c | 6 +- src/backend/access/heap/vacuumlazy.c | 18 +++--- src/backend/access/heap/visibilitymap.c | 4 +- src/backend/access/nbtree/nbtdedup.c | 8 +-- src/backend/access/nbtree/nbtpage.c | 14 ++--- src/backend/access/nbtree/nbtree.c | 4 +- src/backend/access/nbtree/nbtsort.c | 12 ++-- src/backend/access/rmgrdesc/xlogdesc.c | 2 +- src/backend/access/spgist/spgdoinsert.c | 24 ++++---- src/backend/access/spgist/spgtextproc.c | 10 ++-- src/backend/access/spgist/spgutils.c | 12 ++-- src/backend/access/table/tableam.c | 2 +- src/backend/access/transam/README | 2 +- src/backend/access/transam/clog.c | 6 +- src/backend/access/transam/commit_ts.c | 24 ++++---- src/backend/access/transam/generic_xlog.c | 18 +++--- src/backend/access/transam/multixact.c | 54 +++++++++--------- src/backend/access/transam/slru.c | 22 ++++---- src/backend/access/transam/subtrans.c | 20 +++---- src/backend/access/transam/timeline.c | 2 +- src/backend/access/transam/varsup.c | 2 +- src/backend/access/transam/xlog.c | 30 +++++----- src/backend/access/transam/xloginsert.c | 24 ++++---- src/backend/access/transam/xlogprefetcher.c | 4 +- src/backend/access/transam/xlogreader.c | 28 +++++----- src/backend/access/transam/xlogrecovery.c | 10 ++-- src/backend/backup/basebackup.c | 40 ++++++------- src/backend/backup/basebackup_lz4.c | 6 +- src/backend/backup/basebackup_zstd.c | 6 +- src/backend/catalog/storage.c | 2 +- src/backend/commands/analyze.c | 4 +- src/backend/commands/async.c | 6 +- src/backend/commands/vacuumparallel.c | 2 +- src/backend/executor/nodeAgg.c | 30 +++++----- src/backend/nodes/tidbitmap.c | 4 +- src/backend/optimizer/path/costsize.c | 12 ++-- src/backend/optimizer/util/plancat.c | 2 +- src/backend/po/de.po | 16 +++--- src/backend/po/es.po | 14 ++--- 
src/backend/po/fr.po | 18 +++--- src/backend/po/id.po | 8 +-- src/backend/po/it.po | 12 ++-- src/backend/po/ja.po | 16 +++--- src/backend/po/ko.po | 22 ++++---- src/backend/po/pl.po | 12 ++-- src/backend/po/pt_BR.po | 10 ++-- src/backend/po/ru.po | 24 ++++---- src/backend/po/sv.po | 14 ++--- src/backend/po/tr.po | 12 ++-- src/backend/po/uk.po | 12 ++-- src/backend/po/zh_CN.po | 12 ++-- src/backend/replication/logical/worker.c | 2 +- src/backend/storage/buffer/buf_init.c | 4 +- src/backend/storage/buffer/bufmgr.c | 22 ++++---- src/backend/storage/buffer/freelist.c | 2 +- src/backend/storage/buffer/localbuf.c | 8 +-- src/backend/storage/file/buffile.c | 14 ++--- src/backend/storage/file/copydir.c | 2 +- src/backend/storage/file/fd.c | 4 +- src/backend/storage/freespace/README | 8 +-- src/backend/storage/freespace/freespace.c | 22 ++++---- src/backend/storage/freespace/indexfsm.c | 6 +- src/backend/storage/large_object/inv_api.c | 30 +++++----- src/backend/storage/lmgr/predicate.c | 4 +- src/backend/storage/page/bufpage.c | 22 ++++---- src/backend/storage/smgr/md.c | 62 ++++++++++----------- src/backend/utils/adt/pgstatfuncs.c | 4 +- src/backend/utils/adt/selfuncs.c | 2 +- src/backend/utils/init/miscinit.c | 6 +- src/backend/utils/misc/guc.c | 16 +++--- src/backend/utils/misc/guc_tables.c | 6 +- src/backend/utils/sort/logtape.c | 48 ++++++++-------- src/backend/utils/sort/sharedtuplestore.c | 12 ++-- src/backend/utils/sort/tuplesort.c | 6 +- src/bin/initdb/initdb.c | 22 ++++---- src/bin/pg_checksums/pg_checksums.c | 18 +++--- src/bin/pg_resetwal/pg_resetwal.c | 6 +- src/bin/pg_rewind/filemap.c | 4 +- src/bin/pg_rewind/pg_rewind.c | 4 +- src/bin/pg_upgrade/file.c | 10 ++-- src/bin/pg_waldump/pg_waldump.c | 2 +- src/common/file_utils.c | 6 +- src/include/access/brin_page.h | 6 +- src/include/access/ginblock.h | 6 +- src/include/access/gist.h | 2 +- src/include/access/gist_private.h | 6 +- src/include/access/hash.h | 6 +- src/include/access/heaptoast.h | 20 +++---- 
src/include/access/htup_details.h | 6 +- src/include/access/itup.h | 2 +- src/include/access/nbtree.h | 6 +- src/include/access/slru.h | 2 +- src/include/access/spgist_private.h | 6 +- src/include/access/xlogrecord.h | 6 +- src/include/backup/basebackup_sink.h | 4 +- src/include/c.h | 6 +- src/include/pg_config_manual.h | 2 +- src/include/postgres_ext.h | 1 + src/include/storage/bufmgr.h | 4 +- src/include/storage/bufpage.h | 8 +-- src/include/storage/checksum_impl.h | 6 +- src/include/storage/fsm_internals.h | 4 +- src/include/storage/large_object.h | 8 +-- src/include/storage/off.h | 2 +- src/include/utils/rel.h | 4 +- src/test/modules/test_slru/test_slru.c | 2 +- src/test/regress/expected/btree_index.out | 4 +- src/test/regress/expected/largeobject.out | 2 +- src/test/regress/expected/largeobject_1.out | 2 +- src/test/regress/sql/btree_index.sql | 4 +- src/test/regress/sql/largeobject.sql | 2 +- src/tools/msvc/Solution.pm | 2 +- 159 files changed, 752 insertions(+), 751 deletions(-) diff --git a/contrib/amcheck/expected/check_btree.out b/contrib/amcheck/expected/check_btree.out index 38791bbc1f..b677475ae9 100644 --- a/contrib/amcheck/expected/check_btree.out +++ b/contrib/amcheck/expected/check_btree.out @@ -168,7 +168,7 @@ WHERE attrelid = 'toast_bug'::regclass AND attname = 'buggy'; x (1 row) --- Insert compressible heap tuple (comfortably exceeds TOAST_TUPLE_THRESHOLD): +-- Insert compressible heap tuple (comfortably exceeds toast_tuple_threshold): INSERT INTO toast_bug SELECT repeat('a', 2200); -- Should not get false positive report of corruption: SELECT bt_index_check('toasty', true); diff --git a/contrib/amcheck/sql/check_btree.sql b/contrib/amcheck/sql/check_btree.sql index 033c04b4d0..6bf3f02c22 100644 --- a/contrib/amcheck/sql/check_btree.sql +++ b/contrib/amcheck/sql/check_btree.sql @@ -110,7 +110,7 @@ WHERE attrelid = 'toasty'::regclass AND attname = 'buggy'; SELECT attstorage FROM pg_attribute WHERE attrelid = 'toast_bug'::regclass AND attname = 
'buggy'; --- Insert compressible heap tuple (comfortably exceeds TOAST_TUPLE_THRESHOLD): +-- Insert compressible heap tuple (comfortably exceeds toast_tuple_threshold): INSERT INTO toast_bug SELECT repeat('a', 2200); -- Should not get false positive report of corruption: SELECT bt_index_check('toasty', true); diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c index 97f3253522..004aa26b7c 100644 --- a/contrib/amcheck/verify_heapam.c +++ b/contrib/amcheck/verify_heapam.c @@ -540,13 +540,13 @@ verify_heapam(PG_FUNCTION_ARGS) (unsigned) MAXALIGN(SizeofHeapTupleHeader))); continue; } - if (ctx.lp_off + ctx.lp_len > BLCKSZ) + if (ctx.lp_off + ctx.lp_len > cluster_block_size) { report_corruption(&ctx, psprintf("line pointer to page offset %u with length %u ends beyond maximum page offset %u", ctx.lp_off, ctx.lp_len, - (unsigned) BLCKSZ)); + (unsigned) cluster_block_size)); continue; } @@ -1460,7 +1460,7 @@ check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx, uint32 extsize) { int32 chunk_seq; - int32 last_chunk_seq = (extsize - 1) / TOAST_MAX_CHUNK_SIZE; + int32 last_chunk_seq = (extsize - 1) / cluster_toast_max_chunk_size; Pointer chunk; bool isnull; int32 chunksize; @@ -1530,8 +1530,8 @@ check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx, return; } - expected_size = chunk_seq < last_chunk_seq ? TOAST_MAX_CHUNK_SIZE - : extsize - (last_chunk_seq * TOAST_MAX_CHUNK_SIZE); + expected_size = chunk_seq < last_chunk_seq ? 
cluster_toast_max_chunk_size + : extsize - (last_chunk_seq * cluster_toast_max_chunk_size); if (chunksize != expected_size) report_toast_corruption(ctx, ta, @@ -1773,7 +1773,7 @@ check_toasted_attribute(HeapCheckContext *ctx, ToastedAttribute *ta) int32 last_chunk_seq; extsize = VARATT_EXTERNAL_GET_EXTSIZE(ta->toast_pointer); - last_chunk_seq = (extsize - 1) / TOAST_MAX_CHUNK_SIZE; + last_chunk_seq = (extsize - 1) / cluster_toast_max_chunk_size; /* * Setup a scan key to find chunks in toast table with matching va_valueid diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index 94a9759322..f1298fd34a 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -2962,7 +2962,7 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) BTPageOpaque opaque; OffsetNumber maxoffset; - page = palloc(BLCKSZ); + page = palloc(cluster_block_size); /* * We copy the page into local storage to avoid holding pin on the buffer @@ -2979,7 +2979,7 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) _bt_checkpage(state->rel, buffer); /* Only use copy of page in palloc()'d memory */ - memcpy(page, BufferGetPage(buffer), BLCKSZ); + memcpy(page, BufferGetPage(buffer), cluster_block_size); UnlockReleaseBuffer(buffer); opaque = BTPageGetOpaque(page); @@ -3163,7 +3163,7 @@ PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page, ItemId itemid = PageGetItemId(page, offset); if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) > - BLCKSZ - MAXALIGN(sizeof(BTPageOpaqueData))) + cluster_block_size - MAXALIGN(sizeof(BTPageOpaqueData))) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("line pointer points past end of tuple space in index \"%s\"", diff --git a/contrib/bloom/blinsert.c b/contrib/bloom/blinsert.c index b90145148d..32a339469a 100644 --- a/contrib/bloom/blinsert.c +++ b/contrib/bloom/blinsert.c @@ -52,7 +52,7 @@ flushCachedPage(Relation index, BloomBuildState *buildstate) state = 
GenericXLogStart(index); page = GenericXLogRegisterBuffer(state, buffer, GENERIC_XLOG_FULL_IMAGE); - memcpy(page, buildstate->data.data, BLCKSZ); + memcpy(page, buildstate->data.data, cluster_block_size); GenericXLogFinish(state); UnlockReleaseBuffer(buffer); } diff --git a/contrib/bloom/bloom.h b/contrib/bloom/bloom.h index 330811ec60..d08fe1de26 100644 --- a/contrib/bloom/bloom.h +++ b/contrib/bloom/bloom.h @@ -112,7 +112,7 @@ typedef struct BloomOptions */ typedef BlockNumber FreeBlockNumberArray[ MAXALIGN_DOWN( - BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(BloomPageOpaqueData)) + cluster_block_size - SizeOfPageHeaderData - MAXALIGN(sizeof(BloomPageOpaqueData)) - MAXALIGN(sizeof(uint16) * 2 + sizeof(uint32) + sizeof(BloomOptions)) ) / sizeof(BlockNumber) ]; @@ -150,7 +150,7 @@ typedef struct BloomState } BloomState; #define BloomPageGetFreeSpace(state, page) \ - (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \ + (cluster_block_size - MAXALIGN(SizeOfPageHeaderData) \ - BloomPageGetMaxOffset(page) * (state)->sizeOfBloomTuple \ - MAXALIGN(sizeof(BloomPageOpaqueData))) diff --git a/contrib/bloom/blutils.c b/contrib/bloom/blutils.c index f23fbb1d9e..9d79319e90 100644 --- a/contrib/bloom/blutils.c +++ b/contrib/bloom/blutils.c @@ -400,7 +400,7 @@ BloomInitPage(Page page, uint16 flags) { BloomPageOpaque opaque; - PageInit(page, BLCKSZ, sizeof(BloomPageOpaqueData)); + PageInit(page, cluster_block_size, sizeof(BloomPageOpaqueData)); opaque = BloomPageGetOpaque(page); opaque->flags = flags; diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index 2189be8a3c..c265b6e309 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -840,7 +840,7 @@ fileAnalyzeForeignTable(Relation relation, * Convert size to pages. Must return at least 1 so that we can tell * later on that pg_class.relpages is not default. 
*/ - *totalpages = (stat_buf.st_size + (BLCKSZ - 1)) / BLCKSZ; + *totalpages = (stat_buf.st_size + (cluster_block_size - 1)) / cluster_block_size; if (*totalpages < 1) *totalpages = 1; @@ -1010,12 +1010,12 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel, * back to the default if using a program as the input. */ if (fdw_private->is_program || stat(fdw_private->filename, &stat_buf) < 0) - stat_buf.st_size = 10 * BLCKSZ; + stat_buf.st_size = 10 * cluster_block_size; /* * Convert size to pages for use in I/O cost estimate later. */ - pages = (stat_buf.st_size + (BLCKSZ - 1)) / BLCKSZ; + pages = (stat_buf.st_size + (cluster_block_size - 1)) / cluster_block_size; if (pages < 1) pages = 1; fdw_private->pages = pages; diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c index 9cdc8e182b..77b00ef2b9 100644 --- a/contrib/pageinspect/btreefuncs.c +++ b/contrib/pageinspect/btreefuncs.c @@ -116,7 +116,7 @@ GetBTPageStatistics(BlockNumber blkno, Buffer buffer, BTPageStat *stat) stat->blkno = blkno; - stat->max_avail = BLCKSZ - (BLCKSZ - phdr->pd_special + SizeOfPageHeaderData); + stat->max_avail = cluster_block_size - (cluster_block_size - phdr->pd_special + SizeOfPageHeaderData); stat->dead_items = stat->live_items = 0; @@ -663,8 +663,8 @@ bt_page_items_internal(PG_FUNCTION_ARGS, enum pageinspect_version ext_version) uargs = palloc(sizeof(ua_page_items)); - uargs->page = palloc(BLCKSZ); - memcpy(uargs->page, BufferGetPage(buffer), BLCKSZ); + uargs->page = palloc(cluster_block_size); + memcpy(uargs->page, BufferGetPage(buffer), cluster_block_size); UnlockReleaseBuffer(buffer); relation_close(rel, AccessShareLock); diff --git a/contrib/pageinspect/expected/checksum.out b/contrib/pageinspect/expected/checksum.out index a85388e158..236df5be64 100644 --- a/contrib/pageinspect/expected/checksum.out +++ b/contrib/pageinspect/expected/checksum.out @@ -6,7 +6,7 @@ -- on the configured block size. 
This test has several different expected -- results files to handle the following possibilities: -- --- BLCKSZ end file +-- cluster_block_size end file -- 8K LE checksum.out -- 8K BE checksum_1.out -- diff --git a/contrib/pageinspect/expected/checksum_1.out b/contrib/pageinspect/expected/checksum_1.out index 6fb1b1b04d..ce7a30f3c1 100644 --- a/contrib/pageinspect/expected/checksum_1.out +++ b/contrib/pageinspect/expected/checksum_1.out @@ -6,7 +6,7 @@ -- on the configured block size. This test has several different expected -- results files to handle the following possibilities: -- --- BLCKSZ end file +-- cluster_block_size end file -- 8K LE checksum.out -- 8K BE checksum_1.out -- diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c index b25a63cbd6..2527501078 100644 --- a/contrib/pageinspect/rawpage.c +++ b/contrib/pageinspect/rawpage.c @@ -179,8 +179,8 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno) blkno, RelationGetRelationName(rel)))); /* Initialize buffer to copy to */ - raw_page = (bytea *) palloc(BLCKSZ + VARHDRSZ); - SET_VARSIZE(raw_page, BLCKSZ + VARHDRSZ); + raw_page = (bytea *) palloc(cluster_block_size + VARHDRSZ); + SET_VARSIZE(raw_page, cluster_block_size + VARHDRSZ); raw_page_data = VARDATA(raw_page); /* Take a verbatim copy of the page */ @@ -188,7 +188,7 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno) buf = ReadBufferExtended(rel, forknum, blkno, RBM_NORMAL, NULL); LockBuffer(buf, BUFFER_LOCK_SHARE); - memcpy(raw_page_data, BufferGetPage(buf), BLCKSZ); + memcpy(raw_page_data, BufferGetPage(buf), cluster_block_size); LockBuffer(buf, BUFFER_LOCK_UNLOCK); ReleaseBuffer(buf); @@ -219,12 +219,12 @@ get_page_from_raw(bytea *raw_page) raw_page_size = VARSIZE_ANY_EXHDR(raw_page); - if (raw_page_size != BLCKSZ) + if (raw_page_size != cluster_block_size) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid page size"), errdetail("Expected %d bytes, got 
%d.", - BLCKSZ, raw_page_size))); + cluster_block_size, raw_page_size))); page = palloc(raw_page_size); diff --git a/contrib/pageinspect/sql/checksum.sql b/contrib/pageinspect/sql/checksum.sql index b877db0611..2afe04d0d2 100644 --- a/contrib/pageinspect/sql/checksum.sql +++ b/contrib/pageinspect/sql/checksum.sql @@ -6,7 +6,7 @@ -- on the configured block size. This test has several different expected -- results files to handle the following possibilities: -- --- BLCKSZ end file +-- cluster_block_size end file -- 8K LE checksum.out -- 8K BE checksum_1.out -- diff --git a/contrib/pg_walinspect/pg_walinspect.c b/contrib/pg_walinspect/pg_walinspect.c index 796a74f322..318c55abac 100644 --- a/contrib/pg_walinspect/pg_walinspect.c +++ b/contrib/pg_walinspect/pg_walinspect.c @@ -386,9 +386,9 @@ GetWALBlockInfo(FunctionCallInfo fcinfo, XLogReaderState *record, (errcode(ERRCODE_INTERNAL_ERROR), errmsg_internal("%s", record->errormsg_buf))); - block_fpi_data = (bytea *) palloc(BLCKSZ + VARHDRSZ); - SET_VARSIZE(block_fpi_data, BLCKSZ + VARHDRSZ); - memcpy(VARDATA(block_fpi_data), page, BLCKSZ); + block_fpi_data = (bytea *) palloc(cluster_block_size + VARHDRSZ); + SET_VARSIZE(block_fpi_data, cluster_block_size + VARHDRSZ); + memcpy(VARDATA(block_fpi_data), page, cluster_block_size); values[i++] = PointerGetDatum(block_fpi_data); } else diff --git a/contrib/pgstattuple/pgstatapprox.c b/contrib/pgstattuple/pgstatapprox.c index f601dc6121..0ce0b4f9a9 100644 --- a/contrib/pgstattuple/pgstatapprox.c +++ b/contrib/pgstattuple/pgstatapprox.c @@ -94,7 +94,7 @@ statapprox_heap(Relation rel, output_type *stat) if (VM_ALL_VISIBLE(rel, blkno, &vmbuffer)) { freespace = GetRecordedFreeSpace(rel, blkno); - stat->tuple_len += BLCKSZ - freespace; + stat->tuple_len += cluster_block_size - freespace; stat->free_space += freespace; continue; } @@ -113,7 +113,7 @@ statapprox_heap(Relation rel, output_type *stat) if (!PageIsNew(page)) stat->free_space += PageGetHeapFreeSpace(page); else - 
stat->free_space += BLCKSZ - SizeOfPageHeaderData; + stat->free_space += cluster_block_size - SizeOfPageHeaderData; /* We may count the page as scanned even if it's new/empty */ scanned++; @@ -182,7 +182,7 @@ statapprox_heap(Relation rel, output_type *stat) UnlockReleaseBuffer(buf); } - stat->table_len = (uint64) nblocks * BLCKSZ; + stat->table_len = (uint64) nblocks * cluster_block_size; /* * We don't know how many tuples are in the pages we didn't scan, so diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c index d69ac1c93d..9391bd540b 100644 --- a/contrib/pgstattuple/pgstatindex.c +++ b/contrib/pgstattuple/pgstatindex.c @@ -297,7 +297,7 @@ pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo) { int max_avail; - max_avail = BLCKSZ - (BLCKSZ - ((PageHeader) page)->pd_special + SizeOfPageHeaderData); + max_avail = cluster_block_size - (cluster_block_size - ((PageHeader) page)->pd_special + SizeOfPageHeaderData); indexStat.max_avail += max_avail; indexStat.free_space += PageGetFreeSpace(page); @@ -342,7 +342,7 @@ pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo) indexStat.leaf_pages + indexStat.internal_pages + indexStat.deleted_pages + - indexStat.empty_pages) * BLCKSZ); + indexStat.empty_pages) * cluster_block_size); values[j++] = psprintf("%u", indexStat.root_blkno); values[j++] = psprintf(INT64_FORMAT, indexStat.internal_pages); values[j++] = psprintf(INT64_FORMAT, indexStat.leaf_pages); diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index 93b7834b77..6a1c6bc6aa 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -386,7 +386,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) table_endscan(scan); relation_close(rel, AccessShareLock); - stat.table_len = (uint64) nblocks * BLCKSZ; + stat.table_len = (uint64) nblocks * cluster_block_size; return build_pgstattuple_type(&stat, fcinfo); } @@ -409,7 +409,7 @@ pgstat_btree_page(pgstattuple_type *stat, 
Relation rel, BlockNumber blkno, if (PageIsNew(page)) { /* fully empty page */ - stat->free_space += BLCKSZ; + stat->free_space += cluster_block_size; } else { @@ -419,7 +419,7 @@ pgstat_btree_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno, if (P_IGNORE(opaque)) { /* deleted or half-dead page */ - stat->free_space += BLCKSZ; + stat->free_space += cluster_block_size; } else if (P_ISLEAF(opaque)) { @@ -456,7 +456,7 @@ pgstat_hash_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno, switch (opaque->hasho_flag & LH_PAGE_TYPE) { case LH_UNUSED_PAGE: - stat->free_space += BLCKSZ; + stat->free_space += cluster_block_size; break; case LH_BUCKET_PAGE: case LH_OVERFLOW_PAGE: @@ -531,7 +531,7 @@ pgstat_index(Relation rel, BlockNumber start, pgstat_page pagefn, /* Quit if we've scanned the whole relation */ if (blkno >= nblocks) { - stat.table_len = (uint64) nblocks * BLCKSZ; + stat.table_len = (uint64) nblocks * cluster_block_size; break; } diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c index 09d6dd60dd..22cdddba67 100644 --- a/contrib/postgres_fdw/deparse.c +++ b/contrib/postgres_fdw/deparse.c @@ -2364,7 +2364,7 @@ deparseAnalyzeSizeSql(StringInfo buf, Relation rel) appendStringInfoString(buf, "SELECT pg_catalog.pg_relation_size("); deparseStringLiteral(buf, relname.data); - appendStringInfo(buf, "::pg_catalog.regclass) / %d", BLCKSZ); + appendStringInfo(buf, "::pg_catalog.regclass) / %d", cluster_block_size); } /* diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index 1393716587..6c55044f64 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -755,7 +755,7 @@ postgresGetForeignRelSize(PlannerInfo *root, { baserel->pages = 10; baserel->tuples = - (10 * BLCKSZ) / (baserel->reltarget->width + + (10 * cluster_block_size) / (baserel->reltarget->width + MAXALIGN(SizeofHeapTupleHeader)); } diff --git a/doc/src/sgml/catalogs.sgml 
b/doc/src/sgml/catalogs.sgml index d17ff51e28..bc678737d6 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -2023,7 +2023,7 @@ SCRAM-SHA-256$<iteration count>:&l Size of the on-disk representation of this table in pages (of size - BLCKSZ). This is only an estimate used by the + cluster_block_size). This is only an estimate used by the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as CREATE INDEX. @@ -4884,8 +4884,8 @@ SCRAM-SHA-256$<iteration count>:&l assigned when it is created. Each large object is broken into segments or pages small enough to be conveniently stored as rows in pg_largeobject. - The amount of data per page is defined to be LOBLKSIZE (which is currently - BLCKSZ/4, or typically 2 kB). + The amount of data per page is defined to be cluster_loblksize (which is currently + cluster_block_size/4, or typically 2 kB). @@ -4939,7 +4939,7 @@ SCRAM-SHA-256$<iteration count>:&l Actual data stored in the large object. - This will never be more than LOBLKSIZE bytes and might be less. + This will never be more than cluster_loblksize bytes and might be less. @@ -4949,9 +4949,9 @@ SCRAM-SHA-256$<iteration count>:&l Each row of pg_largeobject holds data for one page of a large object, beginning at - byte offset (pageno * LOBLKSIZE) within the object. The implementation + byte offset (pageno * cluster_loblksize) within the object. The implementation allows sparse storage: pages might be missing, and might be shorter than - LOBLKSIZE bytes even if they are not the last page of the object. + cluster_loblksize bytes even if they are not the last page of the object. Missing regions within a large object read as zeroes. diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 694d667bf9..2274bc0ffb 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -1631,8 +1631,8 @@ include_dir 'conf.d' settings significantly higher than the minimum are usually needed for good performance. 
If this value is specified without units, it is taken as blocks, - that is BLCKSZ bytes, typically 8kB. - (Non-default values of BLCKSZ change the minimum + that is cluster_block_size bytes, typically 8kB. + (Non-default values of cluster_block_size change the minimum value.) This parameter can only be set at server start. @@ -1762,9 +1762,9 @@ include_dir 'conf.d' each database session. These are session-local buffers used only for access to temporary tables. If this value is specified without units, it is taken as blocks, - that is BLCKSZ bytes, typically 8kB. + that is cluster_block_size bytes, typically 8kB. The default is eight megabytes (8MB). - (If BLCKSZ is not 8kB, the default value scales + (If cluster_block_size is not 8kB, the default value scales proportionally to it.) This setting can be changed within individual sessions, but only before the first use of temporary tables @@ -1779,7 +1779,7 @@ include_dir 'conf.d' buffers is only a buffer descriptor, or about 64 bytes, per increment in temp_buffers. However if a buffer is actually used an additional 8192 bytes will be consumed for it - (or in general, BLCKSZ bytes). + (or in general, cluster_block_size bytes). @@ -2435,11 +2435,11 @@ include_dir 'conf.d' cache, where performance might degrade. This setting may have no effect on some platforms. If this value is specified without units, it is taken as blocks, - that is BLCKSZ bytes, typically 8kB. + that is cluster_block_size bytes, typically 8kB. The valid range is between 0, which disables forced writeback, and 2MB. The default is 512kB on Linux, - 0 elsewhere. (If BLCKSZ is not 8kB, + 0 elsewhere. (If cluster_block_size is not 8kB, the default and maximum values scale proportionally to it.) This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2481,11 +2481,11 @@ include_dir 'conf.d' than the OS's page cache, where performance might degrade. This setting may have no effect on some platforms. 
If this value is specified without units, it is taken as blocks, - that is BLCKSZ bytes, typically 8kB. + that is cluster_block_size bytes, typically 8kB. The valid range is between 0, which disables forced writeback, and 2MB. The default is 0, i.e., no - forced writeback. (If BLCKSZ is not 8kB, + forced writeback. (If cluster_block_size is not 8kB, the maximum value scales proportionally to it.) @@ -3488,11 +3488,11 @@ include_dir 'conf.d' than the OS's page cache, where performance might degrade. This setting may have no effect on some platforms. If this value is specified without units, it is taken as blocks, - that is BLCKSZ bytes, typically 8kB. + that is cluster_block_size bytes, typically 8kB. The valid range is between 0, which disables forced writeback, and 2MB. The default is 256kB on - Linux, 0 elsewhere. (If BLCKSZ is not + Linux, 0 elsewhere. (If cluster_block_size is not 8kB, the default and maximum values scale proportionally to it.) This parameter can only be set in the postgresql.conf file or on the server command line. @@ -5614,7 +5614,7 @@ ANY num_sync ( num_sync ( . If this value is specified without units, it is taken as blocks, - that is BLCKSZ bytes, typically 8kB. + that is cluster_block_size bytes, typically 8kB. The default is 512 kilobytes (512kB). @@ -5668,9 +5668,9 @@ ANY num_sync ( ) is influenced by block_size. See relation size 32 TB - with the default BLCKSZ of 8192 bytes + with the default cluster_block_size of 8192 bytes diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 4ff415d6a0..c655c70e38 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -2693,7 +2693,7 @@ description | Waiting for a newly initialized WAL file to reach durable storage Relation data reads, writes, and extends are done in block_size units, derived from the build-time - parameter BLCKSZ, which is 8192 by + parameter cluster_block_size, which is 8192 by default. 
diff --git a/doc/src/sgml/pgfreespacemap.sgml b/doc/src/sgml/pgfreespacemap.sgml index 829ad60f32..0da94e5f63 100644 --- a/doc/src/sgml/pgfreespacemap.sgml +++ b/doc/src/sgml/pgfreespacemap.sgml @@ -60,7 +60,7 @@ The values stored in the free space map are not exact. They're rounded - to precision of 1/256th of BLCKSZ (32 bytes with default BLCKSZ), and + to precision of 1/256th of cluster_block_size (32 bytes with default cluster_block_size), and they're not kept fully up-to-date as tuples are inserted and updated. diff --git a/doc/src/sgml/storage.sgml b/doc/src/sgml/storage.sgml index 148fb1b49d..4cf7460b0a 100644 --- a/doc/src/sgml/storage.sgml +++ b/doc/src/sgml/storage.sgml @@ -415,7 +415,7 @@ described in more detail below. Out-of-line values are divided (after compression if used) into chunks of at -most TOAST_MAX_CHUNK_SIZE bytes (by default this value is chosen +most cluster_toast_max_chunk_size bytes (by default this value is chosen so that four chunk rows will fit on a page, making it about 2000 bytes). Each chunk is stored as a separate row in the TOAST table belonging to the owning table. Every @@ -438,10 +438,10 @@ bytes regardless of the actual size of the represented value. The TOAST management code is triggered only when a row value to be stored in a table is wider than -TOAST_TUPLE_THRESHOLD bytes (normally 2 kB). +toast_tuple_threshold bytes (normally 2 kB). The TOAST code will compress and/or move field values out-of-line until the row value is shorter than -TOAST_TUPLE_TARGET bytes (also normally 2 kB, adjustable) +cluster_toast_tuple_target bytes (also normally 2 kB, adjustable) or no more gains can be had. During an UPDATE operation, values of unchanged fields are normally preserved as-is; so an UPDATE of a row with out-of-line values incurs no TOAST costs if @@ -496,7 +496,7 @@ with ALTER TABLE ... 
SET STORAGE -TOAST_TUPLE_TARGET can be adjusted for each table using +cluster_toast_tuple_target can be adjusted for each table using ALTER TABLE ... SET (toast_tuple_target = N) diff --git a/src/backend/access/brin/brin_bloom.c b/src/backend/access/brin/brin_bloom.c index 56534cf29c..b3b0633d73 100644 --- a/src/backend/access/brin/brin_bloom.c +++ b/src/backend/access/brin/brin_bloom.c @@ -212,7 +212,7 @@ typedef struct BloomOptions * be larger because the index has multiple columns. */ #define BloomMaxFilterSize \ - MAXALIGN_DOWN(BLCKSZ - \ + MAXALIGN_DOWN(cluster_block_size - \ (MAXALIGN(SizeOfPageHeaderData + \ sizeof(ItemIdData)) + \ MAXALIGN(sizeof(BrinSpecialSpace)) + \ diff --git a/src/backend/access/brin/brin_pageops.c b/src/backend/access/brin/brin_pageops.c index b578d25954..11103ff25d 100644 --- a/src/backend/access/brin/brin_pageops.c +++ b/src/backend/access/brin/brin_pageops.c @@ -27,7 +27,7 @@ * a single item per page, unlike other index AMs. */ #define BrinMaxItemSize \ - MAXALIGN_DOWN(BLCKSZ - \ + MAXALIGN_DOWN(cluster_block_size - \ (MAXALIGN(SizeOfPageHeaderData + \ sizeof(ItemIdData)) + \ MAXALIGN(sizeof(BrinSpecialSpace)))) @@ -475,7 +475,7 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange, void brin_page_init(Page page, uint16 type) { - PageInit(page, BLCKSZ, sizeof(BrinSpecialSpace)); + PageInit(page, cluster_block_size, sizeof(BrinSpecialSpace)); BrinPageType(page) = type; } diff --git a/src/backend/access/common/bufmask.c b/src/backend/access/common/bufmask.c index 5e392dab1e..4e6e9eb7e1 100644 --- a/src/backend/access/common/bufmask.c +++ b/src/backend/access/common/bufmask.c @@ -76,7 +76,7 @@ mask_unused_space(Page page) /* Sanity check */ if (pd_lower > pd_upper || pd_special < pd_upper || - pd_lower < SizeOfPageHeaderData || pd_special > BLCKSZ) + pd_lower < SizeOfPageHeaderData || pd_special > cluster_block_size) { elog(ERROR, "invalid page pd_lower %u pd_upper %u pd_special %u", pd_lower, pd_upper, pd_special); @@ -120,7 
+120,7 @@ mask_page_content(Page page) { /* Mask Page Content */ memset(page + SizeOfPageHeaderData, MASK_MARKER, - BLCKSZ - SizeOfPageHeaderData); + cluster_block_size - SizeOfPageHeaderData); /* Mask pd_lower and pd_upper */ memset(&((PageHeader) page)->pd_lower, MASK_MARKER, diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 469de9bb49..52d4ee6834 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -328,7 +328,7 @@ static relopt_int intRelOpts[] = RELOPT_KIND_HEAP, ShareUpdateExclusiveLock }, - TOAST_TUPLE_TARGET, 128, TOAST_TUPLE_TARGET_MAIN + cluster_toast_tuple_target, 128, toast_tuple_target_main }, { { diff --git a/src/backend/access/common/syncscan.c b/src/backend/access/common/syncscan.c index 2bc6828883..3aa54f28d4 100644 --- a/src/backend/access/common/syncscan.c +++ b/src/backend/access/common/syncscan.c @@ -80,7 +80,7 @@ bool trace_syncscan = false; * the buffer cache anyway, and on the other hand the page is most likely * still in the OS cache. 
*/ -#define SYNC_SCAN_REPORT_INTERVAL (128 * 1024 / BLCKSZ) +#define SYNC_SCAN_REPORT_INTERVAL (128 * 1024 / cluster_block_size) /* diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c index 588825ed85..8745f87be6 100644 --- a/src/backend/access/common/toast_internals.c +++ b/src/backend/access/common/toast_internals.c @@ -133,7 +133,7 @@ toast_save_datum(Relation rel, Datum value, { struct varlena hdr; /* this is to make the union big enough for a chunk: */ - char data[TOAST_MAX_CHUNK_SIZE + VARHDRSZ]; + char data[cluster_toast_max_chunk_size + VARHDRSZ]; /* ensure union is aligned well enough: */ int32 align_it; } chunk_data; @@ -311,7 +311,7 @@ toast_save_datum(Relation rel, Datum value, /* * Calculate the size of this chunk */ - chunk_size = Min(TOAST_MAX_CHUNK_SIZE, data_todo); + chunk_size = Min(cluster_toast_max_chunk_size, data_todo); /* * Build a tuple and store it diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c index 35490c7283..6ec59ae354 100644 --- a/src/backend/access/gin/ginbtree.c +++ b/src/backend/access/gin/ginbtree.c @@ -510,7 +510,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, * critical section yet.) 
*/ newrootpg = PageGetTempPage(newrpage); - GinInitPage(newrootpg, GinPageGetOpaque(newlpage)->flags & ~(GIN_LEAF | GIN_COMPRESSED), BLCKSZ); + GinInitPage(newrootpg, GinPageGetOpaque(newlpage)->flags & ~(GIN_LEAF | GIN_COMPRESSED), cluster_block_size); btree->fillRoot(btree, newrootpg, BufferGetBlockNumber(lbuffer), newlpage, @@ -567,15 +567,15 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, { /* Splitting the root, three pages to update */ MarkBufferDirty(lbuffer); - memcpy(page, newrootpg, BLCKSZ); - memcpy(BufferGetPage(lbuffer), newlpage, BLCKSZ); - memcpy(BufferGetPage(rbuffer), newrpage, BLCKSZ); + memcpy(page, newrootpg, cluster_block_size); + memcpy(BufferGetPage(lbuffer), newlpage, cluster_block_size); + memcpy(BufferGetPage(rbuffer), newrpage, cluster_block_size); } else { /* Normal split, only two pages to update */ - memcpy(page, newlpage, BLCKSZ); - memcpy(BufferGetPage(rbuffer), newrpage, BLCKSZ); + memcpy(page, newlpage, cluster_block_size); + memcpy(BufferGetPage(rbuffer), newrpage, cluster_block_size); } /* We also clear childbuf's INCOMPLETE_SPLIT flag, if passed */ diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index 9caeac164a..9bef09651e 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -655,7 +655,7 @@ dataBeginPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, break; if (append) { - if ((leaf->lsize - segsize) < (BLCKSZ * 3) / 4) + if ((leaf->lsize - segsize) < (cluster_block_size * 3) / 4) break; } @@ -681,8 +681,8 @@ dataBeginPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, /* * Now allocate a couple of temporary page images, and fill them. 
*/ - *newlpage = palloc(BLCKSZ); - *newrpage = palloc(BLCKSZ); + *newlpage = palloc(cluster_block_size); + *newrpage = palloc(cluster_block_size); dataPlaceToPageLeafSplit(leaf, lbound, rbound, *newlpage, *newrpage); @@ -887,7 +887,7 @@ computeLeafRecompressWALData(disassembledLeaf *leaf) walbufbegin = palloc(sizeof(ginxlogRecompressDataLeaf) + - BLCKSZ + /* max size needed to hold the segment data */ + cluster_block_size + /* max size needed to hold the segment data */ nmodified * 2 /* (segno + action) per action */ ); walbufend = walbufbegin; @@ -1041,8 +1041,8 @@ dataPlaceToPageLeafSplit(disassembledLeaf *leaf, leafSegmentInfo *seginfo; /* Initialize temporary pages to hold the new left and right pages */ - GinInitPage(lpage, GIN_DATA | GIN_LEAF | GIN_COMPRESSED, BLCKSZ); - GinInitPage(rpage, GIN_DATA | GIN_LEAF | GIN_COMPRESSED, BLCKSZ); + GinInitPage(lpage, GIN_DATA | GIN_LEAF | GIN_COMPRESSED, cluster_block_size); + GinInitPage(rpage, GIN_DATA | GIN_LEAF | GIN_COMPRESSED, cluster_block_size); /* * Copy the segments that go to the left page. @@ -1259,7 +1259,7 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf, Page lpage; Page rpage; OffsetNumber separator; - PostingItem allitems[(BLCKSZ / sizeof(PostingItem)) + 1]; + PostingItem allitems[(cluster_block_size / sizeof(PostingItem)) + 1]; lpage = PageGetTempPage(oldpage); rpage = PageGetTempPage(oldpage); @@ -1779,8 +1779,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, bool is_build = (buildStats != NULL); /* Construct the new root page in memory first. 
*/ - tmppage = (Page) palloc(BLCKSZ); - GinInitPage(tmppage, GIN_DATA | GIN_LEAF | GIN_COMPRESSED, BLCKSZ); + tmppage = (Page) palloc(cluster_block_size); + GinInitPage(tmppage, GIN_DATA | GIN_LEAF | GIN_COMPRESSED, cluster_block_size); GinPageGetOpaque(tmppage)->rightlink = InvalidBlockNumber; /* diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index eb6c554831..9a816e5bfe 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -38,8 +38,8 @@ /* GUC parameter */ int gin_pending_list_limit = 0; -#define GIN_PAGE_FREESIZE \ - ( BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) ) +#define gin_page_freesize \ + ( cluster_block_size - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) ) typedef struct KeyArray { @@ -92,7 +92,7 @@ writeListPage(Relation index, Buffer buffer, off++; } - Assert(size <= BLCKSZ); /* else we overran workspace */ + Assert(size <= cluster_block_size); /* else we overran workspace */ GinPageGetOpaque(page)->rightlink = rightlink; @@ -455,7 +455,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) * ginInsertCleanup() should not be called inside our CRIT_SECTION. 
*/ cleanupSize = GinGetPendingListCleanupSize(index); - if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * 1024L) + if (metadata->nPendingPages * gin_page_freesize > cleanupSize * 1024L) needCleanup = true; UnlockReleaseBuffer(metabuffer); diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index 1f0214498c..022f71e25c 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -1632,9 +1632,9 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos) */ for (;;) { - Datum datum[BLCKSZ / sizeof(IndexTupleData)]; - GinNullCategory category[BLCKSZ / sizeof(IndexTupleData)]; - bool datumExtracted[BLCKSZ / sizeof(IndexTupleData)]; + Datum datum[cluster_block_size / sizeof(IndexTupleData)]; + GinNullCategory category[cluster_block_size / sizeof(IndexTupleData)]; + bool datumExtracted[cluster_block_size / sizeof(IndexTupleData)]; Assert(pos->lastOffset > pos->firstOffset); memset(datumExtracted + pos->firstOffset - 1, 0, diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index e5d310d836..628d0f3a40 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -569,7 +569,7 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, BlockNumber blkno = GIN_ROOT_BLKNO; GinVacuumState gvs; Buffer buffer; - BlockNumber rootOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))]; + BlockNumber rootOfPostingTree[cluster_block_size / (sizeof(IndexTupleData) + sizeof(ItemId))]; uint32 nRoot; gvs.tmpCxt = AllocSetContextCreate(CurrentMemoryContext, diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index f7c84beef8..9adaf5e80c 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -146,7 +146,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) GinPostingList *plist; plist = ginCompressPostingList(uncompressed, nuncompressed, - BLCKSZ, 
&npacked); + cluster_block_size, &npacked); totalsize = SizeOfGinPostingList(plist); Assert(npacked == nuncompressed); @@ -236,7 +236,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) Assert(nnewitems == nolditems + nitems); newseg = ginCompressPostingList(newitems, nnewitems, - BLCKSZ, &npacked); + cluster_block_size, &npacked); Assert(npacked == nnewitems); newsegsize = SizeOfGinPostingList(newseg); diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index 5e0c1447f9..8f4bf77616 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -255,7 +255,7 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo) * Calculate target amount of free space to leave on pages. */ fillfactor = options ? options->fillfactor : GIST_DEFAULT_FILLFACTOR; - buildstate.freespace = BLCKSZ * (100 - fillfactor) / 100; + buildstate.freespace = cluster_block_size * (100 - fillfactor) / 100; /* * Build the index using the chosen strategy. @@ -415,7 +415,7 @@ gist_indexsortbuild(GISTBuildState *state) * Write an empty page as a placeholder for the root page. It will be * replaced with the real root page at the end. 
*/ - page = palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE, MCXT_ALLOC_ZERO); + page = palloc_aligned(cluster_block_size, PG_IO_ALIGN_SIZE, MCXT_ALLOC_ZERO); smgrextend(RelationGetSmgr(state->indexrel), MAIN_FORKNUM, GIST_ROOT_BLKNO, page, true); state->pages_allocated++; @@ -510,7 +510,7 @@ gist_indexsortbuild_levelstate_add(GISTBuildState *state, if (levelstate->pages[levelstate->current_page] == NULL) levelstate->pages[levelstate->current_page] = - palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE, 0); + palloc_aligned(cluster_block_size, PG_IO_ALIGN_SIZE, 0); newPage = levelstate->pages[levelstate->current_page]; gistinitpage(newPage, old_page_flags); @@ -580,7 +580,7 @@ gist_indexsortbuild_levelstate_flush(GISTBuildState *state, /* Create page and copy data */ data = (char *) (dist->list); - target = palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE, MCXT_ALLOC_ZERO); + target = palloc_aligned(cluster_block_size, PG_IO_ALIGN_SIZE, MCXT_ALLOC_ZERO); gistinitpage(target, isleaf ? F_LEAF : 0); for (int i = 0; i < dist->block.num; i++) { @@ -631,7 +631,7 @@ gist_indexsortbuild_levelstate_flush(GISTBuildState *state, if (parent == NULL) { parent = palloc0(sizeof(GistSortedBuildLevelState)); - parent->pages[0] = (Page) palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE, 0); + parent->pages[0] = (Page) palloc_aligned(cluster_block_size, PG_IO_ALIGN_SIZE, 0); parent->parent = NULL; gistinitpage(parent->pages[0], 0); @@ -702,7 +702,7 @@ gistInitBuffering(GISTBuildState *buildstate) int levelStep; /* Calc space of index page which is available for index tuples */ - pageFreeSpace = BLCKSZ - SizeOfPageHeaderData - sizeof(GISTPageOpaqueData) + pageFreeSpace = cluster_block_size - SizeOfPageHeaderData - sizeof(GISTPageOpaqueData) - sizeof(ItemIdData) - buildstate->freespace; @@ -799,7 +799,7 @@ gistInitBuffering(GISTBuildState *buildstate) break; /* each node in the lowest level of a subtree has one page in memory */ - if (maxlowestlevelpages > ((double) maintenance_work_mem * 1024) / BLCKSZ) + if 
(maxlowestlevelpages > ((double) maintenance_work_mem * 1024) / cluster_block_size) break; /* Good, we can handle this levelStep. See if we can go one higher. */ @@ -858,7 +858,7 @@ calculatePagesPerBuffer(GISTBuildState *buildstate, int levelStep) Size pageFreeSpace; /* Calc space of index page which is available for index tuples */ - pageFreeSpace = BLCKSZ - SizeOfPageHeaderData - sizeof(GISTPageOpaqueData) + pageFreeSpace = cluster_block_size - SizeOfPageHeaderData - sizeof(GISTPageOpaqueData) - sizeof(ItemIdData) - buildstate->freespace; diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c index 1423b4b047..bdedd12ce2 100644 --- a/src/backend/access/gist/gistbuildbuffers.c +++ b/src/backend/access/gist/gistbuildbuffers.c @@ -187,11 +187,11 @@ gistAllocateNewPageBuffer(GISTBuildBuffers *gfbb) GISTNodeBufferPage *pageBuffer; pageBuffer = (GISTNodeBufferPage *) MemoryContextAllocZero(gfbb->context, - BLCKSZ); + cluster_block_size); pageBuffer->prev = InvalidBlockNumber; /* Set page free space */ - PAGE_FREE_SPACE(pageBuffer) = BLCKSZ - BUFFER_PAGE_DATA_OFFSET; + PAGE_FREE_SPACE(pageBuffer) = cluster_block_size - BUFFER_PAGE_DATA_OFFSET; return pageBuffer; } @@ -379,7 +379,7 @@ gistPushItupToNodeBuffer(GISTBuildBuffers *gfbb, GISTNodeBuffer *nodeBuffer, * the new page by storing its block number in the prev-link. 
*/ PAGE_FREE_SPACE(nodeBuffer->pageBuffer) = - BLCKSZ - MAXALIGN(offsetof(GISTNodeBufferPage, tupledata)); + cluster_block_size - MAXALIGN(offsetof(GISTNodeBufferPage, tupledata)); nodeBuffer->pageBuffer->prev = blkno; /* We've just added one more page */ @@ -755,7 +755,7 @@ ReadTempFileBlock(BufFile *file, long blknum, void *ptr) { if (BufFileSeekBlock(file, blknum) != 0) elog(ERROR, "could not seek to block %ld in temporary file", blknum); - BufFileReadExact(file, ptr, BLCKSZ); + BufFileReadExact(file, ptr, cluster_block_size); } static void @@ -763,5 +763,5 @@ WriteTempFileBlock(BufFile *file, long blknum, const void *ptr) { if (BufFileSeekBlock(file, blknum) != 0) elog(ERROR, "could not seek to block %ld in temporary file", blknum); - BufFileWrite(file, ptr, BLCKSZ); + BufFileWrite(file, ptr, cluster_block_size); } diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index b6bc8c2c56..ae9ab2931c 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -758,7 +758,7 @@ gistinitpage(Page page, uint32 f) { GISTPageOpaque opaque; - PageInit(page, BLCKSZ, sizeof(GISTPageOpaqueData)); + PageInit(page, cluster_block_size, sizeof(GISTPageOpaqueData)); opaque = GistPageGetOpaque(page); opaque->rightlink = InvalidBlockNumber; diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index fc5d97f606..ea78dc81c2 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -152,7 +152,7 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo) * one page. Also, "initial index size" accounting does not include the * metapage, nor the first bitmap page. 
*/ - sort_threshold = (maintenance_work_mem * 1024L) / BLCKSZ; + sort_threshold = (maintenance_work_mem * 1024L) / cluster_block_size; if (index->rd_rel->relpersistence != RELPERSISTENCE_TEMP) sort_threshold = Min(sort_threshold, NBuffers); else diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 0c6e79f1bd..2a77d13feb 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -1012,7 +1012,7 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks) * _hash_freeovflpage for similar usage. We take care to make the special * space valid for the benefit of tools such as pageinspect. */ - _hash_pageinit(page, BLCKSZ); + _hash_pageinit(page, cluster_block_size); ovflopaque = HashPageGetOpaque(page); diff --git a/src/backend/access/heap/README.HOT b/src/backend/access/heap/README.HOT index 74e407f375..25995e7b3b 100644 --- a/src/backend/access/heap/README.HOT +++ b/src/backend/access/heap/README.HOT @@ -254,7 +254,7 @@ large enough to accept any extra maintenance burden for. The currently planned heuristic is to prune and defrag when first accessing a page that potentially has prunable tuples (as flagged by the pd_prune_xid page hint field) and that either has free space less than MAX(fillfactor -target free space, BLCKSZ/10) *or* has recently had an UPDATE fail to +target free space, cluster_block_size/10) *or* has recently had an UPDATE fail to find enough free space to store an updated tuple version. (These rules are subject to change.) 
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 6a66214a58..ef3f3e0bdf 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2048,7 +2048,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, Assert(!HeapTupleHasExternal(tup)); return tup; } - else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD) + else if (HeapTupleHasExternal(tup) || tup->t_len > toast_tuple_threshold) return heap_toast_insert_or_update(relation, tup, NULL, options); else return tup; @@ -2062,7 +2062,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, static int heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace) { - size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace; + size_t page_avail = cluster_block_size - SizeOfPageHeaderData - saveFreeSpace; int npages = 1; for (int i = done; i < ntuples; i++) @@ -2072,7 +2072,7 @@ heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveF if (page_avail < tup_sz) { npages++; - page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace; + page_avail = cluster_block_size - SizeOfPageHeaderData - saveFreeSpace; } page_avail -= tup_sz; } @@ -2333,7 +2333,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, scratchptr += datalen; } totaldatalen = scratchptr - tupledata; - Assert((scratchptr - scratch.data) < BLCKSZ); + Assert((scratchptr - scratch.data) < cluster_block_size); if (need_tuple_data) xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE; @@ -3490,7 +3490,7 @@ l2: else need_toast = (HeapTupleHasExternal(&oldtup) || HeapTupleHasExternal(newtup) || - newtup->t_len > TOAST_TUPLE_THRESHOLD); + newtup->t_len > toast_tuple_threshold); pagefree = PageGetHeapFreeSpace(page); @@ -7990,7 +7990,7 @@ index_delete_sort(TM_IndexDeleteOp *delstate) * Shellsort gap sequence (taken from Sedgewick-Incerpi paper). 
* * This implementation is fast with array sizes up to ~4500. This covers - * all supported BLCKSZ values. + * all supported cluster_block_size values. */ const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1}; @@ -9020,7 +9020,7 @@ heap_xlog_visible(XLogReaderState *record) /* initialize the page if it was read as zeros */ if (PageIsNew(vmpage)) - PageInit(vmpage, BLCKSZ, 0); + PageInit(vmpage, cluster_block_size, 0); /* remove VISIBILITYMAP_XLOG_* */ vmbits = xlrec->flags & VISIBILITYMAP_VALID_BITS; @@ -9325,7 +9325,7 @@ heap_xlog_insert(XLogReaderState *record) * don't bother to update the FSM in that case, it doesn't need to be * totally accurate anyway. */ - if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5) + if (action == BLK_NEEDS_REDO && freespace < cluster_block_size / 5) XLogRecordPageWithFreeSpace(target_locator, blkno, freespace); } @@ -9472,7 +9472,7 @@ heap_xlog_multi_insert(XLogReaderState *record) * don't bother to update the FSM in that case, it doesn't need to be * totally accurate anyway. */ - if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5) + if (action == BLK_NEEDS_REDO && freespace < cluster_block_size / 5) XLogRecordPageWithFreeSpace(rlocator, blkno, freespace); } @@ -9747,7 +9747,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update) * don't bother to update the FSM in that case, it doesn't need to be * totally accurate anyway. */ - if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5) + if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < cluster_block_size / 5) XLogRecordPageWithFreeSpace(rlocator, newblk, freespace); } diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index 5a17112c91..6857b46203 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -2030,7 +2030,7 @@ heapam_scan_get_blocks_done(HeapScanDesc hscan) /* * Check to see whether the table needs a TOAST table. 
It does only if * (1) there are any toastable attributes, and (2) the maximum length - * of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to + * of a tuple could exceed toast_tuple_threshold. (We don't want to * create a toast table for something like "f1 varchar(20)".) */ static bool @@ -2075,7 +2075,7 @@ heapam_relation_needs_toast_table(Relation rel) tuple_length = MAXALIGN(SizeofHeapTupleHeader + BITMAPLEN(tupdesc->natts)) + MAXALIGN(data_length); - return (tuple_length > TOAST_TUPLE_THRESHOLD); + return (tuple_length > toast_tuple_threshold); } /* @@ -2095,8 +2095,8 @@ heapam_relation_toast_am(Relation rel) #define HEAP_OVERHEAD_BYTES_PER_TUPLE \ (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData)) -#define HEAP_USABLE_BYTES_PER_PAGE \ - (BLCKSZ - SizeOfPageHeaderData) +#define heap_usable_bytes_per_page \ + (cluster_block_size - SizeOfPageHeaderData) static void heapam_estimate_rel_size(Relation rel, int32 *attr_widths, @@ -2106,7 +2106,7 @@ heapam_estimate_rel_size(Relation rel, int32 *attr_widths, table_block_relation_estimate_size(rel, attr_widths, pages, tuples, allvisfrac, HEAP_OVERHEAD_BYTES_PER_TUPLE, - HEAP_USABLE_BYTES_PER_PAGE); + heap_usable_bytes_per_page); } diff --git a/src/backend/access/heap/heaptoast.c b/src/backend/access/heap/heaptoast.c index 52ecd45654..98b0a701e5 100644 --- a/src/backend/access/heap/heaptoast.c +++ b/src/backend/access/heap/heaptoast.c @@ -174,7 +174,7 @@ heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, hoff += BITMAPLEN(numAttrs); hoff = MAXALIGN(hoff); /* now convert to a limit on the tuple data size */ - maxDataLen = RelationGetToastTupleTarget(rel, TOAST_TUPLE_TARGET) - hoff; + maxDataLen = RelationGetToastTupleTarget(rel, cluster_toast_tuple_target) - hoff; /* * Look for attributes with attstorage EXTENDED to compress. 
Also find @@ -255,7 +255,7 @@ heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * increase the target tuple size, so that MAIN attributes aren't stored * externally unless really necessary. */ - maxDataLen = TOAST_TUPLE_TARGET_MAIN - hoff; + maxDataLen = toast_tuple_target_main - hoff; while (heap_compute_data_size(tupleDesc, toast_values, toast_isnull) > maxDataLen && @@ -634,7 +634,7 @@ heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, SysScanDesc toastscan; HeapTuple ttup; int32 expectedchunk; - int32 totalchunks = ((attrsize - 1) / TOAST_MAX_CHUNK_SIZE) + 1; + int32 totalchunks = ((attrsize - 1) / cluster_toast_max_chunk_size) + 1; int startchunk; int endchunk; int num_indexes; @@ -647,8 +647,8 @@ heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, &toastidxs, &num_indexes); - startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE; - endchunk = (sliceoffset + slicelength - 1) / TOAST_MAX_CHUNK_SIZE; + startchunk = sliceoffset / cluster_toast_max_chunk_size; + endchunk = (sliceoffset + slicelength - 1) / cluster_toast_max_chunk_size; Assert(endchunk <= totalchunks); /* Set up a scan key to fetch from the index. */ @@ -749,8 +749,8 @@ heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, curchunk, startchunk, endchunk, valueid, RelationGetRelationName(toastrel)))); - expected_size = curchunk < totalchunks - 1 ? TOAST_MAX_CHUNK_SIZE - : attrsize - ((totalchunks - 1) * TOAST_MAX_CHUNK_SIZE); + expected_size = curchunk < totalchunks - 1 ? 
cluster_toast_max_chunk_size + : attrsize - ((totalchunks - 1) * cluster_toast_max_chunk_size); if (chunksize != expected_size) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), @@ -765,12 +765,12 @@ heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, chcpystrt = 0; chcpyend = chunksize - 1; if (curchunk == startchunk) - chcpystrt = sliceoffset % TOAST_MAX_CHUNK_SIZE; + chcpystrt = sliceoffset % cluster_toast_max_chunk_size; if (curchunk == endchunk) - chcpyend = (sliceoffset + slicelength - 1) % TOAST_MAX_CHUNK_SIZE; + chcpyend = (sliceoffset + slicelength - 1) % cluster_toast_max_chunk_size; memcpy(VARDATA(result) + - (curchunk * TOAST_MAX_CHUNK_SIZE - sliceoffset) + chcpystrt, + (curchunk * cluster_toast_max_chunk_size - sliceoffset) + chcpystrt, chunkdata + chcpystrt, (chcpyend - chcpystrt) + 1); diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 47b9e20915..8b8b9c4d79 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -187,7 +187,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer) */ minfree = RelationGetTargetPageFreeSpace(relation, HEAP_DEFAULT_FILLFACTOR); - minfree = Max(minfree, BLCKSZ / 10); + minfree = Max(minfree, cluster_block_size / 10); if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree) { diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index 424958912c..3402625d6d 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -255,7 +255,7 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm state->rs_old_rel = old_heap; state->rs_new_rel = new_heap; - state->rs_buffer = (Page) palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE, 0); + state->rs_buffer = (Page) palloc_aligned(cluster_block_size, PG_IO_ALIGN_SIZE, 0); /* new_heap needn't be empty, just locked */ state->rs_blockno = RelationGetNumberOfBlocks(new_heap); 
state->rs_buffer_valid = false; @@ -631,7 +631,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup) Assert(!HeapTupleHasExternal(tup)); heaptup = tup; } - else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD) + else if (HeapTupleHasExternal(tup) || tup->t_len > toast_tuple_threshold) { int options = HEAP_INSERT_SKIP_FSM; @@ -702,7 +702,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup) if (!state->rs_buffer_valid) { /* Initialize a new empty page */ - PageInit(page, BLCKSZ, 0); + PageInit(page, cluster_block_size, 0); state->rs_buffer_valid = true; } diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 6a41ee635d..d6f46ad60e 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -97,8 +97,8 @@ * Perform a failsafe check each time we scan another 4GB of pages. * (Note that this is deliberately kept to a power-of-two, usually 2^19.) */ -#define FAILSAFE_EVERY_PAGES \ - ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / BLCKSZ)) +#define failsafe_every_pages \ + ((BlockNumber) (((uint64) 4 * 1024 * 1024 * 1024) / cluster_block_size)) /* * When a table has no indexes, vacuum the FSM after every 8GB, approximately @@ -106,8 +106,8 @@ * that has some removable tuples). When there are indexes, this is ignored, * and we vacuum FSM after each index/heap cleaning pass. 
*/ -#define VACUUM_FSM_EVERY_PAGES \ - ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ)) +#define vacuum_fsm_every_pages \ + ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / cluster_block_size)) /* * Before we consider skipping a page that's marked as clean in @@ -749,9 +749,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, } if (secs_dur > 0 || usecs_dur > 0) { - read_rate = (double) BLCKSZ * PageMissOp / (1024 * 1024) / + read_rate = (double) cluster_block_size * PageMissOp / (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0); - write_rate = (double) BLCKSZ * PageDirtyOp / (1024 * 1024) / + write_rate = (double) cluster_block_size * PageDirtyOp / (1024 * 1024) / (secs_dur + usecs_dur / 1000000.0); } appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"), @@ -900,7 +900,7 @@ lazy_scan_heap(LVRelState *vacrel) * one-pass strategy, and the two-pass strategy with the index_cleanup * param set to 'off'. */ - if (vacrel->scanned_pages % FAILSAFE_EVERY_PAGES == 0) + if (vacrel->scanned_pages % failsafe_every_pages == 0) lazy_check_wraparound_failsafe(vacrel); /* @@ -1051,7 +1051,7 @@ lazy_scan_heap(LVRelState *vacrel) * space visible on upper FSM pages. Note we have not yet * performed FSM processing for blkno. 
*/ - if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES) + if (blkno - next_fsm_block_to_vacuum >= vacuum_fsm_every_pages) { FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno); @@ -1439,7 +1439,7 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0) { - freespace = BLCKSZ - SizeOfPageHeaderData; + freespace = cluster_block_size - SizeOfPageHeaderData; RecordPageWithFreeSpace(vacrel->rel, blkno, freespace); } diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 2e18cd88bc..833774c164 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -105,7 +105,7 @@ * extra headers, so the whole page minus the standard page header is * used for the bitmap. */ -#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData)) +#define MAPSIZE (cluster_block_size - MAXALIGN(SizeOfPageHeaderData)) /* Number of heap blocks we can represent in one byte */ #define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK) @@ -613,7 +613,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend) { LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); if (PageIsNew(BufferGetPage(buf))) - PageInit(BufferGetPage(buf), BLCKSZ, 0); + PageInit(BufferGetPage(buf), cluster_block_size, 0); LockBuffer(buf, BUFFER_LOCK_UNLOCK); } return buf; diff --git a/src/backend/access/nbtree/nbtdedup.c b/src/backend/access/nbtree/nbtdedup.c index d4db0b28f2..6d1b02b17c 100644 --- a/src/backend/access/nbtree/nbtdedup.c +++ b/src/backend/access/nbtree/nbtdedup.c @@ -324,7 +324,7 @@ _bt_bottomupdel_pass(Relation rel, Buffer buf, Relation heapRel, state = (BTDedupState) palloc(sizeof(BTDedupStateData)); state->deduplicate = true; state->nmaxitems = 0; - state->maxpostingsize = BLCKSZ; /* We're not really deduplicating */ + state->maxpostingsize = cluster_block_size; /* We're not really deduplicating */ state->base = NULL; 
state->baseoff = InvalidOffsetNumber; state->basetupsize = 0; @@ -353,7 +353,7 @@ _bt_bottomupdel_pass(Relation rel, Buffer buf, Relation heapRel, delstate.irel = rel; delstate.iblknum = BufferGetBlockNumber(buf); delstate.bottomup = true; - delstate.bottomupfreespace = Max(BLCKSZ / 16, newitemsz); + delstate.bottomupfreespace = Max(cluster_block_size / 16, newitemsz); delstate.ndeltids = 0; delstate.deltids = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexDelete)); delstate.status = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexStatus)); @@ -417,7 +417,7 @@ _bt_bottomupdel_pass(Relation rel, Buffer buf, Relation heapRel, return true; /* Don't dedup when we won't end up back here any time soon anyway */ - return PageGetExactFreeSpace(page) >= Max(BLCKSZ / 24, newitemsz); + return PageGetExactFreeSpace(page) >= Max(cluster_block_size / 24, newitemsz); } /* @@ -597,7 +597,7 @@ _bt_dedup_finish_pending(Page newpage, BTDedupState state) spacesaving = state->phystupsize - (tuplesz + sizeof(ItemIdData)); /* Increment nintervals, since we wrote a new posting list tuple */ state->nintervals++; - Assert(spacesaving > 0 && spacesaving < BLCKSZ); + Assert(spacesaving > 0 && spacesaving < cluster_block_size); } /* Reset state for next pending posting list */ diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 6558aea42b..58dcdf6de4 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -70,7 +70,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level, BTMetaPageData *metad; BTPageOpaque metaopaque; - _bt_pageinit(page, BLCKSZ); + _bt_pageinit(page, cluster_block_size); metad = BTPageGetMeta(page); metad->btm_magic = BTREE_MAGIC; @@ -977,7 +977,7 @@ _bt_allocbuf(Relation rel, Relation heaprel) */ buf = ExtendBufferedRel(BMR_REL(rel), MAIN_FORKNUM, NULL, EB_LOCK_FIRST); if (!RelationUsesLocalBuffers(rel)) - VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ); + 
VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), cluster_block_size); /* Initialize the new page before returning it */ page = BufferGetPage(buf); @@ -1060,7 +1060,7 @@ _bt_lockbuf(Relation rel, Buffer buf, int access) * lock/pin held, though. */ if (!RelationUsesLocalBuffers(rel)) - VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ); + VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), cluster_block_size); } /* @@ -1073,13 +1073,13 @@ _bt_unlockbuf(Relation rel, Buffer buf) * Buffer is pinned and locked, which means that it is expected to be * defined and addressable. Check that proactively. */ - VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ); + VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), cluster_block_size); /* LockBuffer() asserts that pin is held by this backend */ LockBuffer(buf, BUFFER_LOCK_UNLOCK); if (!RelationUsesLocalBuffers(rel)) - VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(buf), BLCKSZ); + VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(buf), cluster_block_size); } /* @@ -1097,7 +1097,7 @@ _bt_conditionallockbuf(Relation rel, Buffer buf) return false; if (!RelationUsesLocalBuffers(rel)) - VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ); + VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), cluster_block_size); return true; } @@ -1112,7 +1112,7 @@ _bt_upgradelockbufcleanup(Relation rel, Buffer buf) * Buffer is pinned and locked, which means that it is expected to be * defined and addressable. Check that proactively. 
*/ - VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ); + VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), cluster_block_size); /* LockBuffer() asserts that pin is held by this backend */ LockBuffer(buf, BUFFER_LOCK_UNLOCK); diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 62bc9917f1..cb2757f68d 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -427,8 +427,8 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, */ if (scan->xs_want_itup && so->currTuples == NULL) { - so->currTuples = (char *) palloc(BLCKSZ * 2); - so->markTuples = so->currTuples + BLCKSZ; + so->currTuples = (char *) palloc(cluster_block_size * 2); + so->markTuples = so->currTuples + cluster_block_size; } /* diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index c2665fce41..b701e492fe 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -619,10 +619,10 @@ _bt_blnewpage(uint32 level) Page page; BTPageOpaque opaque; - page = (Page) palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE, 0); + page = (Page) palloc_aligned(cluster_block_size, PG_IO_ALIGN_SIZE, 0); /* Zero the page and set up standard page header info */ - _bt_pageinit(page, BLCKSZ); + _bt_pageinit(page, cluster_block_size); /* Initialize BT opaque state */ opaque = BTPageGetOpaque(page); @@ -660,7 +660,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) while (blkno > wstate->btws_pages_written) { if (!wstate->btws_zeropage) - wstate->btws_zeropage = (Page) palloc_aligned(BLCKSZ, + wstate->btws_zeropage = (Page) palloc_aligned(cluster_block_size, PG_IO_ALIGN_SIZE, MCXT_ALLOC_ZERO); /* don't set checksum for all-zero page */ @@ -715,7 +715,7 @@ _bt_pagestate(BTWriteState *wstate, uint32 level) state->btps_level = level; /* set "full" threshold based on level. See notes at head of file. 
*/ if (level > 0) - state->btps_full = (BLCKSZ * (100 - BTREE_NONLEAF_FILLFACTOR) / 100); + state->btps_full = (cluster_block_size * (100 - BTREE_NONLEAF_FILLFACTOR) / 100); else state->btps_full = BTGetTargetPageFreeSpace(wstate->index); @@ -1172,7 +1172,7 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) * set to point to "P_NONE"). This changes the index to the "valid" state * by filling in a valid magic number in the metapage. */ - metapage = (Page) palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE, 0); + metapage = (Page) palloc_aligned(cluster_block_size, PG_IO_ALIGN_SIZE, 0); _bt_initmetapage(metapage, rootblkno, rootlevel, wstate->inskey->allequalimage); _bt_blwritepage(wstate, metapage, BTREE_METAPAGE); @@ -1350,7 +1350,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) * leaf pages full with few very large tuples doesn't seem * like a useful goal.) */ - dstate->maxpostingsize = MAXALIGN_DOWN((BLCKSZ * 10 / 100)) - + dstate->maxpostingsize = MAXALIGN_DOWN((cluster_block_size * 10 / 100)) - sizeof(ItemIdData); Assert(dstate->maxpostingsize <= BTMaxItemSize(state->btps_page) && dstate->maxpostingsize <= INDEX_SIZE_MASK); diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c index f390c177e4..b9dd2b7e5d 100644 --- a/src/backend/access/rmgrdesc/xlogdesc.c +++ b/src/backend/access/rmgrdesc/xlogdesc.c @@ -271,7 +271,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty, "" : " for WAL verification", XLogRecGetBlock(record, block_id)->hole_offset, XLogRecGetBlock(record, block_id)->hole_length, - BLCKSZ - + cluster_block_size - XLogRecGetBlock(record, block_id)->hole_length - XLogRecGetBlock(record, block_id)->bimg_len, method); diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index 3554edcc9a..d8c2752e85 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -341,8 +341,8 @@ 
checkSplitConditions(Relation index, SpGistState *state, if (SpGistBlockIsRoot(current->blkno)) { /* return impossible values to force split */ - *nToSplit = BLCKSZ; - return BLCKSZ; + *nToSplit = cluster_block_size; + return cluster_block_size; } i = current->offnum; @@ -899,7 +899,7 @@ doPickSplit(Relation index, SpGistState *state, * fit on one page. */ allTheSame = checkAllTheSame(&in, &out, - totalLeafSizes > SPGIST_PAGE_CAPACITY, + totalLeafSizes > spgist_page_capacity, &includeNew); /* @@ -1030,7 +1030,7 @@ doPickSplit(Relation index, SpGistState *state, for (i = 0; i < nToInsert; i++) leafPageSelect[i] = 0; /* signifies current page */ } - else if (in.nTuples == 1 && totalLeafSizes > SPGIST_PAGE_CAPACITY) + else if (in.nTuples == 1 && totalLeafSizes > spgist_page_capacity) { /* * We're trying to split up a long value by repeated suffixing, but @@ -1051,7 +1051,7 @@ doPickSplit(Relation index, SpGistState *state, newLeafBuffer = SpGistGetBuffer(index, GBUF_LEAF | (isNulls ? GBUF_NULLS : 0), Min(totalLeafSizes, - SPGIST_PAGE_CAPACITY), + spgist_page_capacity), &xlrec.initDest); /* @@ -1995,13 +1995,13 @@ spgdoinsert(Relation index, SpGistState *state, * If it isn't gonna fit, and the opclass can't reduce the datum size by * suffixing, bail out now rather than doing a lot of useless work. */ - if (leafSize > SPGIST_PAGE_CAPACITY && + if (leafSize > spgist_page_capacity && (isnull || !state->config.longValuesOK)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", leafSize - sizeof(ItemIdData), - SPGIST_PAGE_CAPACITY - sizeof(ItemIdData), + spgist_page_capacity - sizeof(ItemIdData), RelationGetRelationName(index)), errhint("Values larger than a buffer page cannot be indexed."))); bestLeafSize = leafSize; @@ -2059,7 +2059,7 @@ spgdoinsert(Relation index, SpGistState *state, current.buffer = SpGistGetBuffer(index, GBUF_LEAF | (isnull ? 
GBUF_NULLS : 0), - Min(leafSize, SPGIST_PAGE_CAPACITY), + Min(leafSize, spgist_page_capacity), &isNew); current.blkno = BufferGetBlockNumber(current.buffer); } @@ -2122,9 +2122,9 @@ spgdoinsert(Relation index, SpGistState *state, } else if ((sizeToSplit = checkSplitConditions(index, state, ¤t, - &nToSplit)) < SPGIST_PAGE_CAPACITY / 2 && + &nToSplit)) < spgist_page_capacity / 2 && nToSplit < 64 && - leafTuple->size + sizeof(ItemIdData) + sizeToSplit <= SPGIST_PAGE_CAPACITY) + leafTuple->size + sizeof(ItemIdData) + sizeToSplit <= spgist_page_capacity) { /* * the amount of data is pretty small, so just move the whole @@ -2258,7 +2258,7 @@ spgdoinsert(Relation index, SpGistState *state, * than MAXALIGN, to accommodate opclasses that trim one * byte from the leaf datum per pass.) */ - if (leafSize > SPGIST_PAGE_CAPACITY) + if (leafSize > spgist_page_capacity) { bool ok = false; @@ -2278,7 +2278,7 @@ spgdoinsert(Relation index, SpGistState *state, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index row size %zu exceeds maximum %zu for index \"%s\"", leafSize - sizeof(ItemIdData), - SPGIST_PAGE_CAPACITY - sizeof(ItemIdData), + spgist_page_capacity - sizeof(ItemIdData), RelationGetRelationName(index)), errhint("Values larger than a buffer page cannot be indexed."))); } diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c index 03a7afdbab..0b247d4789 100644 --- a/src/backend/access/spgist/spgtextproc.c +++ b/src/backend/access/spgist/spgtextproc.c @@ -53,20 +53,20 @@ * In the worst case, an inner tuple in a text radix tree could have as many * as 258 nodes (one for each possible byte value, plus the two special * cases). Each node can take 16 bytes on MAXALIGN=8 machines. The inner - * tuple must fit on an index page of size BLCKSZ. Rather than assuming we + * tuple must fit on an index page of size cluster_block_size. 
Rather than assuming we * know the exact amount of overhead imposed by page headers, tuple headers, * etc, we leave 100 bytes for that (the actual overhead should be no more * than 56 bytes at this writing, so there is slop in this number). - * So we can safely create prefixes up to BLCKSZ - 258 * 16 - 100 bytes long. + * So we can safely create prefixes up to cluster_block_size - 258 * 16 - 100 bytes long. * Unfortunately, because 258 * 16 is over 4K, there is no safe prefix length - * when BLCKSZ is less than 8K; it is always possible to get "SPGiST inner + * when cluster_block_size is less than 8K; it is always possible to get "SPGiST inner * tuple size exceeds maximum" if there are too many distinct next-byte values * at a given place in the tree. Since use of nonstandard block sizes appears * to be negligible in the field, we just live with that fact for now, - * choosing a max prefix size of 32 bytes when BLCKSZ is configured smaller + * choosing a max prefix size of 32 bytes when cluster_block_size is configured smaller * than default. */ -#define SPGIST_MAX_PREFIX_LENGTH Max((int) (BLCKSZ - 258 * 16 - 100), 32) +#define SPGIST_MAX_PREFIX_LENGTH Max((int) (cluster_block_size - 258 * 16 - 100), 32) /* * Strategy for collation aware operator on text is equal to btree strategy diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index 8f32e46fb8..6dfd875bc9 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -544,7 +544,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew) SpGistLastUsedPage *lup; /* Bail out if even an empty page wouldn't meet the demand */ - if (needSpace > SPGIST_PAGE_CAPACITY) + if (needSpace > spgist_page_capacity) elog(ERROR, "desired SPGiST tuple size is too big"); /* @@ -555,7 +555,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew) * error for requests that would otherwise be legal. 
*/ needSpace += SpGistGetTargetPageFreeSpace(index); - needSpace = Min(needSpace, SPGIST_PAGE_CAPACITY); + needSpace = Min(needSpace, spgist_page_capacity); /* Get the cache entry for this flags setting */ lup = GET_LUP(cache, flags); @@ -681,7 +681,7 @@ SpGistInitPage(Page page, uint16 f) { SpGistPageOpaque opaque; - PageInit(page, BLCKSZ, sizeof(SpGistPageOpaqueData)); + PageInit(page, cluster_block_size, sizeof(SpGistPageOpaqueData)); opaque = SpGistPageGetOpaque(page); opaque->flags = f; opaque->spgist_page_id = SPGIST_PAGE_ID; @@ -693,7 +693,7 @@ SpGistInitPage(Page page, uint16 f) void SpGistInitBuffer(Buffer b, uint16 f) { - Assert(BufferGetPageSize(b) == BLCKSZ); + Assert(BufferGetPageSize(b) == cluster_block_size); SpGistInitPage(BufferGetPage(b), f); } @@ -1002,12 +1002,12 @@ spgFormInnerTuple(SpGistState *state, bool hasPrefix, Datum prefix, /* * Inner tuple should be small enough to fit on a page */ - if (size > SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)) + if (size > spgist_page_capacity - sizeof(ItemIdData)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("SP-GiST inner tuple size %zu exceeds maximum %zu", (Size) size, - SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)), + spgist_page_capacity - sizeof(ItemIdData)), errhint("Values larger than a buffer page cannot be indexed."))); /* diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c index c6bdb7e1c6..6ca676143c 100644 --- a/src/backend/access/table/tableam.c +++ b/src/backend/access/table/tableam.c @@ -636,7 +636,7 @@ table_block_relation_size(Relation rel, ForkNumber forkNumber) else nblocks = smgrnblocks(RelationGetSmgr(rel), forkNumber); - return nblocks * BLCKSZ; + return nblocks * cluster_block_size; } /* diff --git a/src/backend/access/transam/README b/src/backend/access/transam/README index 22c8ae9755..1773ed1da5 100644 --- a/src/backend/access/transam/README +++ b/src/backend/access/transam/README @@ -851,7 +851,7 @@ we won't be able to hint its 
outputs until the second xact is sync'd, up to three walwriter cycles later. This argues for keeping N (the group size) as small as possible. For the moment we are setting the group size to 32, which makes the LSN cache space the same size as the actual clog buffer -space (independently of BLCKSZ). +space (independently of cluster_block_size). It is useful that we can run both synchronous and asynchronous commit transactions concurrently, but the safety of this is perhaps not diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index 4a431d5876..fc9118487d 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -45,7 +45,7 @@ #include "storage/sync.h" /* - * Defines for CLOG page sizes. A page is the same BLCKSZ as is used + * Defines for CLOG page sizes. A page is the same cluster_block_size as is used * everywhere else in Postgres. * * Note: because TransactionIds are 32 bits and wrap around at 0xFFFFFFFF, @@ -59,7 +59,7 @@ /* We need two bits per xact, so four xacts fit in a byte */ #define CLOG_BITS_PER_XACT 2 #define CLOG_XACTS_PER_BYTE 4 -#define CLOG_XACTS_PER_PAGE (BLCKSZ * CLOG_XACTS_PER_BYTE) +#define CLOG_XACTS_PER_PAGE (cluster_block_size * CLOG_XACTS_PER_BYTE) #define CLOG_XACT_BITMASK ((1 << CLOG_BITS_PER_XACT) - 1) #define TransactionIdToPage(xid) ((xid) / (TransactionId) CLOG_XACTS_PER_PAGE) @@ -802,7 +802,7 @@ TrimCLOG(void) /* Zero so-far-unused positions in the current byte */ *byteptr &= (1 << bshift) - 1; /* Zero the rest of the page */ - MemSet(byteptr + 1, 0, BLCKSZ - byteno - 1); + MemSet(byteptr + 1, 0, cluster_block_size - byteno - 1); XactCtl->shared->page_dirty[slotno] = true; } diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index b897fabc70..e9fb797c82 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -37,13 +37,13 @@ #include "utils/timestamp.h" /* - * Defines for CommitTs page 
sizes. A page is the same BLCKSZ as is used + * Defines for CommitTs page sizes. A page is the same cluster_block_size as is used * everywhere else in Postgres. * * Note: because TransactionIds are 32 bits and wrap around at 0xFFFFFFFF, * CommitTs page numbering also wraps around at - * 0xFFFFFFFF/COMMIT_TS_XACTS_PER_PAGE, and CommitTs segment numbering at - * 0xFFFFFFFF/COMMIT_TS_XACTS_PER_PAGE/SLRU_PAGES_PER_SEGMENT. We need take no + * 0xFFFFFFFF/commit_ts_xacts_per_page, and CommitTs segment numbering at + * 0xFFFFFFFF/commit_ts_xacts_per_page/SLRU_PAGES_PER_SEGMENT. We need take no * explicit notice of that fact in this module, except when comparing segment * and page numbers in TruncateCommitTs (see CommitTsPagePrecedes). */ @@ -62,13 +62,13 @@ typedef struct CommitTimestampEntry #define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \ sizeof(RepOriginId)) -#define COMMIT_TS_XACTS_PER_PAGE \ - (BLCKSZ / SizeOfCommitTimestampEntry) +#define commit_ts_xacts_per_page \ + (cluster_block_size / SizeOfCommitTimestampEntry) #define TransactionIdToCTsPage(xid) \ - ((xid) / (TransactionId) COMMIT_TS_XACTS_PER_PAGE) + ((xid) / (TransactionId) commit_ts_xacts_per_page) #define TransactionIdToCTsEntry(xid) \ - ((xid) % (TransactionId) COMMIT_TS_XACTS_PER_PAGE) + ((xid) % (TransactionId) commit_ts_xacts_per_page) /* * Link to shared-memory data structures for CommitTs control @@ -524,7 +524,7 @@ CommitTsShmemInit(void) CommitTsSLRULock, "pg_commit_ts", LWTRANCHE_COMMITTS_BUFFER, SYNC_HANDLER_COMMIT_TS); - SlruPagePrecedesUnitTests(CommitTsCtl, COMMIT_TS_XACTS_PER_PAGE); + SlruPagePrecedesUnitTests(CommitTsCtl, commit_ts_xacts_per_page); commitTsShared = ShmemInitStruct("CommitTs shared", sizeof(CommitTimestampShared), @@ -898,7 +898,7 @@ AdvanceOldestCommitTsXid(TransactionId oldestXact) * Decide whether a commitTS page number is "older" for truncation purposes. * Analogous to CLOGPagePrecedes(). 
* - * At default BLCKSZ, (1 << 31) % COMMIT_TS_XACTS_PER_PAGE == 128. This + * At default cluster_block_size, (1 << 31) % commit_ts_xacts_per_page == 128. This * introduces differences compared to CLOG and the other SLRUs having (1 << * 31) % per_page == 0. This function never tests exactly * TransactionIdPrecedes(x-2^31, x). When the system reaches xidStopLimit, @@ -923,13 +923,13 @@ CommitTsPagePrecedes(int page1, int page2) TransactionId xid1; TransactionId xid2; - xid1 = ((TransactionId) page1) * COMMIT_TS_XACTS_PER_PAGE; + xid1 = ((TransactionId) page1) * commit_ts_xacts_per_page; xid1 += FirstNormalTransactionId + 1; - xid2 = ((TransactionId) page2) * COMMIT_TS_XACTS_PER_PAGE; + xid2 = ((TransactionId) page2) * commit_ts_xacts_per_page; xid2 += FirstNormalTransactionId + 1; return (TransactionIdPrecedes(xid1, xid2) && - TransactionIdPrecedes(xid1, xid2 + COMMIT_TS_XACTS_PER_PAGE - 1)); + TransactionIdPrecedes(xid1, xid2 + commit_ts_xacts_per_page - 1)); } diff --git a/src/backend/access/transam/generic_xlog.c b/src/backend/access/transam/generic_xlog.c index 6c68191ca6..771dde3419 100644 --- a/src/backend/access/transam/generic_xlog.c +++ b/src/backend/access/transam/generic_xlog.c @@ -45,7 +45,7 @@ */ #define FRAGMENT_HEADER_SIZE (2 * sizeof(OffsetNumber)) #define MATCH_THRESHOLD FRAGMENT_HEADER_SIZE -#define MAX_DELTA_SIZE (BLCKSZ + 2 * FRAGMENT_HEADER_SIZE) +#define MAX_DELTA_SIZE (cluster_block_size + 2 * FRAGMENT_HEADER_SIZE) /* Struct of generic xlog data for single page */ typedef struct @@ -241,8 +241,8 @@ computeDelta(PageData *pageData, Page curpage, Page targetpage) 0, curLower); /* ... and for upper part, ignoring what's between */ computeRegionDelta(pageData, curpage, targetpage, - targetUpper, BLCKSZ, - curUpper, BLCKSZ); + targetUpper, cluster_block_size, + curUpper, cluster_block_size); /* * If xlog debug is enabled, then check produced delta. 
Result of delta @@ -253,11 +253,11 @@ computeDelta(PageData *pageData, Page curpage, Page targetpage) { PGAlignedBlock tmp; - memcpy(tmp.data, curpage, BLCKSZ); + memcpy(tmp.data, curpage, cluster_block_size); applyPageRedo(tmp.data, pageData->delta, pageData->deltaLen); if (memcmp(tmp.data, targetpage, targetLower) != 0 || memcmp(tmp.data + targetUpper, targetpage + targetUpper, - BLCKSZ - targetUpper) != 0) + cluster_block_size - targetUpper) != 0) elog(ERROR, "result of generic xlog apply does not match"); } #endif @@ -311,7 +311,7 @@ GenericXLogRegisterBuffer(GenericXLogState *state, Buffer buffer, int flags) /* Empty slot, so use it (there cannot be a match later) */ page->buffer = buffer; page->flags = flags; - memcpy(page->image, BufferGetPage(buffer), BLCKSZ); + memcpy(page->image, BufferGetPage(buffer), cluster_block_size); return (Page) page->image; } else if (page->buffer == buffer) @@ -373,7 +373,7 @@ GenericXLogFinish(GenericXLogState *state) pageHeader->pd_upper - pageHeader->pd_lower); memcpy(page + pageHeader->pd_upper, pageData->image + pageHeader->pd_upper, - BLCKSZ - pageHeader->pd_upper); + cluster_block_size - pageHeader->pd_upper); XLogRegisterBuffer(i, pageData->buffer, REGBUF_FORCE_IMAGE | REGBUF_STANDARD); @@ -392,7 +392,7 @@ GenericXLogFinish(GenericXLogState *state) pageHeader->pd_upper - pageHeader->pd_lower); memcpy(page + pageHeader->pd_upper, pageData->image + pageHeader->pd_upper, - BLCKSZ - pageHeader->pd_upper); + cluster_block_size - pageHeader->pd_upper); XLogRegisterBuffer(i, pageData->buffer, REGBUF_STANDARD); XLogRegisterBufData(i, pageData->delta, pageData->deltaLen); @@ -426,7 +426,7 @@ GenericXLogFinish(GenericXLogState *state) continue; memcpy(BufferGetPage(pageData->buffer), pageData->image, - BLCKSZ); + cluster_block_size); /* We don't worry about zeroing the "hole" in this case */ MarkBufferDirty(pageData->buffer); } diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 
abb022e067..ef135140e3 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -93,25 +93,25 @@ /* - * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is + * Defines for MultiXactOffset page sizes. A page is the same cluster_block_size as is * used everywhere else in Postgres. * * Note: because MultiXactOffsets are 32 bits and wrap around at 0xFFFFFFFF, * MultiXact page numbering also wraps around at - * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE, and segment numbering at - * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_PAGES_PER_SEGMENT. We need + * 0xFFFFFFFF/multixact_offsets_per_page, and segment numbering at + * 0xFFFFFFFF/multixact_offsets_per_page/SLRU_PAGES_PER_SEGMENT. We need * take no explicit notice of that fact in this module, except when comparing * segment and page numbers in TruncateMultiXact (see * MultiXactOffsetPagePrecedes). */ /* We need four bytes per offset */ -#define MULTIXACT_OFFSETS_PER_PAGE (BLCKSZ / sizeof(MultiXactOffset)) +#define multixact_offsets_per_page (cluster_block_size / sizeof(MultiXactOffset)) #define MultiXactIdToOffsetPage(xid) \ - ((xid) / (MultiXactOffset) MULTIXACT_OFFSETS_PER_PAGE) + ((xid) / (MultiXactOffset) multixact_offsets_per_page) #define MultiXactIdToOffsetEntry(xid) \ - ((xid) % (MultiXactOffset) MULTIXACT_OFFSETS_PER_PAGE) + ((xid) % (MultiXactOffset) multixact_offsets_per_page) #define MultiXactIdToOffsetSegment(xid) (MultiXactIdToOffsetPage(xid) / SLRU_PAGES_PER_SEGMENT) /* @@ -119,7 +119,7 @@ * additional flag bits for each TransactionId. To do this without getting * into alignment issues, we store four bytes of flags, and then the * corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group", and - * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups + * are stored as a whole in pages. Thus, with 8kB cluster_block_size, we keep 409 groups * per page. 
This wastes 12 bytes per page, but that's OK -- simplicity (and * performance) trumps space efficiency here. * @@ -138,9 +138,9 @@ /* size in bytes of a complete group */ #define MULTIXACT_MEMBERGROUP_SIZE \ (sizeof(TransactionId) * MULTIXACT_MEMBERS_PER_MEMBERGROUP + MULTIXACT_FLAGBYTES_PER_GROUP) -#define MULTIXACT_MEMBERGROUPS_PER_PAGE (BLCKSZ / MULTIXACT_MEMBERGROUP_SIZE) -#define MULTIXACT_MEMBERS_PER_PAGE \ - (MULTIXACT_MEMBERGROUPS_PER_PAGE * MULTIXACT_MEMBERS_PER_MEMBERGROUP) +#define multixact_membergroups_per_page (cluster_block_size / MULTIXACT_MEMBERGROUP_SIZE) +#define multixact_members_per_page \ + (multixact_membergroups_per_page * MULTIXACT_MEMBERS_PER_MEMBERGROUP) /* * Because the number of items per page is not a divisor of the last item @@ -153,16 +153,16 @@ * This constant is the number of members in the last page of the last segment. */ #define MAX_MEMBERS_IN_LAST_MEMBERS_PAGE \ - ((uint32) ((0xFFFFFFFF % MULTIXACT_MEMBERS_PER_PAGE) + 1)) + ((uint32) ((0xFFFFFFFF % multixact_members_per_page) + 1)) /* page in which a member is to be found */ -#define MXOffsetToMemberPage(xid) ((xid) / (TransactionId) MULTIXACT_MEMBERS_PER_PAGE) +#define MXOffsetToMemberPage(xid) ((xid) / (TransactionId) multixact_members_per_page) #define MXOffsetToMemberSegment(xid) (MXOffsetToMemberPage(xid) / SLRU_PAGES_PER_SEGMENT) /* Location (byte offset within page) of flag word for a given member */ #define MXOffsetToFlagsOffset(xid) \ ((((xid) / (TransactionId) MULTIXACT_MEMBERS_PER_MEMBERGROUP) % \ - (TransactionId) MULTIXACT_MEMBERGROUPS_PER_PAGE) * \ + (TransactionId) multixact_membergroups_per_page) * \ (TransactionId) MULTIXACT_MEMBERGROUP_SIZE) #define MXOffsetToFlagsBitShift(xid) \ (((xid) % (TransactionId) MULTIXACT_MEMBERS_PER_MEMBERGROUP) * \ @@ -1152,7 +1152,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) if (MultiXactState->oldestOffsetKnown && MultiXactOffsetWouldWrap(MultiXactState->offsetStopLimit, nextOffset, - nmembers + 
MULTIXACT_MEMBERS_PER_PAGE * SLRU_PAGES_PER_SEGMENT * OFFSET_WARN_SEGMENTS)) + nmembers + multixact_members_per_page * SLRU_PAGES_PER_SEGMENT * OFFSET_WARN_SEGMENTS)) ereport(WARNING, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used", @@ -1855,7 +1855,7 @@ MultiXactShmemInit(void) MultiXactOffsetSLRULock, "pg_multixact/offsets", LWTRANCHE_MULTIXACTOFFSET_BUFFER, SYNC_HANDLER_MULTIXACT_OFFSET); - SlruPagePrecedesUnitTests(MultiXactOffsetCtl, MULTIXACT_OFFSETS_PER_PAGE); + SlruPagePrecedesUnitTests(MultiXactOffsetCtl, multixact_offsets_per_page); SimpleLruInit(MultiXactMemberCtl, "MultiXactMember", NUM_MULTIXACTMEMBER_BUFFERS, 0, MultiXactMemberSLRULock, "pg_multixact/members", @@ -2072,7 +2072,7 @@ TrimMultiXact(void) offptr = (MultiXactOffset *) MultiXactOffsetCtl->shared->page_buffer[slotno]; offptr += entryno; - MemSet(offptr, 0, BLCKSZ - (entryno * sizeof(MultiXactOffset))); + MemSet(offptr, 0, cluster_block_size - (entryno * sizeof(MultiXactOffset))); MultiXactOffsetCtl->shared->page_dirty[slotno] = true; } @@ -2104,7 +2104,7 @@ TrimMultiXact(void) xidptr = (TransactionId *) (MultiXactMemberCtl->shared->page_buffer[slotno] + memberoff); - MemSet(xidptr, 0, BLCKSZ - memberoff); + MemSet(xidptr, 0, cluster_block_size - memberoff); /* * Note: we don't need to zero out the flag bits in the remaining @@ -2480,7 +2480,7 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers) difference = MaxMultiXactOffset - offset + 1; } else - difference = MULTIXACT_MEMBERS_PER_PAGE - offset % MULTIXACT_MEMBERS_PER_PAGE; + difference = multixact_members_per_page - offset % multixact_members_per_page; /* * Advance to next page, taking care to properly handle the wraparound @@ -2634,10 +2634,10 @@ SetOffsetVacuumLimit(bool is_startup) { /* move back to start of the corresponding segment */ offsetStopLimit = oldestOffset - (oldestOffset % - (MULTIXACT_MEMBERS_PER_PAGE * 
SLRU_PAGES_PER_SEGMENT)); + (multixact_members_per_page * SLRU_PAGES_PER_SEGMENT)); /* always leave one segment before the wraparound point */ - offsetStopLimit -= (MULTIXACT_MEMBERS_PER_PAGE * SLRU_PAGES_PER_SEGMENT); + offsetStopLimit -= (multixact_members_per_page * SLRU_PAGES_PER_SEGMENT); if (!prevOldestOffsetKnown && !is_startup) ereport(LOG, @@ -2997,7 +2997,7 @@ TruncateMultiXact(MultiXactId newOldestMulti, Oid newOldestMultiDB) */ trunc.earliestExistingPage = -1; SlruScanDirectory(MultiXactOffsetCtl, SlruScanDirCbFindEarliest, &trunc); - earliest = trunc.earliestExistingPage * MULTIXACT_OFFSETS_PER_PAGE; + earliest = trunc.earliestExistingPage * multixact_offsets_per_page; if (earliest < FirstMultiXactId) earliest = FirstMultiXactId; @@ -3118,14 +3118,14 @@ MultiXactOffsetPagePrecedes(int page1, int page2) MultiXactId multi1; MultiXactId multi2; - multi1 = ((MultiXactId) page1) * MULTIXACT_OFFSETS_PER_PAGE; + multi1 = ((MultiXactId) page1) * multixact_offsets_per_page; multi1 += FirstMultiXactId + 1; - multi2 = ((MultiXactId) page2) * MULTIXACT_OFFSETS_PER_PAGE; + multi2 = ((MultiXactId) page2) * multixact_offsets_per_page; multi2 += FirstMultiXactId + 1; return (MultiXactIdPrecedes(multi1, multi2) && MultiXactIdPrecedes(multi1, - multi2 + MULTIXACT_OFFSETS_PER_PAGE - 1)); + multi2 + multixact_offsets_per_page - 1)); } /* @@ -3138,12 +3138,12 @@ MultiXactMemberPagePrecedes(int page1, int page2) MultiXactOffset offset1; MultiXactOffset offset2; - offset1 = ((MultiXactOffset) page1) * MULTIXACT_MEMBERS_PER_PAGE; - offset2 = ((MultiXactOffset) page2) * MULTIXACT_MEMBERS_PER_PAGE; + offset1 = ((MultiXactOffset) page1) * multixact_members_per_page; + offset2 = ((MultiXactOffset) page2) * multixact_members_per_page; return (MultiXactOffsetPrecedes(offset1, offset2) && MultiXactOffsetPrecedes(offset1, - offset2 + MULTIXACT_MEMBERS_PER_PAGE - 1)); + offset2 + multixact_members_per_page - 1)); } /* diff --git a/src/backend/access/transam/slru.c 
b/src/backend/access/transam/slru.c index 71ac70fb40..675eae6361 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -169,7 +169,7 @@ SimpleLruShmemSize(int nslots, int nlsns) if (nlsns > 0) sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */ - return BUFFERALIGN(sz) + BLCKSZ * nslots; + return BUFFERALIGN(sz) + cluster_block_size * nslots; } /* @@ -251,7 +251,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, shared->page_status[slotno] = SLRU_PAGE_EMPTY; shared->page_dirty[slotno] = false; shared->page_lru_count[slotno] = 0; - ptr += BLCKSZ; + ptr += cluster_block_size; } /* Should fit to estimated shmem size */ @@ -297,7 +297,7 @@ SimpleLruZeroPage(SlruCtl ctl, int pageno) SlruRecentlyUsed(shared, slotno); /* Set the buffer to zeroes */ - MemSet(shared->page_buffer[slotno], 0, BLCKSZ); + MemSet(shared->page_buffer[slotno], 0, cluster_block_size); /* Set the LSNs for this new page to zero */ SimpleLruZeroLSNs(ctl, slotno); @@ -628,7 +628,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno) { int segno = pageno / SLRU_PAGES_PER_SEGMENT; int rpageno = pageno % SLRU_PAGES_PER_SEGMENT; - int offset = rpageno * BLCKSZ; + int offset = rpageno * cluster_block_size; char path[MAXPGPATH]; int fd; bool result; @@ -659,7 +659,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno) SlruReportIOError(ctl, pageno, 0); } - result = endpos >= (off_t) (offset + BLCKSZ); + result = endpos >= (off_t) (offset + cluster_block_size); if (CloseTransientFile(fd) != 0) { @@ -687,7 +687,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno) SlruShared shared = ctl->shared; int segno = pageno / SLRU_PAGES_PER_SEGMENT; int rpageno = pageno % SLRU_PAGES_PER_SEGMENT; - off_t offset = rpageno * BLCKSZ; + off_t offset = rpageno * cluster_block_size; char path[MAXPGPATH]; int fd; @@ -713,13 +713,13 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno) ereport(LOG, (errmsg("file \"%s\" 
doesn't exist, reading as zeroes", path))); - MemSet(shared->page_buffer[slotno], 0, BLCKSZ); + MemSet(shared->page_buffer[slotno], 0, cluster_block_size); return true; } errno = 0; pgstat_report_wait_start(WAIT_EVENT_SLRU_READ); - if (pg_pread(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ) + if (pg_pread(fd, shared->page_buffer[slotno], cluster_block_size, offset) != cluster_block_size) { pgstat_report_wait_end(); slru_errcause = SLRU_READ_FAILED; @@ -759,7 +759,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata) SlruShared shared = ctl->shared; int segno = pageno / SLRU_PAGES_PER_SEGMENT; int rpageno = pageno % SLRU_PAGES_PER_SEGMENT; - off_t offset = rpageno * BLCKSZ; + off_t offset = rpageno * cluster_block_size; char path[MAXPGPATH]; int fd = -1; @@ -874,7 +874,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata) errno = 0; pgstat_report_wait_start(WAIT_EVENT_SLRU_WRITE); - if (pg_pwrite(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ) + if (pg_pwrite(fd, shared->page_buffer[slotno], cluster_block_size, offset) != cluster_block_size) { pgstat_report_wait_end(); /* if write didn't set errno, assume problem is no disk space */ @@ -933,7 +933,7 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid) { int segno = pageno / SLRU_PAGES_PER_SEGMENT; int rpageno = pageno % SLRU_PAGES_PER_SEGMENT; - int offset = rpageno * BLCKSZ; + int offset = rpageno * cluster_block_size; char path[MAXPGPATH]; SlruFileName(ctl, path, segno); diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index 62bb610167..c7f3246164 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -36,23 +36,23 @@ /* - * Defines for SubTrans page sizes. A page is the same BLCKSZ as is used + * Defines for SubTrans page sizes. A page is the same cluster_block_size as is used * everywhere else in Postgres. 
* * Note: because TransactionIds are 32 bits and wrap around at 0xFFFFFFFF, * SubTrans page numbering also wraps around at - * 0xFFFFFFFF/SUBTRANS_XACTS_PER_PAGE, and segment numbering at - * 0xFFFFFFFF/SUBTRANS_XACTS_PER_PAGE/SLRU_PAGES_PER_SEGMENT. We need take no + * 0xFFFFFFFF/subtrans_xacts_per_page, and segment numbering at + * 0xFFFFFFFF/subtrans_xacts_per_page/SLRU_PAGES_PER_SEGMENT. We need take no * explicit notice of that fact in this module, except when comparing segment * and page numbers in TruncateSUBTRANS (see SubTransPagePrecedes) and zeroing * them in StartupSUBTRANS. */ /* We need four bytes per xact */ -#define SUBTRANS_XACTS_PER_PAGE (BLCKSZ / sizeof(TransactionId)) +#define subtrans_xacts_per_page (cluster_block_size / sizeof(TransactionId)) -#define TransactionIdToPage(xid) ((xid) / (TransactionId) SUBTRANS_XACTS_PER_PAGE) -#define TransactionIdToEntry(xid) ((xid) % (TransactionId) SUBTRANS_XACTS_PER_PAGE) +#define TransactionIdToPage(xid) ((xid) / (TransactionId) subtrans_xacts_per_page) +#define TransactionIdToEntry(xid) ((xid) % (TransactionId) subtrans_xacts_per_page) /* @@ -194,7 +194,7 @@ SUBTRANSShmemInit(void) SimpleLruInit(SubTransCtl, "Subtrans", NUM_SUBTRANS_BUFFERS, 0, SubtransSLRULock, "pg_subtrans", LWTRANCHE_SUBTRANS_BUFFER, SYNC_HANDLER_NONE); - SlruPagePrecedesUnitTests(SubTransCtl, SUBTRANS_XACTS_PER_PAGE); + SlruPagePrecedesUnitTests(SubTransCtl, subtrans_xacts_per_page); } /* @@ -364,11 +364,11 @@ SubTransPagePrecedes(int page1, int page2) TransactionId xid1; TransactionId xid2; - xid1 = ((TransactionId) page1) * SUBTRANS_XACTS_PER_PAGE; + xid1 = ((TransactionId) page1) * subtrans_xacts_per_page; xid1 += FirstNormalTransactionId + 1; - xid2 = ((TransactionId) page2) * SUBTRANS_XACTS_PER_PAGE; + xid2 = ((TransactionId) page2) * subtrans_xacts_per_page; xid2 += FirstNormalTransactionId + 1; return (TransactionIdPrecedes(xid1, xid2) && - TransactionIdPrecedes(xid1, xid2 + SUBTRANS_XACTS_PER_PAGE - 1)); + 
TransactionIdPrecedes(xid1, xid2 + subtrans_xacts_per_page - 1)); } diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c index 94e152694e..8b4ed3d053 100644 --- a/src/backend/access/transam/timeline.c +++ b/src/backend/access/transam/timeline.c @@ -307,7 +307,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, char path[MAXPGPATH]; char tmppath[MAXPGPATH]; char histfname[MAXFNAMELEN]; - char buffer[BLCKSZ]; + char buffer[cluster_block_size]; int srcfd; int fd; int nbytes; diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index 334adac09e..ff375874b1 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -370,7 +370,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid) * being significant compared to total XID space. (VACUUM requires an XID * if it truncates at wal_level!=minimal. "VACUUM (ANALYZE)", which a DBA * might do by reflex, assigns an XID. Hence, we had better be sure - * there's lots of XIDs left...) Also, at default BLCKSZ, this leaves two + * there's lots of XIDs left...) Also, at default cluster_block_size, this leaves two * completely-idle segments. In the event of edge-case bugs involving * page or segment arithmetic, idle segments render the bugs unreachable * outside of single-user mode. 
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index f6f8adc72a..c7f38f7bc6 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -3923,7 +3923,7 @@ WriteControlFile(void) ControlFile->maxAlign = MAXIMUM_ALIGNOF; ControlFile->floatFormat = FLOATFORMAT_VALUE; - ControlFile->blcksz = BLCKSZ; + ControlFile->blcksz = cluster_block_size; ControlFile->relseg_size = RELSEG_SIZE; ControlFile->xlog_blcksz = XLOG_BLCKSZ; ControlFile->xlog_seg_size = wal_segment_size; @@ -3931,8 +3931,8 @@ WriteControlFile(void) ControlFile->nameDataLen = NAMEDATALEN; ControlFile->indexMaxKeys = INDEX_MAX_KEYS; - ControlFile->toast_max_chunk_size = TOAST_MAX_CHUNK_SIZE; - ControlFile->loblksize = LOBLKSIZE; + ControlFile->toast_max_chunk_size = cluster_toast_max_chunk_size; + ControlFile->loblksize = cluster_loblksize; ControlFile->float8ByVal = FLOAT8PASSBYVAL; @@ -4087,12 +4087,12 @@ ReadControlFile(void) (errmsg("database files are incompatible with server"), errdetail("The database cluster appears to use a different floating-point number format than the server executable."), errhint("It looks like you need to initdb."))); - if (ControlFile->blcksz != BLCKSZ) + if (ControlFile->blcksz != cluster_block_size) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with BLCKSZ %d," - " but the server was compiled with BLCKSZ %d.", - ControlFile->blcksz, BLCKSZ), + errdetail("The database cluster was initialized with cluster_block_size %d," + " but the server was compiled with cluster_block_size %d.", + ControlFile->blcksz, cluster_block_size), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->relseg_size != RELSEG_SIZE) ereport(FATAL, @@ -4122,19 +4122,19 @@ ReadControlFile(void) " but the server was compiled with INDEX_MAX_KEYS %d.", ControlFile->indexMaxKeys, INDEX_MAX_KEYS), errhint("It looks like you need to recompile or 
initdb."))); - if (ControlFile->toast_max_chunk_size != TOAST_MAX_CHUNK_SIZE) + if (ControlFile->toast_max_chunk_size != cluster_toast_max_chunk_size) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d," - " but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.", - ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE), + errdetail("The database cluster was initialized with cluster_toast_max_chunk_size %d," + " but the server was compiled with cluster_toast_max_chunk_size %d.", + ControlFile->toast_max_chunk_size, (int) cluster_toast_max_chunk_size), errhint("It looks like you need to recompile or initdb."))); - if (ControlFile->loblksize != LOBLKSIZE) + if (ControlFile->loblksize != cluster_loblksize) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with LOBLKSIZE %d," - " but the server was compiled with LOBLKSIZE %d.", - ControlFile->loblksize, (int) LOBLKSIZE), + errdetail("The database cluster was initialized with cluster_loblksize %d," + " but the server was compiled with cluster_loblksize %d.", + ControlFile->loblksize, (int) cluster_loblksize), errhint("It looks like you need to recompile or initdb."))); #ifdef USE_FLOAT8_BYVAL diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index 258cbd7035..87e04d6e03 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -46,18 +46,18 @@ * backup block image. 
*/ #ifdef USE_LZ4 -#define LZ4_MAX_BLCKSZ LZ4_COMPRESSBOUND(BLCKSZ) +#define LZ4_MAX_BLCKSZ LZ4_COMPRESSBOUND(cluster_block_size) #else #define LZ4_MAX_BLCKSZ 0 #endif #ifdef USE_ZSTD -#define ZSTD_MAX_BLCKSZ ZSTD_COMPRESSBOUND(BLCKSZ) +#define ZSTD_MAX_BLCKSZ ZSTD_COMPRESSBOUND(cluster_block_size) #else #define ZSTD_MAX_BLCKSZ 0 #endif -#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ) +#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(cluster_block_size) /* Buffer size required to store a compressed version of backup block image */ #define COMPRESS_BUFSIZE Max(Max(PGLZ_MAX_BLCKSZ, LZ4_MAX_BLCKSZ), ZSTD_MAX_BLCKSZ) @@ -383,7 +383,7 @@ XLogRegisterData(char *data, uint32 len) * block_id, the data is appended. * * The maximum amount of data that can be registered per block is 65535 - * bytes. That should be plenty; if you need more than BLCKSZ bytes to + * bytes. That should be plenty; if you need more than cluster_block_size bytes to * reconstruct the changes to the page, you might as well just log a full * copy of it. 
(the "main data" that's not associated with a block is not * limited) @@ -650,7 +650,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, if (lower >= SizeOfPageHeaderData && upper > lower && - upper <= BLCKSZ) + upper <= cluster_block_size) { bimg.hole_offset = lower; cbimg.hole_length = upper - lower; @@ -746,12 +746,12 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, } else { - bimg.length = BLCKSZ - cbimg.hole_length; + bimg.length = cluster_block_size - cbimg.hole_length; if (cbimg.hole_length == 0) { rdt_datas_last->data = page; - rdt_datas_last->len = BLCKSZ; + rdt_datas_last->len = cluster_block_size; } else { @@ -765,7 +765,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, rdt_datas_last->data = page + (bimg.hole_offset + cbimg.hole_length); rdt_datas_last->len = - BLCKSZ - (bimg.hole_offset + cbimg.hole_length); + cluster_block_size - (bimg.hole_offset + cbimg.hole_length); } } @@ -932,7 +932,7 @@ static bool XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length, char *dest, uint16 *dlen) { - int32 orig_len = BLCKSZ - hole_length; + int32 orig_len = cluster_block_size - hole_length; int32 len = -1; int32 extra_bytes = 0; char *source; @@ -945,7 +945,7 @@ XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length, memcpy(source, page, hole_offset); memcpy(source + hole_offset, page + (hole_offset + hole_length), - BLCKSZ - (hole_length + hole_offset)); + cluster_block_size - (hole_length + hole_offset)); /* * Extra data needs to be stored in WAL record for the compressed @@ -1096,10 +1096,10 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std) uint16 upper = ((PageHeader) page)->pd_upper; memcpy(copied_buffer.data, origdata, lower); - memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper); + memcpy(copied_buffer.data + upper, origdata + upper, cluster_block_size - upper); } else - memcpy(copied_buffer.data, origdata, BLCKSZ); + memcpy(copied_buffer.data, origdata, cluster_block_size); XLogBeginInsert(); diff 
--git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c index 539928cb85..93026d5847 100644 --- a/src/backend/access/transam/xlogprefetcher.c +++ b/src/backend/access/transam/xlogprefetcher.c @@ -51,7 +51,7 @@ * Every time we process this much WAL, we'll update the values in * pg_stat_recovery_prefetch. */ -#define XLOGPREFETCHER_STATS_DISTANCE BLCKSZ +#define xlogprefetcher_stats_distance cluster_block_size /* * To detect repeated access to the same block and skip useless extra system @@ -442,7 +442,7 @@ XLogPrefetcherComputeStats(XLogPrefetcher *prefetcher) SharedStats->wal_distance = wal_distance; prefetcher->next_stats_shm_lsn = - prefetcher->reader->ReadRecPtr + XLOGPREFETCHER_STATS_DISTANCE; + prefetcher->reader->ReadRecPtr + xlogprefetcher_stats_distance; } /* diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index c9f9f6e98f..cdce1f21ad 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -189,7 +189,7 @@ XLogReaderFree(XLogReaderState *state) * readRecordBufSize is set to the new buffer size. * * To avoid useless small increases, round its size to a multiple of - * XLOG_BLCKSZ, and make sure it's at least 5*Max(BLCKSZ, XLOG_BLCKSZ) to start + * XLOG_BLCKSZ, and make sure it's at least 5*Max(cluster_block_size, XLOG_BLCKSZ) to start * with. (That is enough for all "normal" records, but very large commit or * abort records might need more space.) 
*/ @@ -199,7 +199,7 @@ allocate_recordbuf(XLogReaderState *state, uint32 reclength) uint32 newSize = reclength; newSize += XLOG_BLCKSZ - (newSize % XLOG_BLCKSZ); - newSize = Max(newSize, 5 * Max(BLCKSZ, XLOG_BLCKSZ)); + newSize = Max(newSize, 5 * Max(cluster_block_size, XLOG_BLCKSZ)); #ifndef FRONTEND @@ -1786,17 +1786,17 @@ DecodeXLogRecord(XLogReaderState *state, blk->hole_length = 0; } else - blk->hole_length = BLCKSZ - blk->bimg_len; + blk->hole_length = cluster_block_size - blk->bimg_len; datatotal += blk->bimg_len; /* * cross-check that hole_offset > 0, hole_length > 0 and - * bimg_len < BLCKSZ if the HAS_HOLE flag is set. + * bimg_len < cluster_block_size if the HAS_HOLE flag is set. */ if ((blk->bimg_info & BKPIMAGE_HAS_HOLE) && (blk->hole_offset == 0 || blk->hole_length == 0 || - blk->bimg_len == BLCKSZ)) + blk->bimg_len == cluster_block_size)) { report_invalid_record(state, "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X", @@ -1823,10 +1823,10 @@ DecodeXLogRecord(XLogReaderState *state, } /* - * Cross-check that bimg_len < BLCKSZ if it is compressed. + * Cross-check that bimg_len < cluster_block_size if it is compressed. */ if (BKPIMAGE_COMPRESSED(blk->bimg_info) && - blk->bimg_len == BLCKSZ) + blk->bimg_len == cluster_block_size) { report_invalid_record(state, "BKPIMAGE_COMPRESSED set, but block image length %u at %X/%X", @@ -1836,12 +1836,12 @@ DecodeXLogRecord(XLogReaderState *state, } /* - * cross-check that bimg_len = BLCKSZ if neither HAS_HOLE is + * cross-check that bimg_len = cluster_block_size if neither HAS_HOLE is * set nor COMPRESSED(). 
*/ if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) && !BKPIMAGE_COMPRESSED(blk->bimg_info) && - blk->bimg_len != BLCKSZ) + blk->bimg_len != cluster_block_size) { report_invalid_record(state, "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_COMPRESSED set, but block image length is %u at %X/%X", @@ -2073,14 +2073,14 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) if ((bkpb->bimg_info & BKPIMAGE_COMPRESS_PGLZ) != 0) { if (pglz_decompress(ptr, bkpb->bimg_len, tmp.data, - BLCKSZ - bkpb->hole_length, true) < 0) + cluster_block_size - bkpb->hole_length, true) < 0) decomp_success = false; } else if ((bkpb->bimg_info & BKPIMAGE_COMPRESS_LZ4) != 0) { #ifdef USE_LZ4 if (LZ4_decompress_safe(ptr, tmp.data, - bkpb->bimg_len, BLCKSZ - bkpb->hole_length) <= 0) + bkpb->bimg_len, cluster_block_size - bkpb->hole_length) <= 0) decomp_success = false; #else report_invalid_record(record, "could not restore image at %X/%X compressed with %s not supported by build, block %d", @@ -2094,7 +2094,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) { #ifdef USE_ZSTD size_t decomp_result = ZSTD_decompress(tmp.data, - BLCKSZ - bkpb->hole_length, + cluster_block_size - bkpb->hole_length, ptr, bkpb->bimg_len); if (ZSTD_isError(decomp_result)) @@ -2129,7 +2129,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) /* generate page, taking into account hole if necessary */ if (bkpb->hole_length == 0) { - memcpy(page, ptr, BLCKSZ); + memcpy(page, ptr, cluster_block_size); } else { @@ -2138,7 +2138,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) MemSet(page + bkpb->hole_offset, 0, bkpb->hole_length); memcpy(page + (bkpb->hole_offset + bkpb->hole_length), ptr + bkpb->hole_offset, - BLCKSZ - (bkpb->hole_offset + bkpb->hole_length)); + cluster_block_size - (bkpb->hole_offset + bkpb->hole_length)); } return true; diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c index 
becc2bda62..0ac856c945 100644 --- a/src/backend/access/transam/xlogrecovery.c +++ b/src/backend/access/transam/xlogrecovery.c @@ -293,7 +293,7 @@ static bool backupEndRequired = false; */ bool reachedConsistency = false; -/* Buffers dedicated to consistency checks of size BLCKSZ */ +/* Buffers dedicated to consistency checks of size cluster_block_size */ static char *replay_image_masked = NULL; static char *primary_image_masked = NULL; @@ -606,8 +606,8 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr, * (2) a static char array isn't guaranteed to have any particular * alignment, whereas palloc() will provide MAXALIGN'd storage. */ - replay_image_masked = (char *) palloc(BLCKSZ); - primary_image_masked = (char *) palloc(BLCKSZ); + replay_image_masked = (char *) palloc(cluster_block_size); + primary_image_masked = (char *) palloc(cluster_block_size); if (read_backup_label(&CheckPointLoc, &CheckPointTLI, &backupEndRequired, &backupFromStandby)) @@ -2432,7 +2432,7 @@ verifyBackupPageConsistency(XLogReaderState *record) * Take a copy of the local page where WAL has been applied to have a * comparison base before masking it... */ - memcpy(replay_image_masked, page, BLCKSZ); + memcpy(replay_image_masked, page, cluster_block_size); /* No need for this page anymore now that a copy is in. */ UnlockReleaseBuffer(buf); @@ -2467,7 +2467,7 @@ verifyBackupPageConsistency(XLogReaderState *record) } /* Time to compare the primary and replay images. 
*/ - if (memcmp(replay_image_masked, primary_image_masked, BLCKSZ) != 0) + if (memcmp(replay_image_masked, primary_image_masked, cluster_block_size) != 0) { elog(FATAL, "inconsistent page found, rel %u/%u/%u, forknum %u, blkno %u", diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c index 45be21131c..8fa1ae018a 100644 --- a/src/backend/backup/basebackup.c +++ b/src/backend/backup/basebackup.c @@ -55,7 +55,7 @@ * NB: The buffer size is required to be a multiple of the system block * size, so use that value instead if it's bigger than our preference. */ -#define SINK_BUFFER_LENGTH Max(32768, BLCKSZ) +#define sink_buffer_length Max(32768, cluster_block_size) typedef struct { @@ -306,7 +306,7 @@ perform_base_backup(basebackup_options *opt, bbsink *sink) } /* notify basebackup sink about start of backup */ - bbsink_begin_backup(sink, &state, SINK_BUFFER_LENGTH); + bbsink_begin_backup(sink, &state, sink_buffer_length); /* Send off our tablespaces one by one */ foreach(lc, state.tablespaces) @@ -370,8 +370,8 @@ perform_base_backup(basebackup_options *opt, bbsink *sink) else { /* Properly terminate the tarfile. */ - StaticAssertDecl(2 * TAR_BLOCK_SIZE <= BLCKSZ, - "BLCKSZ too small for 2 tar blocks"); + StaticAssertDecl(2 * TAR_BLOCK_SIZE <= cluster_block_size, + "cluster_block_size too small for 2 tar blocks"); memset(sink->bbs_buffer, 0, 2 * TAR_BLOCK_SIZE); bbsink_archive_contents(sink, 2 * TAR_BLOCK_SIZE); @@ -623,8 +623,8 @@ perform_base_backup(basebackup_options *opt, bbsink *sink) } /* Properly terminate the tar file. 
*/ - StaticAssertStmt(2 * TAR_BLOCK_SIZE <= BLCKSZ, - "BLCKSZ too small for 2 tar blocks"); + StaticAssertStmt(2 * TAR_BLOCK_SIZE <= cluster_block_size, + "cluster_block_size too small for 2 tar blocks"); memset(sink->bbs_buffer, 0, 2 * TAR_BLOCK_SIZE); bbsink_archive_contents(sink, 2 * TAR_BLOCK_SIZE); @@ -1562,27 +1562,27 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename, /* * The checksums are verified at block level, so we iterate over the - * buffer in chunks of BLCKSZ, after making sure that - * TAR_SEND_SIZE/buf is divisible by BLCKSZ and we read a multiple of - * BLCKSZ bytes. + * buffer in chunks of cluster_block_size, after making sure that + * TAR_SEND_SIZE/buf is divisible by cluster_block_size and we read a multiple of + * cluster_block_size bytes. */ - Assert((sink->bbs_buffer_length % BLCKSZ) == 0); + Assert((sink->bbs_buffer_length % cluster_block_size) == 0); - if (verify_checksum && (cnt % BLCKSZ != 0)) + if (verify_checksum && (cnt % cluster_block_size != 0)) { ereport(WARNING, (errmsg("could not verify checksum in file \"%s\", block " "%u: read buffer size %d and page size %d " "differ", - readfilename, blkno, (int) cnt, BLCKSZ))); + readfilename, blkno, (int) cnt, cluster_block_size))); verify_checksum = false; } if (verify_checksum) { - for (i = 0; i < cnt / BLCKSZ; i++) + for (i = 0; i < cnt / cluster_block_size; i++) { - page = sink->bbs_buffer + BLCKSZ * i; + page = sink->bbs_buffer + cluster_block_size * i; /* * Only check pages which have not been modified since the @@ -1625,8 +1625,8 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename, /* Reread the failed block */ reread_cnt = basebackup_read_file(fd, - sink->bbs_buffer + BLCKSZ * i, - BLCKSZ, len + BLCKSZ * i, + sink->bbs_buffer + cluster_block_size * i, + cluster_block_size, len + cluster_block_size * i, readfilename, false); if (reread_cnt == 0) @@ -1639,7 +1639,7 @@ sendFile(bbsink *sink, const char *readfilename, const char 
*tarfilename, * code that handles that case. (We must fix * up cnt first, though.) */ - cnt = BLCKSZ * i; + cnt = cluster_block_size * i; break; } @@ -1745,12 +1745,12 @@ _tarWriteHeader(bbsink *sink, const char *filename, const char *linktarget, /* * As of this writing, the smallest supported block size is 1kB, which * is twice TAR_BLOCK_SIZE. Since the buffer size is required to be a - * multiple of BLCKSZ, it should be safe to assume that the buffer is + * multiple of cluster_block_size, it should be safe to assume that the buffer is * large enough to fit an entire tar block. We double-check by means * of these assertions. */ - StaticAssertDecl(TAR_BLOCK_SIZE <= BLCKSZ, - "BLCKSZ too small for tar block"); + StaticAssertDecl(TAR_BLOCK_SIZE <= cluster_block_size, + "cluster_block_size too small for tar block"); Assert(sink->bbs_buffer_length >= TAR_BLOCK_SIZE); rc = tarCreateHeader(sink->bbs_buffer, filename, linktarget, diff --git a/src/backend/backup/basebackup_lz4.c b/src/backend/backup/basebackup_lz4.c index 7acb606564..25f23bd8d3 100644 --- a/src/backend/backup/basebackup_lz4.c +++ b/src/backend/backup/basebackup_lz4.c @@ -117,10 +117,10 @@ bbsink_lz4_begin_backup(bbsink *sink) &mysink->prefs); /* - * The buffer length is expected to be a multiple of BLCKSZ, so round up. + * The buffer length is expected to be a multiple of cluster_block_size, so round up. 
*/ - output_buffer_bound = output_buffer_bound + BLCKSZ - - (output_buffer_bound % BLCKSZ); + output_buffer_bound = output_buffer_bound + cluster_block_size - + (output_buffer_bound % cluster_block_size); bbsink_begin_backup(sink->bbs_next, sink->bbs_state, output_buffer_bound); } diff --git a/src/backend/backup/basebackup_zstd.c b/src/backend/backup/basebackup_zstd.c index 6a078cdf48..62d73e4b22 100644 --- a/src/backend/backup/basebackup_zstd.c +++ b/src/backend/backup/basebackup_zstd.c @@ -143,10 +143,10 @@ bbsink_zstd_begin_backup(bbsink *sink) output_buffer_bound = ZSTD_compressBound(mysink->base.bbs_buffer_length); /* - * The buffer length is expected to be a multiple of BLCKSZ, so round up. + * The buffer length is expected to be a multiple of cluster_block_size, so round up. */ - output_buffer_bound = output_buffer_bound + BLCKSZ - - (output_buffer_bound % BLCKSZ); + output_buffer_bound = output_buffer_bound + cluster_block_size - + (output_buffer_bound % cluster_block_size); bbsink_begin_backup(sink->bbs_next, sink->bbs_state, output_buffer_bound); } diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index 2add053489..32797d718c 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -800,7 +800,7 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker) * main fork is longer than ever but FSM fork gets shorter. 
*/ if (pendingsync->is_truncated || - total_blocks * BLCKSZ / 1024 >= wal_skip_threshold) + total_blocks * cluster_block_size / 1024 >= wal_skip_threshold) { /* allocate the initial array, or extend it, if needed */ if (maxrels == 0) diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index bfd981aa3f..871101e550 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -772,9 +772,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params, if (delay_in_ms > 0) { - read_rate = (double) BLCKSZ * AnalyzePageMiss / (1024 * 1024) / + read_rate = (double) cluster_block_size * AnalyzePageMiss / (1024 * 1024) / (delay_in_ms / 1000.0); - write_rate = (double) BLCKSZ * AnalyzePageDirty / (1024 * 1024) / + write_rate = (double) cluster_block_size * AnalyzePageDirty / (1024 * 1024) / (delay_in_ms / 1000.0); } diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c index d148d10850..c7d19f4ee0 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -163,7 +163,7 @@ * than that, so changes in that data structure won't affect user-visible * restrictions. */ -#define NOTIFY_PAYLOAD_MAX_LENGTH (BLCKSZ - NAMEDATALEN - 128) +#define NOTIFY_PAYLOAD_MAX_LENGTH (cluster_block_size - NAMEDATALEN - 128) /* * Struct representing an entry in the global notify queue @@ -311,7 +311,7 @@ static AsyncQueueControl *asyncQueueControl; static SlruCtlData NotifyCtlData; #define NotifyCtl (&NotifyCtlData) -#define QUEUE_PAGESIZE BLCKSZ +#define QUEUE_PAGESIZE cluster_block_size #define QUEUE_FULL_WARN_INTERVAL 5000 /* warn at most once every 5s */ /* @@ -322,7 +322,7 @@ static SlruCtlData NotifyCtlData; * * The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2 * pages, because more than that would confuse slru.c into thinking there - * was a wraparound condition. With the default BLCKSZ this means there + * was a wraparound condition. 
With the default cluster_block_size this means there * can be up to 8GB of queued-and-not-read data. * * Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index 351ab4957a..1263603df9 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -1036,7 +1036,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) /* Each parallel VACUUM worker gets its own access strategy. */ pvs.bstrategy = GetAccessStrategyWithSize(BAS_VACUUM, - shared->ring_nbuffers * (BLCKSZ / 1024)); + shared->ring_nbuffers * (cluster_block_size / 1024)); /* Setup error traceback support for ereport() */ errcallback.callback = parallel_vacuum_error_callback; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 468db94fe5..02358ba372 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -216,8 +216,8 @@ * * Tapes' buffers can take up substantial memory when many tapes are open at * once. We only need one tape open at a time in read mode (using a buffer - * that's a multiple of BLCKSZ); but we need one tape open in write mode (each - * requiring a buffer of size BLCKSZ) for each partition. + * that's a multiple of cluster_block_size); but we need one tape open in write mode (each + * requiring a buffer of size cluster_block_size) for each partition. * * Note that it's possible for transition states to start small but then * grow very large; for instance in the case of ARRAY_AGG. In such cases, @@ -299,12 +299,12 @@ /* * For reading from tapes, the buffer size must be a multiple of - * BLCKSZ. Larger values help when reading from multiple tapes concurrently, - * but that doesn't happen in HashAgg, so we simply use BLCKSZ. Writing to a - * tape always uses a buffer of size BLCKSZ. + * cluster_block_size. 
Larger values help when reading from multiple tapes concurrently, + * but that doesn't happen in HashAgg, so we simply use cluster_block_size. Writing to a + * tape always uses a buffer of size cluster_block_size. */ -#define HASHAGG_READ_BUFFER_SIZE BLCKSZ -#define HASHAGG_WRITE_BUFFER_SIZE BLCKSZ +#define hashagg_read_buffer_size cluster_block_size +#define hashagg_write_buffer_size cluster_block_size /* * HyperLogLog is used for estimating the cardinality of the spilled tuples in @@ -1827,8 +1827,8 @@ hash_agg_set_limits(double hashentrysize, double input_groups, int used_bits, *num_partitions = npartitions; partition_mem = - HASHAGG_READ_BUFFER_SIZE + - HASHAGG_WRITE_BUFFER_SIZE * npartitions; + hashagg_read_buffer_size + + hashagg_write_buffer_size * npartitions; /* * Don't set the limit below 3/4 of hash_mem. In that case, we are at the @@ -1933,9 +1933,9 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions) hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true); /* memory for read/write tape buffers, if spilled */ - buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE; + buffer_mem = npartitions * hashagg_write_buffer_size; if (from_tape) - buffer_mem += HASHAGG_READ_BUFFER_SIZE; + buffer_mem += hashagg_read_buffer_size; /* update peak mem */ total_mem = meta_mem + hashkey_mem + buffer_mem; @@ -1945,7 +1945,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions) /* update disk usage */ if (aggstate->hash_tapeset != NULL) { - uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeset) * (BLCKSZ / 1024); + uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeset) * (cluster_block_size / 1024); if (aggstate->hash_disk_used < disk_used) aggstate->hash_disk_used = disk_used; @@ -2004,8 +2004,8 @@ hash_choose_num_partitions(double input_groups, double hashentrysize, * open partition files are greater than 1/4 of hash_mem. 
*/ partition_limit = - (hash_mem_limit * 0.25 - HASHAGG_READ_BUFFER_SIZE) / - HASHAGG_WRITE_BUFFER_SIZE; + (hash_mem_limit * 0.25 - hashagg_read_buffer_size) / + hashagg_write_buffer_size; mem_wanted = HASHAGG_PARTITION_FACTOR * input_groups * hashentrysize; @@ -3113,7 +3113,7 @@ hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno) freeHyperLogLog(&spill->hll_card[i]); /* rewinding frees the buffer while not in use */ - LogicalTapeRewindForRead(tape, HASHAGG_READ_BUFFER_SIZE); + LogicalTapeRewindForRead(tape, hashagg_read_buffer_size); new_batch = hashagg_batch_new(tape, setno, spill->ntuples[i], cardinality, diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index 29a1858441..971c29c428 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -13,7 +13,7 @@ * fact that a particular page needs to be visited. * * The "lossy" storage uses one bit per disk page, so at the standard 8K - * BLCKSZ, we can represent all pages in 64Gb of disk space in about 1Mb + * cluster_block_size, we can represent all pages in 64Gb of disk space in about 1Mb * of memory. People pushing around tables of that size should have a * couple of Mb to spare, so we don't worry about providing a second level * of lossiness. In theory we could fall back to page ranges at some @@ -70,7 +70,7 @@ * too different. But we also want PAGES_PER_CHUNK to be a power of 2 to * avoid expensive integer remainder operations. 
So, define it like this: */ -#define PAGES_PER_CHUNK (BLCKSZ / 32) +#define PAGES_PER_CHUNK (cluster_block_size / 32) /* We use BITS_PER_BITMAPWORD and typedef bitmapword from nodes/bitmapset.h */ diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index d6ceafd51c..2a66cdd355 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -1890,7 +1890,7 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost, /* * We'll have to use a disk-based sort of all the tuples */ - double npages = ceil(input_bytes / BLCKSZ); + double npages = ceil(input_bytes / cluster_block_size); double nruns = input_bytes / sort_mem_bytes; double mergeorder = tuplesort_merge_order(sort_mem_bytes); double log_runs; @@ -2455,7 +2455,7 @@ cost_material(Path *path, */ if (nbytes > work_mem_bytes) { - double npages = ceil(nbytes / BLCKSZ); + double npages = ceil(nbytes / cluster_block_size); run_cost += seq_page_cost * npages; } @@ -2764,7 +2764,7 @@ cost_agg(Path *path, PlannerInfo *root, * Estimate number of pages read and written. For each level of * recursion, a tuple must be written and then later read. 
*/ - pages = relation_byte_size(input_tuples, input_width) / BLCKSZ; + pages = relation_byte_size(input_tuples, input_width) / cluster_block_size; pages_written = pages_read = pages * depth; /* @@ -4551,7 +4551,7 @@ cost_rescan(PlannerInfo *root, Path *path, if (nbytes > work_mem_bytes) { /* It will spill, so account for re-read cost */ - double npages = ceil(nbytes / BLCKSZ); + double npages = ceil(nbytes / cluster_block_size); run_cost += seq_page_cost * npages; } @@ -4578,7 +4578,7 @@ cost_rescan(PlannerInfo *root, Path *path, if (nbytes > work_mem_bytes) { /* It will spill, so account for re-read cost */ - double npages = ceil(nbytes / BLCKSZ); + double npages = ceil(nbytes / cluster_block_size); run_cost += seq_page_cost * npages; } @@ -6356,7 +6356,7 @@ relation_byte_size(double tuples, int width) static double page_size(double tuples, int width) { - return ceil(relation_byte_size(tuples, width) / BLCKSZ); + return ceil(relation_byte_size(tuples, width) / cluster_block_size); } /* diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index 243c8fb1e4..cfe03dedff 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -1089,7 +1089,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths, tuple_width += MAXALIGN(SizeofHeapTupleHeader); tuple_width += sizeof(ItemIdData); /* note: integer division is intentional here */ - density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width; + density = (cluster_block_size - SizeOfPageHeaderData) / tuple_width; } *tuples = rint(density * (double) curpages); diff --git a/src/backend/po/de.po b/src/backend/po/de.po index 0a9e668c38..ac2cb6e4cf 100644 --- a/src/backend/po/de.po +++ b/src/backend/po/de.po @@ -2329,8 +2329,8 @@ msgstr "Der Datenbank-Cluster verwendet anscheinend ein anderes Fließkommazahle #: access/transam/xlog.c:4070 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." 
-msgstr "Der Datenbank-Cluster wurde mit BLCKSZ %d initialisiert, aber der Server wurde mit BLCKSZ %d kompiliert." +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "Der Datenbank-Cluster wurde mit cluster_block_size %d initialisiert, aber der Server wurde mit cluster_block_size %d kompiliert." #: access/transam/xlog.c:4073 access/transam/xlog.c:4080 #: access/transam/xlog.c:4087 access/transam/xlog.c:4094 @@ -2363,13 +2363,13 @@ msgstr "Der Datenbank-Cluster wurde mit INDEX_MAX_KEYS %d initialisiert, aber de #: access/transam/xlog.c:4105 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "Der Datenbank-Cluster wurde mit TOAST_MAX_CHUNK_SIZE %d initialisiert, aber der Server wurde mit TOAST_MAX_CHUNK_SIZE %d kompiliert." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." +msgstr "Der Datenbank-Cluster wurde mit cluster_toast_max_chunk_size %d initialisiert, aber der Server wurde mit cluster_toast_max_chunk_size %d kompiliert." #: access/transam/xlog.c:4112 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "Der Datenbank-Cluster wurde mit LOBLKSIZE %d initialisiert, aber der Server wurde mit LOBLKSIZE %d kompiliert." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." +msgstr "Der Datenbank-Cluster wurde mit cluster_loblksize %d initialisiert, aber der Server wurde mit cluster_loblksize %d kompiliert." 
#: access/transam/xlog.c:4121 #, c-format @@ -22029,8 +22029,8 @@ msgstr "debug_io_direct wird für WAL nicht unterstützt, weil XLOG_BLCKSZ zu kl #: storage/file/fd.c:3951 #, c-format -msgid "debug_io_direct is not supported for data because BLCKSZ is too small" -msgstr "debug_io_direct wird für Daten nicht unterstützt, weil BLCKSZ zu klein ist" +msgid "debug_io_direct is not supported for data because cluster_block_size is too small" +msgstr "debug_io_direct wird für Daten nicht unterstützt, weil cluster_block_size zu klein ist" #: storage/file/reinit.c:145 #, c-format diff --git a/src/backend/po/es.po b/src/backend/po/es.po index e50a935033..b420226eb9 100644 --- a/src/backend/po/es.po +++ b/src/backend/po/es.po @@ -2387,8 +2387,8 @@ msgstr "Los archivos de la base de datos parecen usar un formato de número de c #: access/transam/xlog.c:4070 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." -msgstr "Los archivos de base de datos fueron inicializados con BLCKSZ %d, pero el servidor fue compilado con BLCKSZ %d." +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "Los archivos de base de datos fueron inicializados con cluster_block_size %d, pero el servidor fue compilado con cluster_block_size %d." #: access/transam/xlog.c:4073 access/transam/xlog.c:4080 #: access/transam/xlog.c:4087 access/transam/xlog.c:4094 @@ -2421,13 +2421,13 @@ msgstr "Los archivos de la base de datos fueron inicializados con INDEX_MAX_KEYS #: access/transam/xlog.c:4105 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "Los archivos de la base de datos fueron inicializados con TOAST_MAX_CHUNK_SIZE %d, pero el servidor fue compilado con TOAST_MAX_CHUNK_SIZE %d." 
+msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." +msgstr "Los archivos de la base de datos fueron inicializados con cluster_toast_max_chunk_size %d, pero el servidor fue compilado con cluster_toast_max_chunk_size %d." #: access/transam/xlog.c:4112 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "Los archivos de base de datos fueron inicializados con LOBLKSIZE %d, pero el servidor fue compilado con LOBLKSIZE %d." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." +msgstr "Los archivos de base de datos fueron inicializados con cluster_loblksize %d, pero el servidor fue compilado con cluster_loblksize %d." #: access/transam/xlog.c:4121 #, c-format @@ -21808,7 +21808,7 @@ msgstr "" #: storage/file/fd.c:3901 #, c-format -msgid "io_direct is not supported for data because BLCKSZ is too small" +msgid "io_direct is not supported for data because cluster_block_size is too small" msgstr "" #: storage/file/reinit.c:145 diff --git a/src/backend/po/fr.po b/src/backend/po/fr.po index fd51500b93..2b73cd3fdc 100644 --- a/src/backend/po/fr.po +++ b/src/backend/po/fr.po @@ -2380,10 +2380,10 @@ msgstr "" #: access/transam/xlog.c:4096 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." msgstr "" -"Le cluster de base de données a été initialisé avec un BLCKSZ à %d alors que\n" -"le serveur a été compilé avec un BLCKSZ à %d." +"Le cluster de base de données a été initialisé avec un cluster_block_size à %d alors que\n" +"le serveur a été compilé avec un cluster_block_size à %d." 
#: access/transam/xlog.c:4099 access/transam/xlog.c:4106 #: access/transam/xlog.c:4113 access/transam/xlog.c:4120 @@ -2424,17 +2424,17 @@ msgstr "" #: access/transam/xlog.c:4131 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." msgstr "" -"Le cluster de bases de données a été initialisé avec un TOAST_MAX_CHUNK_SIZE\n" -"à %d alors que le serveur a été compilé avec un TOAST_MAX_CHUNK_SIZE à %d." +"Le cluster de bases de données a été initialisé avec un cluster_toast_max_chunk_size\n" +"à %d alors que le serveur a été compilé avec un cluster_toast_max_chunk_size à %d." #: access/transam/xlog.c:4138 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." msgstr "" -"Le cluster de base de données a été initialisé avec un LOBLKSIZE à %d alors que\n" -"le serveur a été compilé avec un LOBLKSIZE à %d." +"Le cluster de base de données a été initialisé avec un cluster_loblksize à %d alors que\n" +"le serveur a été compilé avec un cluster_loblksize à %d." #: access/transam/xlog.c:4147 #, c-format diff --git a/src/backend/po/id.po b/src/backend/po/id.po index d5d484132b..e503bc4117 100644 --- a/src/backend/po/id.po +++ b/src/backend/po/id.po @@ -1089,8 +1089,8 @@ msgstr "cluster database sepertinya menggunakan format nomor floating-point yang #: access/transam/xlog.c:3648 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." -msgstr "cluster database telah diinisialkan dengan BLCKSZ %d, tapi server telah dikompilasi dengan BLCKSZ %d." 
+msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "cluster database telah diinisialkan dengan cluster_block_size %d, tapi server telah dikompilasi dengan cluster_block_size %d." #: access/transam/xlog.c:3651 access/transam/xlog.c:3658 access/transam/xlog.c:3665 access/transam/xlog.c:3672 access/transam/xlog.c:3679 access/transam/xlog.c:3686 access/transam/xlog.c:3693 access/transam/xlog.c:3701 access/transam/xlog.c:3708 access/transam/xlog.c:3717 access/transam/xlog.c:3724 access/transam/xlog.c:3733 access/transam/xlog.c:3740 #, c-format @@ -1124,8 +1124,8 @@ msgstr "cluster database telah diinisialkan dengan INDEX_MAX_KEYS %d, tapi serve #: access/transam/xlog.c:3690 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "cluster database telah diinisialkan dengan TOAST_MAX_CHUNK_SIZE %d, tapi server telah dikompilasi dengan TOAST_MAX_CHUNK_SIZE %d." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." +msgstr "cluster database telah diinisialkan dengan cluster_toast_max_chunk_size %d, tapi server telah dikompilasi dengan cluster_toast_max_chunk_size %d." #: access/transam/xlog.c:3699 #, c-format diff --git a/src/backend/po/it.po b/src/backend/po/it.po index 673e2aaf00..5cd524e120 100644 --- a/src/backend/po/it.po +++ b/src/backend/po/it.po @@ -2067,8 +2067,8 @@ msgstr "Il cluster di database sta usando un formato per i numeri in virgola mob #: access/transam/xlog.c:4096 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." -msgstr "Il cluster di database è stato inizializzato con BLCKSZ %d, ma il server è stato compilato con BLCKSZ %d." 
+msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "Il cluster di database è stato inizializzato con cluster_block_size %d, ma il server è stato compilato con cluster_block_size %d." #: access/transam/xlog.c:4099 access/transam/xlog.c:4106 access/transam/xlog.c:4113 access/transam/xlog.c:4120 access/transam/xlog.c:4127 access/transam/xlog.c:4134 access/transam/xlog.c:4141 access/transam/xlog.c:4149 access/transam/xlog.c:4156 #, c-format @@ -2097,13 +2097,13 @@ msgstr "Il cluster di database è stato inizializzato con INDEX_MAX_KEYS %d, ma #: access/transam/xlog.c:4131 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "Il cluster di database è stato inizializzato con TOAST_MAX_CHUNK_SIZE %d, ma il server è stato compilato con TOAST_MAX_CHUNK_SIZE %d." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." +msgstr "Il cluster di database è stato inizializzato con cluster_toast_max_chunk_size %d, ma il server è stato compilato con cluster_toast_max_chunk_size %d." #: access/transam/xlog.c:4138 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "Il cluster di database è stato inizializzato con LOBLKSIZE %d, ma il server è stato compilato con LOBLKSIZE %d." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." +msgstr "Il cluster di database è stato inizializzato con cluster_loblksize %d, ma il server è stato compilato con cluster_loblksize %d." 
#: access/transam/xlog.c:4147 #, c-format diff --git a/src/backend/po/ja.po b/src/backend/po/ja.po index 1ab9f7f68f..c7a8044f23 100644 --- a/src/backend/po/ja.po +++ b/src/backend/po/ja.po @@ -2096,8 +2096,8 @@ msgstr "データベースクラスタはサーバー実行ファイルと異な #: access/transam/xlog.c:4070 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." -msgstr "データベースクラスタは BLCKSZ %d で初期化されましたが、サーバーは BLCKSZ %d でコンパイルされています。" +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "データベースクラスタは cluster_block_size %d で初期化されましたが、サーバーは cluster_block_size %d でコンパイルされています。" #: access/transam/xlog.c:4073 access/transam/xlog.c:4080 access/transam/xlog.c:4087 access/transam/xlog.c:4094 access/transam/xlog.c:4101 access/transam/xlog.c:4108 access/transam/xlog.c:4115 access/transam/xlog.c:4123 access/transam/xlog.c:4130 #, c-format @@ -2126,13 +2126,13 @@ msgstr "データベースクラスタは INDEX_MAX_KEYS %d で初期化され #: access/transam/xlog.c:4105 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "データベースクラスタは TOAST_MAX_CHUNK_SIZE %d で初期化されましたが、サーバーは TOAST_MAX_CHUNK_SIZE %d でコンパイルされています。" +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." +msgstr "データベースクラスタは cluster_toast_max_chunk_size %d で初期化されましたが、サーバーは cluster_toast_max_chunk_size %d でコンパイルされています。" #: access/transam/xlog.c:4112 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "データベースクラスタは LOBLKSIZE %d で初期化されましたが、サーバーは LOBLKSIZE %d でコンパイルされています。" +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." 
+msgstr "データベースクラスタは cluster_loblksize %d で初期化されましたが、サーバーは cluster_loblksize %d でコンパイルされています。" #: access/transam/xlog.c:4121 #, c-format @@ -21403,8 +21403,8 @@ msgstr "XLOG_BLCKSZが小さすぎるためdebug_io_directはWALに対しては #: storage/file/fd.c:3951 #, c-format -msgid "debug_io_direct is not supported for data because BLCKSZ is too small" -msgstr "BLCKSZが小さすぎるためdebug_io_directはデータに対してサポートされません" +msgid "debug_io_direct is not supported for data because cluster_block_size is too small" +msgstr "cluster_block_sizeが小さすぎるためdebug_io_directはデータに対してサポートされません" #: storage/file/reinit.c:145 #, c-format diff --git a/src/backend/po/ko.po b/src/backend/po/ko.po index f330f3da7e..825dae75cf 100644 --- a/src/backend/po/ko.po +++ b/src/backend/po/ko.po @@ -2348,10 +2348,10 @@ msgstr "" #: access/transam/xlog.c:4802 #, c-format msgid "" -"The database cluster was initialized with BLCKSZ %d, but the server was " -"compiled with BLCKSZ %d." +"The database cluster was initialized with cluster_block_size %d, but the server was " +"compiled with cluster_block_size %d." msgstr "" -"이 데이터베이스 클러스터는 BLCKSZ %d (으)로 초기화 되었지만, 서버는 BLCKSZ " +"이 데이터베이스 클러스터는 cluster_block_size %d (으)로 초기화 되었지만, 서버는 cluster_block_size " "%d (으)로 컴파일 되어있습니다." #: access/transam/xlog.c:4805 access/transam/xlog.c:4812 @@ -2404,20 +2404,20 @@ msgstr "" #: access/transam/xlog.c:4837 #, c-format msgid "" -"The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the " -"server was compiled with TOAST_MAX_CHUNK_SIZE %d." +"The database cluster was initialized with cluster_toast_max_chunk_size %d, but the " +"server was compiled with cluster_toast_max_chunk_size %d." msgstr "" -"데이터베이스 클러스터는 TOAST_MAX_CHUNK_SIZE %d(으)로 초기화되었지만 서버는 " -"TOAST_MAX_CHUNK_SIZE %d(으)로 컴파일 되었습니다." +"데이터베이스 클러스터는 cluster_toast_max_chunk_size %d(으)로 초기화되었지만 서버는 " +"cluster_toast_max_chunk_size %d(으)로 컴파일 되었습니다." 
#: access/transam/xlog.c:4844 #, c-format msgid "" -"The database cluster was initialized with LOBLKSIZE %d, but the server was " -"compiled with LOBLKSIZE %d." +"The database cluster was initialized with cluster_loblksize %d, but the server was " +"compiled with cluster_loblksize %d." msgstr "" -"이 데이터베이스 클러스터는 LOBLKSIZE %d(으)로 초기화 되었지만, 서버는 " -"LOBLKSIZE %d (으)로 컴파일 되어있습니다." +"이 데이터베이스 클러스터는 cluster_loblksize %d(으)로 초기화 되었지만, 서버는 " +"cluster_loblksize %d (으)로 컴파일 되어있습니다." #: access/transam/xlog.c:4853 #, c-format diff --git a/src/backend/po/pl.po b/src/backend/po/pl.po index 3ac9d0451c..08fdc7a3d5 100644 --- a/src/backend/po/pl.po +++ b/src/backend/po/pl.po @@ -1927,8 +1927,8 @@ msgstr "Klaster bazy danych wydaje się używać innego formatu liczb zmiennoprz #: access/transam/xlog.c:4550 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." -msgstr "Klaster bazy danych został zainicjowany z BLCKSZ %d, ale serwer był skompilowany z BLCKSZ %d." +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "Klaster bazy danych został zainicjowany z cluster_block_size %d, ale serwer był skompilowany z cluster_block_size %d." #: access/transam/xlog.c:4553 access/transam/xlog.c:4560 #: access/transam/xlog.c:4567 access/transam/xlog.c:4574 @@ -1967,13 +1967,13 @@ msgstr "Klaster bazy danych został zainicjowany z INDEX_MAX_KEYS %d, ale serwer #: access/transam/xlog.c:4592 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "Klaster bazy danych został zainicjowany z TOAST_MAX_CHUNK_SIZE %d, ale serwer był skompilowany z TOAST_MAX_CHUNK_SIZE %d." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." 
+msgstr "Klaster bazy danych został zainicjowany z cluster_toast_max_chunk_size %d, ale serwer był skompilowany z cluster_toast_max_chunk_size %d." #: access/transam/xlog.c:4599 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "Klaster bazy danych został zainicjowany z LOBLKSIZE %d, ale serwer był skompilowany z LOBLKSIZE %d." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." +msgstr "Klaster bazy danych został zainicjowany z cluster_loblksize %d, ale serwer był skompilowany z cluster_loblksize %d." #: access/transam/xlog.c:4608 #, c-format diff --git a/src/backend/po/pt_BR.po b/src/backend/po/pt_BR.po index 37e4a28f07..7e30e2119d 100644 --- a/src/backend/po/pt_BR.po +++ b/src/backend/po/pt_BR.po @@ -1264,7 +1264,7 @@ msgstr "O agrupamento de banco de dados parece utilizar um formato de número de #: access/transam/xlog.c:4506 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." -msgstr "O agrupamento de banco de dados foi inicializado com BLCSZ %d, mas o servidor foi compilado com BLCSZ %d." +msgstr "O agrupamento de banco de dados foi inicializado com cluster_block_size %d, mas o servidor foi compilado com cluster_block_size %d." #: access/transam/xlog.c:4509 access/transam/xlog.c:4516 @@ -1305,13 +1305,13 @@ msgstr "O agrupamento de banco de dados foi inicializado com INDEX_MAX_KEYS %d, #: access/transam/xlog.c:4548 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "O agrupamento de banco de dados foi inicializado com TOAST_MAX_CHUNK_SIZE %d, mas o servidor foi compilado com TOAST_MAX_CHUNK_SIZE %d." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d."
+msgstr "O agrupamento de banco de dados foi inicializado com cluster_toast_max_chunk_size %d, mas o servidor foi compilado com cluster_toast_max_chunk_size %d." #: access/transam/xlog.c:4555 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "O agrupamento de banco de dados foi inicializado com LOBLKSIZE %d, mas o servidor foi compilado com LOBLKSIZE %d." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." +msgstr "O agrupamento de banco de dados foi inicializado com cluster_loblksize %d, mas o servidor foi compilado com cluster_loblksize %d." #: access/transam/xlog.c:4564 #, c-format diff --git a/src/backend/po/ru.po b/src/backend/po/ru.po index ae9c50eed7..97ee086a7b 100644 --- a/src/backend/po/ru.po +++ b/src/backend/po/ru.po @@ -2619,11 +2619,11 @@ msgstr "" #: access/transam/xlog.c:4096 #, c-format msgid "" -"The database cluster was initialized with BLCKSZ %d, but the server was " -"compiled with BLCKSZ %d." +"The database cluster was initialized with cluster_block_size %d, but the server was " +"compiled with cluster_block_size %d." msgstr "" -"Кластер баз данных был инициализирован с BLCKSZ %d, но сервер скомпилирован " -"с BLCKSZ %d." +"Кластер баз данных был инициализирован с cluster_block_size %d, но сервер скомпилирован " +"с cluster_block_size %d." #: access/transam/xlog.c:4099 access/transam/xlog.c:4106 #: access/transam/xlog.c:4113 access/transam/xlog.c:4120 @@ -2673,20 +2673,20 @@ msgstr "" #: access/transam/xlog.c:4131 #, c-format msgid "" -"The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the " -"server was compiled with TOAST_MAX_CHUNK_SIZE %d." +"The database cluster was initialized with cluster_toast_max_chunk_size %d, but the " +"server was compiled with cluster_toast_max_chunk_size %d." 
msgstr "" -"Кластер баз данных был инициализирован с TOAST_MAX_CHUNK_SIZE %d, но сервер " -"скомпилирован с TOAST_MAX_CHUNK_SIZE %d." +"Кластер баз данных был инициализирован с cluster_toast_max_chunk_size %d, но сервер " +"скомпилирован с cluster_toast_max_chunk_size %d." #: access/transam/xlog.c:4138 #, c-format msgid "" -"The database cluster was initialized with LOBLKSIZE %d, but the server was " -"compiled with LOBLKSIZE %d." +"The database cluster was initialized with cluster_loblksize %d, but the server was " +"compiled with cluster_loblksize %d." msgstr "" -"Кластер баз данных был инициализирован с LOBLKSIZE %d, но сервер " -"скомпилирован с LOBLKSIZE %d." +"Кластер баз данных был инициализирован с cluster_loblksize %d, но сервер " +"скомпилирован с cluster_loblksize %d." #: access/transam/xlog.c:4147 #, c-format diff --git a/src/backend/po/sv.po b/src/backend/po/sv.po index 0da20b6d43..39060c459e 100644 --- a/src/backend/po/sv.po +++ b/src/backend/po/sv.po @@ -2345,8 +2345,8 @@ msgstr "Databasklustret verkar använda en annan flyttalsrepresentation än vad #: access/transam/xlog.c:4070 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." -msgstr "Databasklustret initierades med BLCKSZ %d, men servern kompilerades med BLCKSZ %d." +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "Databasklustret initierades med cluster_block_size %d, men servern kompilerades med cluster_block_size %d." #: access/transam/xlog.c:4073 access/transam/xlog.c:4080 #: access/transam/xlog.c:4087 access/transam/xlog.c:4094 @@ -2379,13 +2379,13 @@ msgstr "Databasklustret initierades med INDEX_MAX_KEYS %d, men servern kompilera #: access/transam/xlog.c:4105 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." 
-msgstr "Databasklustret initierades med TOAST_MAX_CHUNK_SIZE %d, men servern kompilerades med TOAST_MAX_CHUNK_SIZE %d." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." +msgstr "Databasklustret initierades med cluster_toast_max_chunk_size %d, men servern kompilerades med cluster_toast_max_chunk_size %d." #: access/transam/xlog.c:4112 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "Databasklustret initierades med LOBLKSIZE %d, men servern kompilerades med LOBLKSIZE %d." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." +msgstr "Databasklustret initierades med cluster_loblksize %d, men servern kompilerades med cluster_loblksize %d." #: access/transam/xlog.c:4121 #, c-format @@ -22134,7 +22134,7 @@ msgstr "" #: storage/file/fd.c:3951 #, c-format -msgid "debug_io_direct is not supported for data because BLCKSZ is too small" +msgid "debug_io_direct is not supported for data because cluster_block_size is too small" msgstr "" #: storage/file/reinit.c:145 diff --git a/src/backend/po/tr.po b/src/backend/po/tr.po index b791e886b9..695ee51478 100644 --- a/src/backend/po/tr.po +++ b/src/backend/po/tr.po @@ -1761,8 +1761,8 @@ msgstr "Veritabanı dosyaları, sunucu programından farklı ondalık sayı biç #: access/transam/xlog.c:4676 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." -msgstr "Veritabanı clusteri BLCKSZ %d ile ilklendirilmiştir, ancak sunucu BLCKSZ %d ile derlenmiştir." +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "Veritabanı clusteri cluster_block_size %d ile ilklendirilmiştir, ancak sunucu cluster_block_size %d ile derlenmiştir." 
#: access/transam/xlog.c:4679 access/transam/xlog.c:4686 access/transam/xlog.c:4693 access/transam/xlog.c:4700 access/transam/xlog.c:4707 access/transam/xlog.c:4714 access/transam/xlog.c:4721 access/transam/xlog.c:4729 access/transam/xlog.c:4736 access/transam/xlog.c:4745 access/transam/xlog.c:4752 #, c-format @@ -1791,13 +1791,13 @@ msgstr "Veritabanı clusteri INDEX_MAX_KEYS %d ile ilklendirilmiştir, ancak sun #: access/transam/xlog.c:4711 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "Veritabanı clusteri TOAST_MAX_CHUNK_SIZE %d ile ilklendirilmiştir, ancak sunucu TOAST_MAX_CHUNK_SIZE %d ile derlenmiştir." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." +msgstr "Veritabanı clusteri cluster_toast_max_chunk_size %d ile ilklendirilmiştir, ancak sunucu cluster_toast_max_chunk_size %d ile derlenmiştir." #: access/transam/xlog.c:4718 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "Veritabanı clusteri LOBLKSIZE %d ile ilklendirilmiştir, ancak sunucu LOBLKSIZE %d ile derlenmiştir." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." +msgstr "Veritabanı clusteri cluster_loblksize %d ile ilklendirilmiştir, ancak sunucu cluster_loblksize %d ile derlenmiştir." #: access/transam/xlog.c:4727 #, c-format diff --git a/src/backend/po/uk.po b/src/backend/po/uk.po index 1095fd9139..abb8139f4d 100644 --- a/src/backend/po/uk.po +++ b/src/backend/po/uk.po @@ -2288,8 +2288,8 @@ msgstr "Здається, в кластері баз даних і в прогр #: access/transam/xlog.c:4096 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." 
-msgstr "Кластер бази даних було ініціалізовано з BLCKSZ %d, але сервер було скомпільовано з BLCKSZ %d." +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "Кластер бази даних було ініціалізовано з cluster_block_size %d, але сервер було скомпільовано з cluster_block_size %d." #: access/transam/xlog.c:4099 access/transam/xlog.c:4106 #: access/transam/xlog.c:4113 access/transam/xlog.c:4120 @@ -2322,13 +2322,13 @@ msgstr "Кластер бази даних було ініціалізовано #: access/transam/xlog.c:4131 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "Кластер бази даних було ініціалізовано з TOAST_MAX_CHUNK_SIZE %d, але сервер було скомпільовано з TOAST_MAX_CHUNK_SIZE %d." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." +msgstr "Кластер бази даних було ініціалізовано з cluster_toast_max_chunk_size %d, але сервер було скомпільовано з cluster_toast_max_chunk_size %d." #: access/transam/xlog.c:4138 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "Кластер бази даних було ініціалізовано з LOBLKSIZE %d, але сервер було скомпільовано з LOBLKSIZE %d." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." +msgstr "Кластер бази даних було ініціалізовано з cluster_loblksize %d, але сервер було скомпільовано з cluster_loblksize %d." 
#: access/transam/xlog.c:4147 #, c-format diff --git a/src/backend/po/zh_CN.po b/src/backend/po/zh_CN.po index 574684d775..b576a6e015 100644 --- a/src/backend/po/zh_CN.po +++ b/src/backend/po/zh_CN.po @@ -1852,8 +1852,8 @@ msgstr "数据库集群在使用与服务器执行部分不同的浮点数格式 #: access/transam/xlog.c:4677 #, c-format -msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." -msgstr "数据库簇是以 BLCKSZ %d 初始化的, 但是 服务器是以 BLCKSZ %d 编译的." +msgid "The database cluster was initialized with cluster_block_size %d, but the server was compiled with cluster_block_size %d." +msgstr "数据库簇是以 cluster_block_size %d 初始化的, 但是 服务器是以 cluster_block_size %d 编译的." #: access/transam/xlog.c:4680 access/transam/xlog.c:4687 #: access/transam/xlog.c:4694 access/transam/xlog.c:4701 @@ -1887,13 +1887,13 @@ msgstr "数据库集群是以 INDEX_MAX_KEYS %d 初始化的, 但是 服务器 #: access/transam/xlog.c:4712 #, c-format -msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." -msgstr "数据库集群是以 TOAST_MAX_CHUNK_SIZE %d 初始化的, 但是 服务器是以 TOAST_MAX_CHUNK_SIZE %d 编译的." +msgid "The database cluster was initialized with cluster_toast_max_chunk_size %d, but the server was compiled with cluster_toast_max_chunk_size %d." +msgstr "数据库集群是以 cluster_toast_max_chunk_size %d 初始化的, 但是 服务器是以 cluster_toast_max_chunk_size %d 编译的." #: access/transam/xlog.c:4719 #, c-format -msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." -msgstr "数据库簇是以 LOBLKSIZE %d 初始化的, 但是 服务器是以 LOBLKSIZE %d 编译的." +msgid "The database cluster was initialized with cluster_loblksize %d, but the server was compiled with cluster_loblksize %d." +msgstr "数据库簇是以 cluster_loblksize %d 初始化的, 但是 服务器是以 cluster_loblksize %d 编译的." 
#: access/transam/xlog.c:4728 #, c-format diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 597947410f..2653f68363 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -2056,7 +2056,7 @@ apply_spooled_messages(FileSet *stream_fileset, TransactionId xid, CurrentResourceOwner = oldowner; - buffer = palloc(BLCKSZ); + buffer = palloc(cluster_block_size); initStringInfo(&s2); MemoryContextSwitchTo(oldcxt); diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c index 0057443f0c..69f21908e3 100644 --- a/src/backend/storage/buffer/buf_init.c +++ b/src/backend/storage/buffer/buf_init.c @@ -82,7 +82,7 @@ InitBufferPool(void) BufferBlocks = (char *) TYPEALIGN(PG_IO_ALIGN_SIZE, ShmemInitStruct("Buffer Blocks", - NBuffers * (Size) BLCKSZ + PG_IO_ALIGN_SIZE, + NBuffers * (Size) cluster_block_size + PG_IO_ALIGN_SIZE, &foundBufs)); /* Align condition variables to cacheline boundary. */ @@ -168,7 +168,7 @@ BufferShmemSize(void) /* size of data pages, plus alignment padding */ size = add_size(size, PG_IO_ALIGN_SIZE); - size = add_size(size, mul_size(NBuffers, BLCKSZ)); + size = add_size(size, mul_size(NBuffers, cluster_block_size)); /* size of stuff controlled by freelist.c */ size = add_size(size, StrategyShmemSize()); diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 3bd82dbfca..d11f73c13b 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -60,7 +60,7 @@ /* Note: these two macros only work on shared buffers, not local ones! */ -#define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ)) +#define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * cluster_block_size)) #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr))) /* Note: this macro only works on local buffers, not shared ones! 
*/ @@ -1116,7 +1116,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, * wants us to allocate a buffer. */ if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) - MemSet((char *) bufBlock, 0, BLCKSZ); + MemSet((char *) bufBlock, 0, cluster_block_size); else { instr_time io_start = pgstat_prepare_io_time(); @@ -1137,7 +1137,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, errmsg("invalid page in block %u of relation %s; zeroing out page", blockNum, relpath(smgr->smgr_rlocator, forkNum)))); - MemSet((char *) bufBlock, 0, BLCKSZ); + MemSet((char *) bufBlock, 0, cluster_block_size); } else ereport(ERROR, @@ -1856,7 +1856,7 @@ ExtendBufferedRelShared(BufferManagerRelation bmr, buf_block = BufHdrGetBlock(GetBufferDescriptor(buffers[i] - 1)); /* new buffers are zero-filled */ - MemSet((char *) buf_block, 0, BLCKSZ); + MemSet((char *) buf_block, 0, cluster_block_size); } /* in case we need to pin an existing buffer below */ @@ -2285,7 +2285,7 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy) * not generally guaranteed to be marked undefined or * non-accessible in any case. */ - VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ); + VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), cluster_block_size); break; } } @@ -2350,7 +2350,7 @@ PinBuffer_Locked(BufferDesc *buf) * Valgrind (this is similar to the PinBuffer() case where the backend * doesn't already have a buffer pin) */ - VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ); + VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), cluster_block_size); /* * Since we hold the buffer spinlock, we can update the buffer state and @@ -2403,7 +2403,7 @@ UnpinBuffer(BufferDesc *buf) * within access method code that enforces that buffers are only * accessed while a buffer lock is held. 
*/ - VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ); + VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), cluster_block_size); /* I'd better not still hold the buffer content lock */ Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf))); @@ -3491,7 +3491,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum) if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind)) { /* - * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore + * Not every table AM uses cluster_block_size wide fixed size blocks. Therefore * tableam returns the size in bytes - but for the purpose of this * routine, we want the number of blocks. Therefore divide, rounding * up. @@ -3500,7 +3500,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum) szbytes = table_relation_size(relation, forkNum); - return (szbytes + (BLCKSZ - 1)) / BLCKSZ; + return (szbytes + (cluster_block_size - 1)) / cluster_block_size; } else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) { @@ -4288,7 +4288,7 @@ RelationCopyStorageUsingBuffer(RelFileLocator srclocator, * Bulk extend the destination relation of the same size as the source * relation before starting to copy block by block. */ - memset(buf.data, 0, BLCKSZ); + memset(buf.data, 0, cluster_block_size); smgrextend(smgropen(dstlocator, InvalidBackendId), forkNum, nblocks - 1, buf.data, true); @@ -4316,7 +4316,7 @@ RelationCopyStorageUsingBuffer(RelFileLocator srclocator, START_CRIT_SECTION(); /* Copy page data from the source to the destination. */ - memcpy(dstPage, srcPage, BLCKSZ); + memcpy(dstPage, srcPage, cluster_block_size); MarkBufferDirty(dstBuf); /* WAL-log the copied page. 
*/ diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c index 1c804fd2f5..e7365030e9 100644 --- a/src/backend/storage/buffer/freelist.c +++ b/src/backend/storage/buffer/freelist.c @@ -589,7 +589,7 @@ GetAccessStrategyWithSize(BufferAccessStrategyType btype, int ring_size_kb) Assert(ring_size_kb >= 0); /* Figure out how many buffers ring_size_kb is */ - ring_buffers = ring_size_kb / (BLCKSZ / 1024); + ring_buffers = ring_size_kb / (cluster_block_size / 1024); /* 0 means unlimited, so no BufferAccessStrategy required */ if (ring_buffers == 0) diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 1735ec7141..935b870933 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -335,7 +335,7 @@ ExtendBufferedRelLocal(BufferManagerRelation bmr, buf_block = LocalBufHdrGetBlock(buf_hdr); /* new buffers are zero-filled */ - MemSet((char *) buf_block, 0, BLCKSZ); + MemSet((char *) buf_block, 0, cluster_block_size); } first_block = smgrnblocks(bmr.smgr, fork); @@ -745,19 +745,19 @@ GetLocalBufferStorage(void) /* But not more than what we need for all remaining local bufs */ num_bufs = Min(num_bufs, NLocBuffer - total_bufs_allocated); /* And don't overflow MaxAllocSize, either */ - num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ); + num_bufs = Min(num_bufs, MaxAllocSize / cluster_block_size); /* Buffers should be I/O aligned. 
*/ cur_block = (char *) TYPEALIGN(PG_IO_ALIGN_SIZE, MemoryContextAlloc(LocalBufferContext, - num_bufs * BLCKSZ + PG_IO_ALIGN_SIZE)); + num_bufs * cluster_block_size + PG_IO_ALIGN_SIZE)); next_buf_in_block = 0; num_bufs_in_block = num_bufs; } /* Allocate next buffer in current memory block */ - this_buf = cur_block + next_buf_in_block * BLCKSZ; + this_buf = cur_block + next_buf_in_block * cluster_block_size; next_buf_in_block++; total_bufs_allocated++; diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c index 41ab64100e..cf5d980026 100644 --- a/src/backend/storage/file/buffile.c +++ b/src/backend/storage/file/buffile.c @@ -60,7 +60,7 @@ * tablespaces when available. */ #define MAX_PHYSICAL_FILESIZE 0x40000000 -#define BUFFILE_SEG_SIZE (MAX_PHYSICAL_FILESIZE / BLCKSZ) +#define BUFFILE_SEG_SIZE (MAX_PHYSICAL_FILESIZE / cluster_block_size) /* * This data structure represents a buffered file that consists of one or @@ -681,7 +681,7 @@ BufFileWrite(BufFile *file, const void *ptr, size_t size) while (size > 0) { - if (file->pos >= BLCKSZ) + if (file->pos >= cluster_block_size) { /* Buffer full, dump it out */ if (file->dirty) @@ -695,7 +695,7 @@ BufFileWrite(BufFile *file, const void *ptr, size_t size) } } - nthistime = BLCKSZ - file->pos; + nthistime = cluster_block_size - file->pos; if (nthistime > size) nthistime = size; Assert(nthistime > 0); @@ -839,9 +839,9 @@ BufFileTell(BufFile *file, int *fileno, off_t *offset) /* * BufFileSeekBlock --- block-oriented seek * - * Performs absolute seek to the start of the n'th BLCKSZ-sized block of + * Performs absolute seek to the start of the n'th cluster_block_size-sized block of * the file. Note that users of this interface will fail if their files - * exceed BLCKSZ * LONG_MAX bytes, but that is quite a lot; we don't work + * exceed cluster_block_size * LONG_MAX bytes, but that is quite a lot; we don't work * with tables bigger than that, either... * * Result is 0 if OK, EOF if not. 
Logical position is not moved if an @@ -852,7 +852,7 @@ BufFileSeekBlock(BufFile *file, long blknum) { return BufFileSeek(file, (int) (blknum / BUFFILE_SEG_SIZE), - (off_t) (blknum % BUFFILE_SEG_SIZE) * BLCKSZ, + (off_t) (blknum % BUFFILE_SEG_SIZE) * cluster_block_size, SEEK_SET); } @@ -867,7 +867,7 @@ BufFileTellBlock(BufFile *file) { long blknum; - blknum = (file->curOffset + file->pos) / BLCKSZ; + blknum = (file->curOffset + file->pos) / cluster_block_size; blknum += file->curFile * BUFFILE_SEG_SIZE; return blknum; } diff --git a/src/backend/storage/file/copydir.c b/src/backend/storage/file/copydir.c index e04bc3941a..327bd43d36 100644 --- a/src/backend/storage/file/copydir.c +++ b/src/backend/storage/file/copydir.c @@ -124,7 +124,7 @@ copy_file(const char *fromfile, const char *tofile) off_t flush_offset; /* Size of copy buffer (read and write requests) */ -#define COPY_BUF_SIZE (8 * BLCKSZ) +#define COPY_BUF_SIZE (8 * cluster_block_size) /* * Size of data flush requests. It seems beneficial on most platforms to diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c index b490a76ba7..7207405e06 100644 --- a/src/backend/storage/file/fd.c +++ b/src/backend/storage/file/fd.c @@ -3943,10 +3943,10 @@ check_debug_io_direct(char **newval, void **extra, GucSource source) result = false; } #endif -#if BLCKSZ < PG_IO_ALIGN_SIZE +#if cluster_block_size < PG_IO_ALIGN_SIZE if (result && (flags & IO_DIRECT_DATA)) { - GUC_check_errdetail("debug_io_direct is not supported for data because BLCKSZ is too small"); + GUC_check_errdetail("debug_io_direct is not supported for data because cluster_block_size is too small"); result = false; } #endif diff --git a/src/backend/storage/freespace/README b/src/backend/storage/freespace/README index e7ff23b76f..4fa6bacc83 100644 --- a/src/backend/storage/freespace/README +++ b/src/backend/storage/freespace/README @@ -14,8 +14,8 @@ It is important to keep the map small so that it can be searched rapidly. 
Therefore, we don't attempt to record the exact free space on a page. We allocate one map byte to each page, allowing us to record free space at a granularity of 1/256th of a page. Another way to say it is that -the stored value is the free space divided by BLCKSZ/256 (rounding down). -We assume that the free space must always be less than BLCKSZ, since +the stored value is the free space divided by cluster_block_size/256 (rounding down). +We assume that the free space must always be less than cluster_block_size, since all pages have some overhead; so the maximum map value is 255. To assist in fast searching, the map isn't simply an array of per-page @@ -97,7 +97,7 @@ has the same value as the corresponding leaf node on its parent page. The root page is always stored at physical block 0. For example, assuming each FSM page can hold information about 4 pages (in -reality, it holds (BLCKSZ - headers) / 2, or ~4000 with default BLCKSZ), +reality, it holds (cluster_block_size - headers) / 2, or ~4000 with default cluster_block_size), we get a disk layout like this: 0 <-- page 0 at level 2 (root page) @@ -136,7 +136,7 @@ and so forth. To keep things simple, the tree is always constant height. To cover the maximum relation size of 2^32-1 blocks, three levels is enough with the default -BLCKSZ (4000^3 > 2^32). +cluster_block_size (4000^3 > 2^32). Addressing ---------- diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c index fb9440ff72..f17fe758d6 100644 --- a/src/backend/storage/freespace/freespace.c +++ b/src/backend/storage/freespace/freespace.c @@ -38,11 +38,11 @@ * divide the amount of free space a page can have into 256 different * categories. 
The highest category, 255, represents a page with at least * MaxFSMRequestSize bytes of free space, and the second highest category - * represents the range from 254 * FSM_CAT_STEP, inclusive, to + * represents the range from 254 * fsm_cat_step, inclusive, to * MaxFSMRequestSize, exclusive. * - * MaxFSMRequestSize depends on the architecture and BLCKSZ, but assuming - * default 8k BLCKSZ, and that MaxFSMRequestSize is 8164 bytes, the + * MaxFSMRequestSize depends on the architecture and cluster_block_size, but assuming + * default 8k cluster_block_size, and that MaxFSMRequestSize is 8164 bytes, the * categories look like this: * * @@ -62,14 +62,14 @@ * request of exactly MaxFSMRequestSize bytes. */ #define FSM_CATEGORIES 256 -#define FSM_CAT_STEP (BLCKSZ / FSM_CATEGORIES) +#define fsm_cat_step (cluster_block_size / FSM_CATEGORIES) #define MaxFSMRequestSize MaxHeapTupleSize /* * Depth of the on-disk tree. We need to be able to address 2^32-1 blocks, * and 1626 is the smallest number that satisfies X^3 >= 2^32-1. Likewise, * 256 is the smallest number that satisfies X^4 >= 2^32-1. In practice, - * this means that 4096 bytes is the smallest BLCKSZ that we can get away + * this means that 4096 bytes is the smallest cluster_block_size that we can get away * with a 3-level tree, and 512 is the smallest we support. */ #define FSM_TREE_DEPTH ((SlotsPerFSMPage >= 1626) ? 
3 : 4) @@ -217,7 +217,7 @@ XLogRecordPageWithFreeSpace(RelFileLocator rlocator, BlockNumber heapBlk, page = BufferGetPage(buf); if (PageIsNew(page)) - PageInit(page, BLCKSZ, 0); + PageInit(page, cluster_block_size, 0); if (fsm_set_avail(page, slot, new_cat)) MarkBufferDirtyHint(buf, false); @@ -370,12 +370,12 @@ fsm_space_avail_to_cat(Size avail) { int cat; - Assert(avail < BLCKSZ); + Assert(avail < cluster_block_size); if (avail >= MaxFSMRequestSize) return 255; - cat = avail / FSM_CAT_STEP; + cat = avail / fsm_cat_step; /* * The highest category, 255, is reserved for MaxFSMRequestSize bytes or @@ -398,7 +398,7 @@ fsm_space_cat_to_avail(uint8 cat) if (cat == 255) return MaxFSMRequestSize; else - return cat * FSM_CAT_STEP; + return cat * fsm_cat_step; } /* @@ -417,7 +417,7 @@ fsm_space_needed_to_cat(Size needed) if (needed == 0) return 1; - cat = (needed + FSM_CAT_STEP - 1) / FSM_CAT_STEP; + cat = (needed + fsm_cat_step - 1) / fsm_cat_step; if (cat > 255) cat = 255; @@ -598,7 +598,7 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend) { LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); if (PageIsNew(BufferGetPage(buf))) - PageInit(BufferGetPage(buf), BLCKSZ, 0); + PageInit(BufferGetPage(buf), cluster_block_size, 0); LockBuffer(buf, BUFFER_LOCK_UNLOCK); } return buf; diff --git a/src/backend/storage/freespace/indexfsm.c b/src/backend/storage/freespace/indexfsm.c index fff8f4fbfb..0047793d7c 100644 --- a/src/backend/storage/freespace/indexfsm.c +++ b/src/backend/storage/freespace/indexfsm.c @@ -16,7 +16,7 @@ * This is similar to the FSM used for heap, in freespace.c, but instead * of tracking the amount of free space on pages, we only track whether * pages are completely free or in-use. We use the same FSM implementation - * as for heaps, using BLCKSZ - 1 to denote used pages, and 0 for unused. + * as for heaps, using cluster_block_size - 1 to denote used pages, and 0 for unused. 
* *------------------------------------------------------------------------- */ @@ -37,7 +37,7 @@ BlockNumber GetFreeIndexPage(Relation rel) { - BlockNumber blkno = GetPageWithFreeSpace(rel, BLCKSZ / 2); + BlockNumber blkno = GetPageWithFreeSpace(rel, cluster_block_size / 2); if (blkno != InvalidBlockNumber) RecordUsedIndexPage(rel, blkno); @@ -51,7 +51,7 @@ GetFreeIndexPage(Relation rel) void RecordFreeIndexPage(Relation rel, BlockNumber freeBlock) { - RecordPageWithFreeSpace(rel, freeBlock, BLCKSZ - 1); + RecordPageWithFreeSpace(rel, freeBlock, cluster_block_size - 1); } diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c index 84e543e731..17b834d39b 100644 --- a/src/backend/storage/large_object/inv_api.c +++ b/src/backend/storage/large_object/inv_api.c @@ -185,7 +185,7 @@ getdatafield(Form_pg_largeobject tuple, freeit = true; } len = VARSIZE(datafield) - VARHDRSZ; - if (len < 0 || len > LOBLKSIZE) + if (len < 0 || len > cluster_loblksize) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("pg_largeobject entry for OID %u, page %d has invalid data field size %d", @@ -414,7 +414,7 @@ inv_getsize(LargeObjectDesc *obj_desc) elog(ERROR, "null field found in pg_largeobject"); data = (Form_pg_largeobject) GETSTRUCT(tuple); getdatafield(data, &datafield, &len, &pfreeit); - lastbyte = (uint64) data->pageno * LOBLKSIZE + len; + lastbyte = (uint64) data->pageno * cluster_loblksize + len; if (pfreeit) pfree(datafield); } @@ -493,7 +493,7 @@ inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes) int64 n; int64 off; int len; - int32 pageno = (int32) (obj_desc->offset / LOBLKSIZE); + int32 pageno = (int32) (obj_desc->offset / cluster_loblksize); uint64 pageoff; ScanKeyData skey[2]; SysScanDesc sd; @@ -541,7 +541,7 @@ inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes) * there may be missing pages if the LO contains unwritten "holes". We * want missing sections to read out as zeroes. 
*/ - pageoff = ((uint64) data->pageno) * LOBLKSIZE; + pageoff = ((uint64) data->pageno) * cluster_loblksize; if (pageoff > obj_desc->offset) { n = pageoff - obj_desc->offset; @@ -555,7 +555,7 @@ inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes) { Assert(obj_desc->offset >= pageoff); off = (int) (obj_desc->offset - pageoff); - Assert(off >= 0 && off < LOBLKSIZE); + Assert(off >= 0 && off < cluster_loblksize); getdatafield(data, &datafield, &len, &pfreeit); if (len > off) @@ -586,7 +586,7 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes) int n; int off; int len; - int32 pageno = (int32) (obj_desc->offset / LOBLKSIZE); + int32 pageno = (int32) (obj_desc->offset / cluster_loblksize); ScanKeyData skey[2]; SysScanDesc sd; HeapTuple oldtuple; @@ -598,7 +598,7 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes) { bytea hdr; /* this is to make the union big enough for a LO data chunk: */ - char data[LOBLKSIZE + VARHDRSZ]; + char data[cluster_loblksize + VARHDRSZ]; /* ensure union is aligned well enough: */ int32 align_it; } workbuf; @@ -687,14 +687,14 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes) /* * Fill any hole */ - off = (int) (obj_desc->offset % LOBLKSIZE); + off = (int) (obj_desc->offset % cluster_loblksize); if (off > len) MemSet(workb + len, 0, off - len); /* * Insert appropriate portion of new data */ - n = LOBLKSIZE - off; + n = cluster_loblksize - off; n = (n <= (nbytes - nwritten)) ? n : (nbytes - nwritten); memcpy(workb + off, buf + nwritten, n); nwritten += n; @@ -732,14 +732,14 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes) * * First, fill any hole */ - off = (int) (obj_desc->offset % LOBLKSIZE); + off = (int) (obj_desc->offset % cluster_loblksize); if (off > 0) MemSet(workb, 0, off); /* * Insert appropriate portion of new data */ - n = LOBLKSIZE - off; + n = cluster_loblksize - off; n = (n <= (nbytes - nwritten)) ? 
n : (nbytes - nwritten); memcpy(workb + off, buf + nwritten, n); nwritten += n; @@ -779,7 +779,7 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes) void inv_truncate(LargeObjectDesc *obj_desc, int64 len) { - int32 pageno = (int32) (len / LOBLKSIZE); + int32 pageno = (int32) (len / cluster_loblksize); int32 off; ScanKeyData skey[2]; SysScanDesc sd; @@ -789,7 +789,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int64 len) { bytea hdr; /* this is to make the union big enough for a LO data chunk: */ - char data[LOBLKSIZE + VARHDRSZ]; + char data[cluster_loblksize + VARHDRSZ]; /* ensure union is aligned well enough: */ int32 align_it; } workbuf; @@ -872,7 +872,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int64 len) /* * Fill any hole */ - off = len % LOBLKSIZE; + off = len % cluster_loblksize; if (off > pagelen) MemSet(workb + pagelen, 0, off - pagelen); @@ -911,7 +911,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int64 len) * * Fill the hole up to the truncation point */ - off = len % LOBLKSIZE; + off = len % cluster_loblksize; if (off > 0) MemSet(workb, 0, off); diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index 1af41213b4..a16509089a 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -321,9 +321,9 @@ static SlruCtlData SerialSlruCtlData; #define SerialSlruCtl (&SerialSlruCtlData) -#define SERIAL_PAGESIZE BLCKSZ +#define serial_pagesize cluster_block_size #define SERIAL_ENTRYSIZE sizeof(SerCommitSeqNo) -#define SERIAL_ENTRIESPERPAGE (SERIAL_PAGESIZE / SERIAL_ENTRYSIZE) +#define SERIAL_ENTRIESPERPAGE (serial_pagesize / SERIAL_ENTRYSIZE) /* * Set maximum pages based on the number needed to track all transactions. 
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c index 9a302ddc30..07a664355f 100644 --- a/src/backend/storage/page/bufpage.c +++ b/src/backend/storage/page/bufpage.c @@ -45,7 +45,7 @@ PageInit(Page page, Size pageSize, Size specialSize) specialSize = MAXALIGN(specialSize); - Assert(pageSize == BLCKSZ); + Assert(pageSize == cluster_block_size); Assert(pageSize > specialSize + SizeOfPageHeaderData); /* Make sure all fields of page are zero, as well as unused space */ @@ -117,7 +117,7 @@ PageIsVerifiedExtended(Page page, BlockNumber blkno, int flags) if ((p->pd_flags & ~PD_VALID_FLAG_BITS) == 0 && p->pd_lower <= p->pd_upper && p->pd_upper <= p->pd_special && - p->pd_special <= BLCKSZ && + p->pd_special <= cluster_block_size && p->pd_special == MAXALIGN(p->pd_special)) header_sane = true; @@ -128,7 +128,7 @@ PageIsVerifiedExtended(Page page, BlockNumber blkno, int flags) /* Check all-zeroes case */ all_zeroes = true; pagebytes = (size_t *) page; - for (i = 0; i < (BLCKSZ / sizeof(size_t)); i++) + for (i = 0; i < (cluster_block_size / sizeof(size_t)); i++) { if (pagebytes[i] != 0) { @@ -211,7 +211,7 @@ PageAddItemExtended(Page page, if (phdr->pd_lower < SizeOfPageHeaderData || phdr->pd_lower > phdr->pd_upper || phdr->pd_upper > phdr->pd_special || - phdr->pd_special > BLCKSZ) + phdr->pd_special > cluster_block_size) ereport(PANIC, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u", @@ -723,7 +723,7 @@ PageRepairFragmentation(Page page) if (pd_lower < SizeOfPageHeaderData || pd_lower > pd_upper || pd_upper > pd_special || - pd_special > BLCKSZ || + pd_special > cluster_block_size || pd_special != MAXALIGN(pd_special)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), @@ -1066,7 +1066,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum) if (phdr->pd_lower < SizeOfPageHeaderData || phdr->pd_lower > phdr->pd_upper || phdr->pd_upper > phdr->pd_special || - phdr->pd_special > BLCKSZ 
|| + phdr->pd_special > cluster_block_size || phdr->pd_special != MAXALIGN(phdr->pd_special)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), @@ -1201,7 +1201,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) if (pd_lower < SizeOfPageHeaderData || pd_lower > pd_upper || pd_upper > pd_special || - pd_special > BLCKSZ || + pd_special > cluster_block_size || pd_special != MAXALIGN(pd_special)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), @@ -1307,7 +1307,7 @@ PageIndexTupleDeleteNoCompact(Page page, OffsetNumber offnum) if (phdr->pd_lower < SizeOfPageHeaderData || phdr->pd_lower > phdr->pd_upper || phdr->pd_upper > phdr->pd_special || - phdr->pd_special > BLCKSZ || + phdr->pd_special > cluster_block_size || phdr->pd_special != MAXALIGN(phdr->pd_special)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), @@ -1419,7 +1419,7 @@ PageIndexTupleOverwrite(Page page, OffsetNumber offnum, if (phdr->pd_lower < SizeOfPageHeaderData || phdr->pd_lower > phdr->pd_upper || phdr->pd_upper > phdr->pd_special || - phdr->pd_special > BLCKSZ || + phdr->pd_special > cluster_block_size || phdr->pd_special != MAXALIGN(phdr->pd_special)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), @@ -1523,11 +1523,11 @@ PageSetChecksumCopy(Page page, BlockNumber blkno) */ if (pageCopy == NULL) pageCopy = MemoryContextAllocAligned(TopMemoryContext, - BLCKSZ, + cluster_block_size, PG_IO_ALIGN_SIZE, 0); - memcpy(pageCopy, (char *) page, BLCKSZ); + memcpy(pageCopy, (char *) page, cluster_block_size); ((PageHeader) pageCopy)->pd_checksum = pg_checksum_page(pageCopy, blkno); return pageCopy; } diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index fdecbad170..319e8d8655 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -467,7 +467,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, MdfdVec *v; /* If this build supports direct I/O, the buffer must be I/O aligned. 
*/ - if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= BLCKSZ) + if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= cluster_block_size) Assert((uintptr_t) buffer == TYPEALIGN(PG_IO_ALIGN_SIZE, buffer)); /* This assert is too expensive to have on normally ... */ @@ -490,11 +490,11 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE); - seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE)); + seekpos = (off_t) cluster_block_size * (blocknum % ((BlockNumber) RELSEG_SIZE)); - Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE); + Assert(seekpos < (off_t) cluster_block_size * RELSEG_SIZE); - if ((nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_EXTEND)) != BLCKSZ) + if ((nbytes = FileWrite(v->mdfd_vfd, buffer, cluster_block_size, seekpos, WAIT_EVENT_DATA_FILE_EXTEND)) != cluster_block_size) { if (nbytes < 0) ereport(ERROR, @@ -507,7 +507,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, (errcode(ERRCODE_DISK_FULL), errmsg("could not extend file \"%s\": wrote only %d of %d bytes at block %u", FilePathName(v->mdfd_vfd), - nbytes, BLCKSZ, blocknum), + nbytes, cluster_block_size, blocknum), errhint("Check free disk space."))); } @@ -553,7 +553,7 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, while (remblocks > 0) { BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE); - off_t seekpos = (off_t) BLCKSZ * segstartblock; + off_t seekpos = (off_t) cluster_block_size * segstartblock; int numblocks; if (segstartblock + remblocks > RELSEG_SIZE) @@ -582,7 +582,7 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, int ret; ret = FileFallocate(v->mdfd_vfd, - seekpos, (off_t) BLCKSZ * numblocks, + seekpos, (off_t) cluster_block_size * numblocks, WAIT_EVENT_DATA_FILE_EXTEND); if (ret != 0) { @@ -605,7 +605,7 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, * whole length of the extension. 
*/ ret = FileZero(v->mdfd_vfd, - seekpos, (off_t) BLCKSZ * numblocks, + seekpos, (off_t) cluster_block_size * numblocks, WAIT_EVENT_DATA_FILE_EXTEND); if (ret < 0) ereport(ERROR, @@ -726,11 +726,11 @@ mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum) if (v == NULL) return false; - seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE)); + seekpos = (off_t) cluster_block_size * (blocknum % ((BlockNumber) RELSEG_SIZE)); - Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE); + Assert(seekpos < (off_t) cluster_block_size * RELSEG_SIZE); - (void) FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ, WAIT_EVENT_DATA_FILE_PREFETCH); + (void) FilePrefetch(v->mdfd_vfd, seekpos, cluster_block_size, WAIT_EVENT_DATA_FILE_PREFETCH); #endif /* USE_PREFETCH */ return true; @@ -748,7 +748,7 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, MdfdVec *v; /* If this build supports direct I/O, the buffer must be I/O aligned. */ - if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= BLCKSZ) + if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= cluster_block_size) Assert((uintptr_t) buffer == TYPEALIGN(PG_IO_ALIGN_SIZE, buffer)); TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum, @@ -760,11 +760,11 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, v = _mdfd_getseg(reln, forknum, blocknum, false, EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY); - seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE)); + seekpos = (off_t) cluster_block_size * (blocknum % ((BlockNumber) RELSEG_SIZE)); - Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE); + Assert(seekpos < (off_t) cluster_block_size * RELSEG_SIZE); - nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_READ); + nbytes = FileRead(v->mdfd_vfd, buffer, cluster_block_size, seekpos, WAIT_EVENT_DATA_FILE_READ); TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum, reln->smgr_rlocator.locator.spcOid, @@ -772,9 +772,9 @@ mdread(SMgrRelation reln, ForkNumber forknum, 
BlockNumber blocknum, reln->smgr_rlocator.locator.relNumber, reln->smgr_rlocator.backend, nbytes, - BLCKSZ); + cluster_block_size); - if (nbytes != BLCKSZ) + if (nbytes != cluster_block_size) { if (nbytes < 0) ereport(ERROR, @@ -791,13 +791,13 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, * update a block that was later truncated away. */ if (zero_damaged_pages || InRecovery) - MemSet(buffer, 0, BLCKSZ); + MemSet(buffer, 0, cluster_block_size); else ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("could not read block %u in file \"%s\": read only %d of %d bytes", blocknum, FilePathName(v->mdfd_vfd), - nbytes, BLCKSZ))); + nbytes, cluster_block_size))); } } @@ -817,7 +817,7 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, MdfdVec *v; /* If this build supports direct I/O, the buffer must be I/O aligned. */ - if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= BLCKSZ) + if (PG_O_DIRECT != 0 && PG_IO_ALIGN_SIZE <= cluster_block_size) Assert((uintptr_t) buffer == TYPEALIGN(PG_IO_ALIGN_SIZE, buffer)); /* This assert is too expensive to have on normally ... 
*/ @@ -834,11 +834,11 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY); - seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE)); + seekpos = (off_t) cluster_block_size * (blocknum % ((BlockNumber) RELSEG_SIZE)); - Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE); + Assert(seekpos < (off_t) cluster_block_size * RELSEG_SIZE); - nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_WRITE); + nbytes = FileWrite(v->mdfd_vfd, buffer, cluster_block_size, seekpos, WAIT_EVENT_DATA_FILE_WRITE); TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum, reln->smgr_rlocator.locator.spcOid, @@ -846,9 +846,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, reln->smgr_rlocator.locator.relNumber, reln->smgr_rlocator.backend, nbytes, - BLCKSZ); + cluster_block_size); - if (nbytes != BLCKSZ) + if (nbytes != cluster_block_size) { if (nbytes < 0) ereport(ERROR, @@ -861,7 +861,7 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, errmsg("could not write block %u in file \"%s\": wrote only %d of %d bytes", blocknum, FilePathName(v->mdfd_vfd), - nbytes, BLCKSZ), + nbytes, cluster_block_size), errhint("Check free disk space."))); } @@ -917,9 +917,9 @@ mdwriteback(SMgrRelation reln, ForkNumber forknum, Assert(nflush >= 1); Assert(nflush <= nblocks); - seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE)); + seekpos = (off_t) cluster_block_size * (blocknum % ((BlockNumber) RELSEG_SIZE)); - FileWriteback(v->mdfd_vfd, seekpos, (off_t) BLCKSZ * nflush, WAIT_EVENT_DATA_FILE_FLUSH); + FileWriteback(v->mdfd_vfd, seekpos, (off_t) cluster_block_size * nflush, WAIT_EVENT_DATA_FILE_FLUSH); nblocks -= nflush; blocknum += nflush; @@ -1061,7 +1061,7 @@ mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) */ BlockNumber lastsegblocks = nblocks - priorblocks; - if 
(FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * BLCKSZ, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0) + if (FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * cluster_block_size, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0) ereport(ERROR, (errcode_for_file_access(), errmsg("could not truncate file \"%s\" to %u blocks: %m", @@ -1458,7 +1458,7 @@ _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno, */ if (nblocks < ((BlockNumber) RELSEG_SIZE)) { - char *zerobuf = palloc_aligned(BLCKSZ, PG_IO_ALIGN_SIZE, + char *zerobuf = palloc_aligned(cluster_block_size, PG_IO_ALIGN_SIZE, MCXT_ALLOC_ZERO); mdextend(reln, forknum, @@ -1529,7 +1529,7 @@ _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg) errmsg("could not seek to end of file \"%s\": %m", FilePathName(seg->mdfd_vfd)))); /* note that this calculation will ignore any partial block at EOF */ - return (BlockNumber) (len / BLCKSZ); + return (BlockNumber) (len / cluster_block_size); } /* diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 2b9742ad21..1bb1ae53b1 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -1405,12 +1405,12 @@ pg_stat_get_io(PG_FUNCTION_ARGS) values[IO_COL_RESET_TIME] = TimestampTzGetDatum(reset_time); /* - * Hard-code this to the value of BLCKSZ for now. Future + * Hard-code this to the value of cluster_block_size for now. Future * values could include XLOG_BLCKSZ, once WAL IO is tracked, * and constant multipliers, once non-block-oriented IO (e.g. * temporary file IO) is tracked. 
*/ - values[IO_COL_CONVERSION] = Int64GetDatum(BLCKSZ); + values[IO_COL_CONVERSION] = Int64GetDatum(cluster_block_size); for (int io_op = 0; io_op < IOOP_NUM_TYPES; io_op++) { diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index c4fcd0076e..83577060f8 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -7760,7 +7760,7 @@ gincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, * around 3 bytes per item is fairly typical. */ dataPagesFetchedBySel = ceil(*indexSelectivity * - (numTuples / (BLCKSZ / 3))); + (numTuples / (cluster_block_size / 3))); if (dataPagesFetchedBySel > dataPagesFetched) dataPagesFetched = dataPagesFetchedBySel; diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index 1e671c560c..37d91cb902 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -1501,8 +1501,8 @@ AddToDataDirLockFile(int target_line, const char *str) int lineno; char *srcptr; char *destptr; - char srcbuffer[BLCKSZ]; - char destbuffer[BLCKSZ]; + char srcbuffer[cluster_block_size]; + char destbuffer[cluster_block_size]; fd = open(DIRECTORY_LOCK_FILE, O_RDWR | PG_BINARY, 0); if (fd < 0) @@ -1626,7 +1626,7 @@ RecheckDataDirLockFile(void) int fd; int len; long file_pid; - char buffer[BLCKSZ]; + char buffer[cluster_block_size]; fd = open(DIRECTORY_LOCK_FILE, O_RDWR | PG_BINARY, 0); if (fd < 0) diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 84e7ad4d90..1ec0edd6ac 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -105,8 +105,8 @@ typedef struct } unit_conversion; /* Ensure that the constants in the tables don't overflow or underflow */ -#if BLCKSZ < 1024 || BLCKSZ > (1024*1024) -#error BLCKSZ must be between 1KB and 1MB +#if cluster_block_size < 1024 || cluster_block_size > (1024*1024) +#error cluster_block_size must be between 1KB and 1MB #endif #if XLOG_BLCKSZ < 1024 || 
XLOG_BLCKSZ > (1024*1024) #error XLOG_BLCKSZ must be between 1KB and 1MB @@ -134,11 +134,11 @@ static const unit_conversion memory_unit_conversion_table[] = {"kB", GUC_UNIT_MB, 1.0 / 1024.0}, {"B", GUC_UNIT_MB, 1.0 / (1024.0 * 1024.0)}, - {"TB", GUC_UNIT_BLOCKS, (1024.0 * 1024.0 * 1024.0) / (BLCKSZ / 1024)}, - {"GB", GUC_UNIT_BLOCKS, (1024.0 * 1024.0) / (BLCKSZ / 1024)}, - {"MB", GUC_UNIT_BLOCKS, 1024.0 / (BLCKSZ / 1024)}, - {"kB", GUC_UNIT_BLOCKS, 1.0 / (BLCKSZ / 1024)}, - {"B", GUC_UNIT_BLOCKS, 1.0 / BLCKSZ}, + {"TB", GUC_UNIT_BLOCKS, (1024.0 * 1024.0 * 1024.0) / (cluster_block_size / 1024)}, + {"GB", GUC_UNIT_BLOCKS, (1024.0 * 1024.0) / (cluster_block_size / 1024)}, + {"MB", GUC_UNIT_BLOCKS, 1024.0 / (cluster_block_size / 1024)}, + {"kB", GUC_UNIT_BLOCKS, 1.0 / (cluster_block_size / 1024)}, + {"B", GUC_UNIT_BLOCKS, 1.0 / cluster_block_size}, {"TB", GUC_UNIT_XBLOCKS, (1024.0 * 1024.0 * 1024.0) / (XLOG_BLCKSZ / 1024)}, {"GB", GUC_UNIT_XBLOCKS, (1024.0 * 1024.0) / (XLOG_BLCKSZ / 1024)}, @@ -2782,7 +2782,7 @@ get_config_unit_name(int flags) /* initialize if first time through */ if (bbuf[0] == '\0') - snprintf(bbuf, sizeof(bbuf), "%dkB", BLCKSZ / 1024); + snprintf(bbuf, sizeof(bbuf), "%dkB", cluster_block_size / 1024); return bbuf; } case GUC_UNIT_XBLOCKS: diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index e565a3092f..a465f7f9d1 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -3118,7 +3118,7 @@ struct config_int ConfigureNamesInt[] = GUC_NOT_IN_SAMPLE | GUC_DISALLOW_IN_FILE }, &block_size, - BLCKSZ, BLCKSZ, BLCKSZ, + cluster_block_size, cluster_block_size, cluster_block_size, NULL, NULL, NULL }, @@ -3368,7 +3368,7 @@ struct config_int ConfigureNamesInt[] = GUC_UNIT_BLOCKS | GUC_EXPLAIN, }, &min_parallel_table_scan_size, - (8 * 1024 * 1024) / BLCKSZ, 0, INT_MAX / 3, + (8 * 1024 * 1024) / cluster_block_size, 0, INT_MAX / 3, NULL, NULL, NULL }, @@ -3379,7 +3379,7 @@ struct 
config_int ConfigureNamesInt[] = GUC_UNIT_BLOCKS | GUC_EXPLAIN, }, &min_parallel_index_scan_size, - (512 * 1024) / BLCKSZ, 0, INT_MAX / 3, + (512 * 1024) / cluster_block_size, 0, INT_MAX / 3, NULL, NULL, NULL }, diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c index 52b8898d5e..f46d2bac60 100644 --- a/src/backend/utils/sort/logtape.c +++ b/src/backend/utils/sort/logtape.c @@ -27,7 +27,7 @@ * larger size than the underlying OS may support. * * For simplicity, we allocate and release space in the underlying file - * in BLCKSZ-size blocks. Space allocation boils down to keeping track + * in cluster_block_size-size blocks. Space allocation boils down to keeping track * of which blocks in the underlying file belong to which logical tape, * plus any blocks that are free (recycled and not yet reused). * The blocks in each logical tape form a chain, with a prev- and next- @@ -86,7 +86,7 @@ #include "utils/memutils.h" /* - * A TapeBlockTrailer is stored at the end of each BLCKSZ block. + * A TapeBlockTrailer is stored at the end of each cluster_block_size block. * * The first block of a tape has prev == -1. The last block of a tape * stores the number of valid bytes on the block, inverted, in 'next' @@ -100,7 +100,7 @@ typedef struct TapeBlockTrailer * bytes on last block (if < 0) */ } TapeBlockTrailer; -#define TapeBlockPayloadSize (BLCKSZ - sizeof(TapeBlockTrailer)) +#define TapeBlockPayloadSize (cluster_block_size - sizeof(TapeBlockTrailer)) #define TapeBlockGetTrailer(buf) \ ((TapeBlockTrailer *) ((char *) buf + TapeBlockPayloadSize)) @@ -192,7 +192,7 @@ struct LogicalTapeSet /* * File size tracking. nBlocksWritten is the size of the underlying file, - * in BLCKSZ blocks. nBlocksAllocated is the number of blocks allocated + * in cluster_block_size blocks. nBlocksAllocated is the number of blocks allocated * by ltsReleaseBlock(), and it is always greater than or equal to * nBlocksWritten. 
Blocks between nBlocksAllocated and nBlocksWritten are * blocks that have been allocated for a tape, but have not been written @@ -265,7 +265,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, const void *buffer) (errcode_for_file_access(), errmsg("could not seek to block %ld of temporary file", blocknum))); - BufFileWrite(lts->pfile, buffer, BLCKSZ); + BufFileWrite(lts->pfile, buffer, cluster_block_size); /* Update nBlocksWritten, if we extended the file */ if (blocknum == lts->nBlocksWritten) @@ -286,7 +286,7 @@ ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer) (errcode_for_file_access(), errmsg("could not seek to block %ld of temporary file", blocknum))); - BufFileReadExact(lts->pfile, buffer, BLCKSZ); + BufFileReadExact(lts->pfile, buffer, cluster_block_size); } /* @@ -328,7 +328,7 @@ ltsReadFillBuffer(LogicalTape *lt) lt->nextBlockNumber = TapeBlockGetTrailer(thisbuf)->next; /* Advance to next block, if we have buffer space left */ - } while (lt->buffer_size - lt->nbytes > BLCKSZ); + } while (lt->buffer_size - lt->nbytes > cluster_block_size); return (lt->nbytes > 0); } @@ -640,7 +640,7 @@ LogicalTapeImport(LogicalTapeSet *lts, int worker, TapeShare *shared) } /* Don't allocate more for read buffer than could possibly help */ lt->max_size = Min(MaxAllocSize, filesize); - tapeblocks = filesize / BLCKSZ; + tapeblocks = filesize / cluster_block_size; /* * Update # of allocated blocks and # blocks written to reflect the @@ -769,8 +769,8 @@ LogicalTapeWrite(LogicalTape *lt, const void *ptr, size_t size) /* Allocate data buffer and first block on first write */ if (lt->buffer == NULL) { - lt->buffer = (char *) palloc(BLCKSZ); - lt->buffer_size = BLCKSZ; + lt->buffer = (char *) palloc(cluster_block_size); + lt->buffer_size = cluster_block_size; } if (lt->curBlockNumber == -1) { @@ -783,7 +783,7 @@ LogicalTapeWrite(LogicalTape *lt, const void *ptr, size_t size) TapeBlockGetTrailer(lt->buffer)->prev = -1L; } - Assert(lt->buffer_size == BLCKSZ); + 
Assert(lt->buffer_size == cluster_block_size); while (size > 0) { if (lt->pos >= (int) TapeBlockPayloadSize) @@ -837,9 +837,9 @@ LogicalTapeWrite(LogicalTape *lt, const void *ptr, size_t size) * * 'buffer_size' specifies how much memory to use for the read buffer. * Regardless of the argument, the actual amount of memory used is between - * BLCKSZ and MaxAllocSize, and is a multiple of BLCKSZ. The given value is + * cluster_block_size and MaxAllocSize, and is a multiple of cluster_block_size. The given value is * rounded down and truncated to fit those constraints, if necessary. If the - * tape is frozen, the 'buffer_size' argument is ignored, and a small BLCKSZ + * tape is frozen, the 'buffer_size' argument is ignored, and a small cluster_block_size * byte buffer is used. */ void @@ -851,19 +851,19 @@ LogicalTapeRewindForRead(LogicalTape *lt, size_t buffer_size) * Round and cap buffer_size if needed. */ if (lt->frozen) - buffer_size = BLCKSZ; + buffer_size = cluster_block_size; else { /* need at least one block */ - if (buffer_size < BLCKSZ) - buffer_size = BLCKSZ; + if (buffer_size < cluster_block_size) + buffer_size = cluster_block_size; /* palloc() larger than max_size is unlikely to be helpful */ if (buffer_size > lt->max_size) buffer_size = lt->max_size; - /* round down to BLCKSZ boundary */ - buffer_size -= buffer_size % BLCKSZ; + /* round down to cluster_block_size boundary */ + buffer_size -= buffer_size % cluster_block_size; } if (lt->writing) @@ -1015,12 +1015,12 @@ LogicalTapeFreeze(LogicalTape *lt, TapeShare *share) * we're reading from multiple tapes. But at the end of a sort, when a * tape is frozen, we only read from a single tape anyway. 
*/ - if (!lt->buffer || lt->buffer_size != BLCKSZ) + if (!lt->buffer || lt->buffer_size != cluster_block_size) { if (lt->buffer) pfree(lt->buffer); - lt->buffer = palloc(BLCKSZ); - lt->buffer_size = BLCKSZ; + lt->buffer = palloc(cluster_block_size); + lt->buffer_size = cluster_block_size; } /* Read the first block, or reset if tape is empty */ @@ -1064,7 +1064,7 @@ LogicalTapeBackspace(LogicalTape *lt, size_t size) size_t seekpos = 0; Assert(lt->frozen); - Assert(lt->buffer_size == BLCKSZ); + Assert(lt->buffer_size == cluster_block_size); if (lt->buffer == NULL) ltsInitReadBuffer(lt); @@ -1134,7 +1134,7 @@ LogicalTapeSeek(LogicalTape *lt, long blocknum, int offset) { Assert(lt->frozen); Assert(offset >= 0 && offset <= TapeBlockPayloadSize); - Assert(lt->buffer_size == BLCKSZ); + Assert(lt->buffer_size == cluster_block_size); if (lt->buffer == NULL) ltsInitReadBuffer(lt); @@ -1167,7 +1167,7 @@ LogicalTapeTell(LogicalTape *lt, long *blocknum, int *offset) Assert(lt->offsetBlockNumber == 0L); /* With a larger buffer, 'pos' wouldn't be the same as offset within page */ - Assert(lt->buffer_size == BLCKSZ); + Assert(lt->buffer_size == cluster_block_size); *blocknum = lt->curBlockNumber; *offset = lt->pos; diff --git a/src/backend/utils/sort/sharedtuplestore.c b/src/backend/utils/sort/sharedtuplestore.c index 236be65f22..b34bed7466 100644 --- a/src/backend/utils/sort/sharedtuplestore.c +++ b/src/backend/utils/sort/sharedtuplestore.c @@ -37,7 +37,7 @@ */ #define STS_CHUNK_PAGES 4 #define STS_CHUNK_HEADER_SIZE offsetof(SharedTuplestoreChunk, data) -#define STS_CHUNK_DATA_SIZE (STS_CHUNK_PAGES * BLCKSZ - STS_CHUNK_HEADER_SIZE) +#define STS_CHUNK_DATA_SIZE (STS_CHUNK_PAGES * cluster_block_size - STS_CHUNK_HEADER_SIZE) /* Chunk written to disk. 
*/ typedef struct SharedTuplestoreChunk @@ -198,7 +198,7 @@ sts_flush_chunk(SharedTuplestoreAccessor *accessor) { size_t size; - size = STS_CHUNK_PAGES * BLCKSZ; + size = STS_CHUNK_PAGES * cluster_block_size; BufFileWrite(accessor->write_file, accessor->write_chunk, size); memset(accessor->write_chunk, 0, size); accessor->write_pointer = &accessor->write_chunk->data[0]; @@ -332,11 +332,11 @@ sts_puttuple(SharedTuplestoreAccessor *accessor, void *meta_data, /* First time through. Allocate chunk. */ accessor->write_chunk = (SharedTuplestoreChunk *) MemoryContextAllocZero(accessor->context, - STS_CHUNK_PAGES * BLCKSZ); + STS_CHUNK_PAGES * cluster_block_size); accessor->write_chunk->ntuples = 0; accessor->write_pointer = &accessor->write_chunk->data[0]; accessor->write_end = (char *) - accessor->write_chunk + STS_CHUNK_PAGES * BLCKSZ; + accessor->write_chunk + STS_CHUNK_PAGES * cluster_block_size; } else { @@ -445,7 +445,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data) } remaining_size = size - sizeof(uint32); this_chunk_size = Min(remaining_size, - BLCKSZ * STS_CHUNK_PAGES - accessor->read_bytes); + cluster_block_size * STS_CHUNK_PAGES - accessor->read_bytes); destination = accessor->read_buffer + sizeof(uint32); BufFileReadExact(accessor->read_file, destination, this_chunk_size); accessor->read_bytes += this_chunk_size; @@ -468,7 +468,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data) errdetail_internal("Expected overflow chunk."))); accessor->read_next_page += STS_CHUNK_PAGES; this_chunk_size = Min(remaining_size, - BLCKSZ * STS_CHUNK_PAGES - + cluster_block_size * STS_CHUNK_PAGES - STS_CHUNK_HEADER_SIZE); BufFileReadExact(accessor->read_file, destination, this_chunk_size); accessor->read_bytes += this_chunk_size; diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index c7a6c03f97..f6f98bb273 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ 
-179,8 +179,8 @@ typedef enum */ #define MINORDER 6 /* minimum merge order */ #define MAXORDER 500 /* maximum merge order */ -#define TAPE_BUFFER_OVERHEAD BLCKSZ -#define MERGE_BUFFER_SIZE (BLCKSZ * 32) +#define TAPE_BUFFER_OVERHEAD cluster_block_size +#define MERGE_BUFFER_SIZE (cluster_block_size * 32) /* @@ -1000,7 +1000,7 @@ tuplesort_updatemax(Tuplesortstate *state) if (state->tapeset) { isSpaceDisk = true; - spaceUsed = LogicalTapeSetBlocks(state->tapeset) * BLCKSZ; + spaceUsed = LogicalTapeSetBlocks(state->tapeset) * cluster_block_size; } else { diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 905b979947..84b0cc7954 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -1108,8 +1108,8 @@ test_config_settings(void) for (i = 0; i < bufslen; i++) { - /* Use same amount of memory, independent of BLCKSZ */ - test_buffs = (trial_bufs[i] * 8192) / BLCKSZ; + /* Use same amount of memory, independent of cluster_block_size */ + test_buffs = (trial_bufs[i] * 8192) / cluster_block_size; if (test_buffs <= ok_buffers) { test_buffs = ok_buffers; @@ -1121,10 +1121,10 @@ test_config_settings(void) } n_buffers = test_buffs; - if ((n_buffers * (BLCKSZ / 1024)) % 1024 == 0) - printf("%dMB\n", (n_buffers * (BLCKSZ / 1024)) / 1024); + if ((n_buffers * (cluster_block_size / 1024)) % 1024 == 0) + printf("%dMB\n", (n_buffers * (cluster_block_size / 1024)) / 1024); else - printf("%dkB\n", n_buffers * (BLCKSZ / 1024)); + printf("%dkB\n", n_buffers * (cluster_block_size / 1024)); printf(_("selecting default time zone ... 
")); fflush(stdout); @@ -1216,12 +1216,12 @@ setup_config(void) conflines = replace_guc_value(conflines, "max_connections", repltok, false); - if ((n_buffers * (BLCKSZ / 1024)) % 1024 == 0) + if ((n_buffers * (cluster_block_size / 1024)) % 1024 == 0) snprintf(repltok, sizeof(repltok), "%dMB", - (n_buffers * (BLCKSZ / 1024)) / 1024); + (n_buffers * (cluster_block_size / 1024)) / 1024); else snprintf(repltok, sizeof(repltok), "%dkB", - n_buffers * (BLCKSZ / 1024)); + n_buffers * (cluster_block_size / 1024)); conflines = replace_guc_value(conflines, "shared_buffers", repltok, false); @@ -1296,21 +1296,21 @@ setup_config(void) #if DEFAULT_BACKEND_FLUSH_AFTER > 0 snprintf(repltok, sizeof(repltok), "%dkB", - DEFAULT_BACKEND_FLUSH_AFTER * (BLCKSZ / 1024)); + DEFAULT_BACKEND_FLUSH_AFTER * (cluster_block_size / 1024)); conflines = replace_guc_value(conflines, "backend_flush_after", repltok, true); #endif #if DEFAULT_BGWRITER_FLUSH_AFTER > 0 snprintf(repltok, sizeof(repltok), "%dkB", - DEFAULT_BGWRITER_FLUSH_AFTER * (BLCKSZ / 1024)); + DEFAULT_BGWRITER_FLUSH_AFTER * (cluster_block_size / 1024)); conflines = replace_guc_value(conflines, "bgwriter_flush_after", repltok, true); #endif #if DEFAULT_CHECKPOINT_FLUSH_AFTER > 0 snprintf(repltok, sizeof(repltok), "%dkB", - DEFAULT_CHECKPOINT_FLUSH_AFTER * (BLCKSZ / 1024)); + DEFAULT_CHECKPOINT_FLUSH_AFTER * (cluster_block_size / 1024)); conflines = replace_guc_value(conflines, "checkpoint_flush_after", repltok, true); #endif diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c index 19eb67e485..0323bc4937 100644 --- a/src/bin/pg_checksums/pg_checksums.c +++ b/src/bin/pg_checksums/pg_checksums.c @@ -204,18 +204,18 @@ scan_file(const char *fn, int segmentno) for (blockno = 0;; blockno++) { uint16 csum; - int r = read(f, buf.data, BLCKSZ); + int r = read(f, buf.data, cluster_block_size); if (r == 0) break; - if (r != BLCKSZ) + if (r != cluster_block_size) { if (r < 0) pg_fatal("could not read block %u 
in file \"%s\": %m", blockno, fn); else pg_fatal("could not read block %u in file \"%s\": read %d of %d", - blockno, fn, r, BLCKSZ); + blockno, fn, r, cluster_block_size); } blocks_scanned++; @@ -259,19 +259,19 @@ scan_file(const char *fn, int segmentno) header->pd_checksum = csum; /* Seek back to beginning of block */ - if (lseek(f, -BLCKSZ, SEEK_CUR) < 0) + if (lseek(f, -cluster_block_size, SEEK_CUR) < 0) pg_fatal("seek failed for block %u in file \"%s\": %m", blockno, fn); /* Write block with checksum */ - w = write(f, buf.data, BLCKSZ); - if (w != BLCKSZ) + w = write(f, buf.data, cluster_block_size); + if (w != cluster_block_size) { if (w < 0) pg_fatal("could not write block %u in file \"%s\": %m", blockno, fn); else pg_fatal("could not write block %u in file \"%s\": wrote %d of %d", - blockno, fn, w, BLCKSZ); + blockno, fn, w, cluster_block_size); } } @@ -551,11 +551,11 @@ main(int argc, char *argv[]) if (ControlFile->pg_control_version != PG_CONTROL_VERSION) pg_fatal("cluster is not compatible with this version of pg_checksums"); - if (ControlFile->blcksz != BLCKSZ) + if (ControlFile->blcksz != cluster_block_size) { pg_log_error("database cluster is not compatible"); pg_log_error_detail("The database cluster was initialized with block size %u, but pg_checksums was compiled with block size %u.", - ControlFile->blcksz, BLCKSZ); + ControlFile->blcksz, cluster_block_size); exit(1); } diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c index 25ecdaaa15..84c65069aa 100644 --- a/src/bin/pg_resetwal/pg_resetwal.c +++ b/src/bin/pg_resetwal/pg_resetwal.c @@ -686,14 +686,14 @@ GuessControlValues(void) ControlFile.maxAlign = MAXIMUM_ALIGNOF; ControlFile.floatFormat = FLOATFORMAT_VALUE; - ControlFile.blcksz = BLCKSZ; + ControlFile.blcksz = cluster_block_size; ControlFile.relseg_size = RELSEG_SIZE; ControlFile.xlog_blcksz = XLOG_BLCKSZ; ControlFile.xlog_seg_size = DEFAULT_XLOG_SEG_SIZE; ControlFile.nameDataLen = NAMEDATALEN; 
ControlFile.indexMaxKeys = INDEX_MAX_KEYS; - ControlFile.toast_max_chunk_size = TOAST_MAX_CHUNK_SIZE; - ControlFile.loblksize = LOBLKSIZE; + ControlFile.toast_max_chunk_size = cluster_toast_max_chunk_size; + ControlFile.loblksize = cluster_loblksize; ControlFile.float8ByVal = FLOAT8PASSBYVAL; /* diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c index bd5c598e20..a28de44170 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c @@ -333,7 +333,7 @@ process_target_wal_block_change(ForkNumber forknum, RelFileLocator rlocator, { off_t end_offset; - end_offset = (blkno_inseg + 1) * BLCKSZ; + end_offset = (blkno_inseg + 1) * cluster_block_size; if (end_offset <= entry->source_size && end_offset <= entry->target_size) datapagemap_add(&entry->target_pages_to_overwrite, blkno_inseg); } @@ -468,7 +468,7 @@ calculate_totals(filemap_t *filemap) iter = datapagemap_iterate(&entry->target_pages_to_overwrite); while (datapagemap_next(iter, &blk)) - filemap->fetch_size += BLCKSZ; + filemap->fetch_size += cluster_block_size; pg_free(iter); } diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c index 7f69f02441..d77dc890c9 100644 --- a/src/bin/pg_rewind/pg_rewind.c +++ b/src/bin/pg_rewind/pg_rewind.c @@ -571,8 +571,8 @@ perform_rewind(filemap_t *filemap, rewind_source *source, iter = datapagemap_iterate(&entry->target_pages_to_overwrite); while (datapagemap_next(iter, &blkno)) { - offset = blkno * BLCKSZ; - source->queue_fetch_range(source, entry->path, offset, BLCKSZ); + offset = blkno * cluster_block_size; + source->queue_fetch_range(source, entry->path, offset, cluster_block_size); } pg_free(iter); } diff --git a/src/bin/pg_upgrade/file.c b/src/bin/pg_upgrade/file.c index d173602882..3143cf28aa 100644 --- a/src/bin/pg_upgrade/file.c +++ b/src/bin/pg_upgrade/file.c @@ -96,7 +96,7 @@ copyFile(const char *src, const char *dst, schemaName, relName, dst, strerror(errno)); /* copy in fairly large chunks for best efficiency */ 
-#define COPY_BUF_SIZE (50 * BLCKSZ) +#define COPY_BUF_SIZE (50 * cluster_block_size) buffer = (char *) pg_malloc(COPY_BUF_SIZE); @@ -187,7 +187,7 @@ rewriteVisibilityMap(const char *fromfile, const char *tofile, struct stat statbuf; /* Compute number of old-format bytes per new page */ - rewriteVmBytesPerPage = (BLCKSZ - SizeOfPageHeaderData) / 2; + rewriteVmBytesPerPage = (cluster_block_size - SizeOfPageHeaderData) / 2; if ((src_fd = open(fromfile, O_RDONLY | PG_BINARY, 0)) < 0) pg_fatal("error while copying relation \"%s.%s\": could not open file \"%s\": %s", @@ -220,7 +220,7 @@ rewriteVisibilityMap(const char *fromfile, const char *tofile, PageHeaderData pageheader; bool old_lastblk; - if ((bytesRead = read(src_fd, buffer.data, BLCKSZ)) != BLCKSZ) + if ((bytesRead = read(src_fd, buffer.data, cluster_block_size)) != cluster_block_size) { if (bytesRead < 0) pg_fatal("error while copying relation \"%s.%s\": could not read file \"%s\": %s", @@ -230,7 +230,7 @@ rewriteVisibilityMap(const char *fromfile, const char *tofile, schemaName, relName, fromfile); } - totalBytesRead += BLCKSZ; + totalBytesRead += cluster_block_size; old_lastblk = (totalBytesRead == src_filesize); /* Save the page header data */ @@ -296,7 +296,7 @@ rewriteVisibilityMap(const char *fromfile, const char *tofile, pg_checksum_page(new_vmbuf.data, new_blkno); errno = 0; - if (write(dst_fd, new_vmbuf.data, BLCKSZ) != BLCKSZ) + if (write(dst_fd, new_vmbuf.data, cluster_block_size) != cluster_block_size) { /* if write didn't set errno, assume problem is no disk space */ if (errno == 0) diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c index b9acfed3b7..b3ad58ec66 100644 --- a/src/bin/pg_waldump/pg_waldump.c +++ b/src/bin/pg_waldump/pg_waldump.c @@ -531,7 +531,7 @@ XLogRecordSaveFPWs(XLogReaderState *record, const char *savepath) if (!file) pg_fatal("could not open file \"%s\": %m", filename); - if (fwrite(page, BLCKSZ, 1, file) != 1) + if (fwrite(page, cluster_block_size, 
1, file) != 1) pg_fatal("could not write file \"%s\": %m", filename); if (fclose(file) != 0) diff --git a/src/common/file_utils.c b/src/common/file_utils.c index 74833c4acb..ea74aa1eb5 100644 --- a/src/common/file_utils.c +++ b/src/common/file_utils.c @@ -540,7 +540,7 @@ pg_pwritev_with_retry(int fd, const struct iovec *iov, int iovcnt, off_t offset) ssize_t pg_pwrite_zeros(int fd, size_t size, off_t offset) { - static const PGIOAlignedBlock zbuffer = {{0}}; /* worth BLCKSZ */ + static const PGIOAlignedBlock zbuffer = {{0}}; /* worth cluster_block_size */ void *zerobuf_addr = unconstify(PGIOAlignedBlock *, &zbuffer)->data; struct iovec iov[PG_IOV_MAX]; size_t remaining_size = size; @@ -558,10 +558,10 @@ pg_pwrite_zeros(int fd, size_t size, off_t offset) iov[iovcnt].iov_base = zerobuf_addr; - if (remaining_size < BLCKSZ) + if (remaining_size < cluster_block_size) this_iov_size = remaining_size; else - this_iov_size = BLCKSZ; + this_iov_size = cluster_block_size; iov[iovcnt].iov_len = this_iov_size; remaining_size -= this_iov_size; diff --git a/src/include/access/brin_page.h b/src/include/access/brin_page.h index 3670ca6010..a4881cb971 100644 --- a/src/include/access/brin_page.h +++ b/src/include/access/brin_page.h @@ -85,12 +85,12 @@ typedef struct RevmapContents ItemPointerData rm_tids[1]; } RevmapContents; -#define REVMAP_CONTENT_SIZE \ - (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \ +#define revmap_content_size \ + (cluster_block_size - MAXALIGN(SizeOfPageHeaderData) - \ offsetof(RevmapContents, rm_tids) - \ MAXALIGN(sizeof(BrinSpecialSpace))) /* max num of items in the array */ #define REVMAP_PAGE_MAXITEMS \ - (REVMAP_CONTENT_SIZE / sizeof(ItemPointerData)) + (revmap_content_size / sizeof(ItemPointerData)) #endif /* BRIN_PAGE_H */ diff --git a/src/include/access/ginblock.h b/src/include/access/ginblock.h index c59790ec5a..191471cb1c 100644 --- a/src/include/access/ginblock.h +++ b/src/include/access/ginblock.h @@ -248,7 +248,7 @@ typedef signed char 
GinNullCategory; */ #define GinMaxItemSize \ Min(INDEX_SIZE_MASK, \ - MAXALIGN_DOWN(((BLCKSZ - \ + MAXALIGN_DOWN(((cluster_block_size - \ MAXALIGN(SizeOfPageHeaderData + 3 * sizeof(ItemIdData)) - \ MAXALIGN(sizeof(GinPageOpaqueData))) / 3))) @@ -318,7 +318,7 @@ typedef signed char GinNullCategory; GinPageGetOpaque(page)->maxoff * sizeof(PostingItem)) #define GinDataPageMaxDataSize \ - (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \ + (cluster_block_size - MAXALIGN(SizeOfPageHeaderData) \ - MAXALIGN(sizeof(ItemPointerData)) \ - MAXALIGN(sizeof(GinPageOpaqueData))) @@ -326,7 +326,7 @@ typedef signed char GinNullCategory; * List pages */ #define GinListPageSize \ - ( BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(GinPageOpaqueData)) ) + ( cluster_block_size - SizeOfPageHeaderData - MAXALIGN(sizeof(GinPageOpaqueData)) ) /* * A compressed posting list. diff --git a/src/include/access/gist.h b/src/include/access/gist.h index 0235716c06..5fff12eb65 100644 --- a/src/include/access/gist.h +++ b/src/include/access/gist.h @@ -96,7 +96,7 @@ typedef GISTPageOpaqueData *GISTPageOpaque; * key size using opclass parameters. 
*/ #define GISTMaxIndexTupleSize \ - MAXALIGN_DOWN((BLCKSZ - SizeOfPageHeaderData - sizeof(GISTPageOpaqueData)) / \ + MAXALIGN_DOWN((cluster_block_size - SizeOfPageHeaderData - sizeof(GISTPageOpaqueData)) / \ 4 - sizeof(ItemIdData)) #define GISTMaxIndexKeySize \ diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index 3edc740a3f..4c1dd74db7 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -54,7 +54,7 @@ typedef struct /* Returns free space in node buffer page */ #define PAGE_FREE_SPACE(nbp) (nbp->freespace) /* Checks if node buffer page is empty */ -#define PAGE_IS_EMPTY(nbp) (nbp->freespace == BLCKSZ - BUFFER_PAGE_DATA_OFFSET) +#define PAGE_IS_EMPTY(nbp) (nbp->freespace == cluster_block_size - BUFFER_PAGE_DATA_OFFSET) /* Checks if node buffers page don't contain sufficient space for index tuple */ #define PAGE_NO_SPACE(nbp, itup) (PAGE_FREE_SPACE(nbp) < \ MAXALIGN(IndexTupleSize(itup))) @@ -171,7 +171,7 @@ typedef struct GISTScanOpaqueData GistNSN curPageLSN; /* pos in the WAL stream when page was read */ /* In a non-ordered search, returnable heap items are stored here: */ - GISTSearchHeapItem pageData[BLCKSZ / sizeof(IndexTupleData)]; + GISTSearchHeapItem pageData[cluster_block_size / sizeof(IndexTupleData)]; OffsetNumber nPageData; /* number of valid items in array */ OffsetNumber curPageData; /* next item to return */ MemoryContext pageDataCxt; /* context holding the fetched tuples, for @@ -474,7 +474,7 @@ extern void gistadjustmembers(Oid opfamilyoid, /* gistutil.c */ #define GiSTPageSize \ - ( BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(GISTPageOpaqueData)) ) + ( cluster_block_size - SizeOfPageHeaderData - MAXALIGN(sizeof(GISTPageOpaqueData)) ) #define GIST_MIN_FILLFACTOR 10 #define GIST_DEFAULT_FILLFACTOR 90 diff --git a/src/include/access/hash.h b/src/include/access/hash.h index 9e035270a1..4f6df08020 100644 --- a/src/include/access/hash.h +++ b/src/include/access/hash.h @@ 
-224,10 +224,10 @@ typedef HashScanOpaqueData *HashScanOpaque; * needing to fit into the metapage. (With 8K block size, 1024 bitmaps * limit us to 256 GB of overflow space...). For smaller block size we * can not use 1024 bitmaps as it will lead to the meta page data crossing - * the block size boundary. So we use BLCKSZ to determine the maximum number + * the block size boundary. So we use cluster_block_size to determine the maximum number * of bitmaps. */ -#define HASH_MAX_BITMAPS Min(BLCKSZ / 8, 1024) +#define HASH_MAX_BITMAPS Min(cluster_block_size / 8, 1024) #define HASH_SPLITPOINT_PHASE_BITS 2 #define HASH_SPLITPOINT_PHASES_PER_GRP (1 << HASH_SPLITPOINT_PHASE_BITS) @@ -279,7 +279,7 @@ typedef struct HashOptions ((HashOptions *) (relation)->rd_options)->fillfactor : \ HASH_DEFAULT_FILLFACTOR) #define HashGetTargetPageUsage(relation) \ - (BLCKSZ * HashGetFillFactor(relation) / 100) + (cluster_block_size * HashGetFillFactor(relation) / 100) /* * Maximum size of a hash index item (it's okay to have only one per page) diff --git a/src/include/access/heaptoast.h b/src/include/access/heaptoast.h index 5c0a796f66..3658856a60 100644 --- a/src/include/access/heaptoast.h +++ b/src/include/access/heaptoast.h @@ -21,14 +21,14 @@ * Find the maximum size of a tuple if there are to be N tuples per page. */ #define MaximumBytesPerTuple(tuplesPerPage) \ - MAXALIGN_DOWN((BLCKSZ - \ + MAXALIGN_DOWN((cluster_block_size - \ MAXALIGN(SizeOfPageHeaderData + (tuplesPerPage) * sizeof(ItemIdData))) \ / (tuplesPerPage)) /* * These symbols control toaster activation. If a tuple is larger than - * TOAST_TUPLE_THRESHOLD, we will try to toast it down to no more than - * TOAST_TUPLE_TARGET bytes through compressing compressible fields and + * toast_tuple_threshold, we will try to toast it down to no more than + * cluster_toast_tuple_target bytes through compressing compressible fields and * moving EXTENDED and EXTERNAL data out-of-line. 
* * The numbers need not be the same, though they currently are. It doesn't @@ -40,14 +40,14 @@ * * XXX while these can be modified without initdb, some thought needs to be * given to needs_toast_table() in toasting.c before unleashing random - * changes. Also see LOBLKSIZE in large_object.h, which can *not* be + * changes. Also see cluster_loblksize in large_object.h, which can *not* be * changed without initdb. */ #define TOAST_TUPLES_PER_PAGE 4 -#define TOAST_TUPLE_THRESHOLD MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE) +#define toast_tuple_threshold MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE) -#define TOAST_TUPLE_TARGET TOAST_TUPLE_THRESHOLD +#define cluster_toast_tuple_target toast_tuple_threshold /* * The code will also consider moving MAIN data out-of-line, but only as a @@ -58,7 +58,7 @@ */ #define TOAST_TUPLES_PER_PAGE_MAIN 1 -#define TOAST_TUPLE_TARGET_MAIN MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE_MAIN) +#define toast_tuple_target_main MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE_MAIN) /* * If an index value is larger than TOAST_INDEX_TARGET, we will try to @@ -69,19 +69,19 @@ /* * When we store an oversize datum externally, we divide it into chunks - * containing at most TOAST_MAX_CHUNK_SIZE data bytes. This number *must* + * containing at most cluster_toast_max_chunk_size data bytes. This number *must* * be small enough that the completed toast-table tuple (including the * ID and sequence fields and all overhead) will fit on a page. * The coding here sets the size on the theory that we want to fit * EXTERN_TUPLES_PER_PAGE tuples of maximum size onto a page. * - * NB: Changing TOAST_MAX_CHUNK_SIZE requires an initdb. + * NB: Changing cluster_toast_max_chunk_size requires an initdb. 
*/ #define EXTERN_TUPLES_PER_PAGE 4 /* tweak only this */ #define EXTERN_TUPLE_MAX_SIZE MaximumBytesPerTuple(EXTERN_TUPLES_PER_PAGE) -#define TOAST_MAX_CHUNK_SIZE \ +#define cluster_toast_max_chunk_size \ (EXTERN_TUPLE_MAX_SIZE - \ MAXALIGN(SizeofHeapTupleHeader) - \ sizeof(Oid) - \ diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h index e01f4f35c8..005a530e0a 100644 --- a/src/include/access/htup_details.h +++ b/src/include/access/htup_details.h @@ -551,7 +551,7 @@ StaticAssertDecl(MaxOffsetNumber < SpecTokenOffsetNumber, /* * MaxHeapTupleSize is the maximum allowed size of a heap tuple, including - * header and MAXALIGN alignment padding. Basically it's BLCKSZ minus the + * header and MAXALIGN alignment padding. Basically it's cluster_block_size minus the * other stuff that has to be on a disk page. Since heap pages use no * "special space", there's no deduction for that. * @@ -560,7 +560,7 @@ StaticAssertDecl(MaxOffsetNumber < SpecTokenOffsetNumber, * ItemIds and tuples have different alignment requirements, don't assume that * you can, say, fit 2 tuples of size MaxHeapTupleSize/2 on the same page. */ -#define MaxHeapTupleSize (BLCKSZ - MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData))) +#define MaxHeapTupleSize (cluster_block_size - MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData))) #define MinHeapTupleSize MAXALIGN(SizeofHeapTupleHeader) /* @@ -575,7 +575,7 @@ StaticAssertDecl(MaxOffsetNumber < SpecTokenOffsetNumber, * require increases in the size of work arrays. 
*/ #define MaxHeapTuplesPerPage \ - ((int) ((BLCKSZ - SizeOfPageHeaderData) / \ + ((int) ((cluster_block_size - SizeOfPageHeaderData) / \ (MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData)))) /* diff --git a/src/include/access/itup.h b/src/include/access/itup.h index 2e2b8c7a47..8a68b9405b 100644 --- a/src/include/access/itup.h +++ b/src/include/access/itup.h @@ -164,7 +164,7 @@ index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) * But such a page always has at least MAXALIGN special space, so we're safe. */ #define MaxIndexTuplesPerPage \ - ((int) ((BLCKSZ - SizeOfPageHeaderData) / \ + ((int) ((cluster_block_size - SizeOfPageHeaderData) / \ (MAXALIGN(sizeof(IndexTupleData) + 1) + sizeof(ItemIdData)))) #endif /* ITUP_H */ diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index 8891fa7973..033624e9c3 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -183,7 +183,7 @@ typedef struct BTMetaPageData * than necessary as a result, which is considered acceptable. */ #define MaxTIDsPerBTreePage \ - (int) ((BLCKSZ - SizeOfPageHeaderData - sizeof(BTPageOpaqueData)) / \ + (int) ((cluster_block_size - SizeOfPageHeaderData - sizeof(BTPageOpaqueData)) / \ sizeof(ItemPointerData)) /* @@ -1057,7 +1057,7 @@ typedef struct BTScanOpaqueData /* * If we are doing an index-only scan, these are the tuple storage * workspaces for the currPos and markPos respectively. Each is of size - * BLCKSZ, so it can hold as much as a full page's worth of tuples. + * cluster_block_size, so it can hold as much as a full page's worth of tuples. 
*/ char *currTuples; /* tuple storage for currPos */ char *markTuples; /* tuple storage for markPos */ @@ -1104,7 +1104,7 @@ typedef struct BTOptions ((BTOptions *) (relation)->rd_options)->fillfactor : \ BTREE_DEFAULT_FILLFACTOR) #define BTGetTargetPageFreeSpace(relation) \ - (BLCKSZ * (100 - BTGetFillFactor(relation)) / 100) + (cluster_block_size * (100 - BTGetFillFactor(relation)) / 100) #define BTGetDeduplicateItems(relation) \ (AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \ relation->rd_rel->relam == BTREE_AM_OID), \ diff --git a/src/include/access/slru.h b/src/include/access/slru.h index a8a424d92d..f008a01acb 100644 --- a/src/include/access/slru.h +++ b/src/include/access/slru.h @@ -19,7 +19,7 @@ /* - * Define SLRU segment size. A page is the same BLCKSZ as is used everywhere + * Define SLRU segment size. A page is the same cluster_block_size as is used everywhere * else in Postgres. The segment size can be chosen somewhat arbitrarily; * we make it 32 pages by default, or 256Kb, i.e. 1M transactions for CLOG * or 64K transactions for SUBTRANS. 
diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h index c6ef46fc20..ea803eccd8 100644 --- a/src/include/access/spgist_private.h +++ b/src/include/access/spgist_private.h @@ -36,7 +36,7 @@ typedef struct SpGistOptions ((SpGistOptions *) (relation)->rd_options)->fillfactor : \ SPGIST_DEFAULT_FILLFACTOR) #define SpGistGetTargetPageFreeSpace(relation) \ - (BLCKSZ * (100 - SpGistGetFillFactor(relation)) / 100) + (cluster_block_size * (100 - SpGistGetFillFactor(relation)) / 100) /* SPGiST leaf tuples have one key column, optionally have included columns */ @@ -444,8 +444,8 @@ typedef SpGistDeadTupleData *SpGistDeadTuple; */ /* Page capacity after allowing for fixed header and special space */ -#define SPGIST_PAGE_CAPACITY \ - MAXALIGN_DOWN(BLCKSZ - \ +#define spgist_page_capacity \ + MAXALIGN_DOWN(cluster_block_size - \ SizeOfPageHeaderData - \ MAXALIGN(sizeof(SpGistPageOpaqueData))) diff --git a/src/include/access/xlogrecord.h b/src/include/access/xlogrecord.h index f355e08e1d..5395678786 100644 --- a/src/include/access/xlogrecord.h +++ b/src/include/access/xlogrecord.h @@ -122,21 +122,21 @@ typedef struct XLogRecordBlockHeader * in the middle, which contains only zero bytes. Since we know that the * "hole" is all zeros, we remove it from the stored data (and it's not counted * in the XLOG record's CRC, either). Hence, the amount of block data actually - * present is (BLCKSZ - ). + * present is (cluster_block_size - ). * * Additionally, when wal_compression is enabled, we will try to compress full * page images using one of the supported algorithms, after removing the * "hole". This can reduce the WAL volume, but at some extra cost of CPU spent * on the compression during WAL logging. In this case, since the "hole" * length cannot be calculated by subtracting the number of page image bytes - * from BLCKSZ, basically it needs to be stored as an extra information. 
+ * from cluster_block_size, basically it needs to be stored as an extra information. * But when no "hole" exists, we can assume that the "hole" length is zero * and no such an extra information needs to be stored. Note that * the original version of page image is stored in WAL instead of the * compressed one if the number of bytes saved by compression is less than * the length of extra information. Hence, when a page image is successfully * compressed, the amount of block data actually present is less than - * BLCKSZ - the length of "hole" bytes - the length of extra information. + * cluster_block_size - the length of "hole" bytes - the length of extra information. */ typedef struct XLogRecordBlockImageHeader { diff --git a/src/include/backup/basebackup_sink.h b/src/include/backup/basebackup_sink.h index 224732e333..53b0e6e485 100644 --- a/src/include/backup/basebackup_sink.h +++ b/src/include/backup/basebackup_sink.h @@ -80,7 +80,7 @@ typedef struct bbsink_state * 'bbs_ops' is the relevant callback table. * * 'bbs_buffer' is the buffer into which data destined for the bbsink - * should be stored. It must be a multiple of BLCKSZ. + * should be stored. It must be a multiple of cluster_block_size. * * 'bbs_buffer_length' is the allocated length of the buffer. * @@ -183,7 +183,7 @@ bbsink_begin_backup(bbsink *sink, bbsink_state *state, int buffer_length) sink->bbs_ops->begin_backup(sink); Assert(sink->bbs_buffer != NULL); - Assert((sink->bbs_buffer_length % BLCKSZ) == 0); + Assert((sink->bbs_buffer_length % cluster_block_size) == 0); } /* Begin an archive. */ diff --git a/src/include/c.h b/src/include/c.h index 82f8e9d4c7..f156886798 100644 --- a/src/include/c.h +++ b/src/include/c.h @@ -1123,7 +1123,7 @@ extern void ExceptionalCondition(const char *conditionName, ((var) = ((var) < 0) ? 
1 : -(var)) /* - * Use this, not "char buf[BLCKSZ]", to declare a field or local variable + * Use this, not "char buf[cluster_block_size]", to declare a field or local variable * holding a page buffer, if that page might be accessed as a page. Otherwise * the variable might be under-aligned, causing problems on alignment-picky * hardware. We include both "double" and "int64" in the union to ensure that @@ -1132,7 +1132,7 @@ extern void ExceptionalCondition(const char *conditionName, */ typedef union PGAlignedBlock { - char data[BLCKSZ]; + char data[cluster_block_size]; double force_align_d; int64 force_align_i64; } PGAlignedBlock; @@ -1150,7 +1150,7 @@ typedef union PGIOAlignedBlock #ifdef pg_attribute_aligned pg_attribute_aligned(PG_IO_ALIGN_SIZE) #endif - char data[BLCKSZ]; + char data[cluster_block_size]; double force_align_d; int64 force_align_i64; } PGIOAlignedBlock; diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h index 8a6e67a445..0446bfb407 100644 --- a/src/include/pg_config_manual.h +++ b/src/include/pg_config_manual.h @@ -33,7 +33,7 @@ * * The minimum value is 8 (GIN indexes use 8-argument support functions). * The maximum possible value is around 600 (limited by index tuple size in - * pg_proc's index; BLCKSZ larger than 8K would allow more). Values larger + * pg_proc's index; cluster_block_size larger than 8K would allow more). Values larger * than needed will waste memory and processing time, but do not directly * cost disk space. 
* diff --git a/src/include/postgres_ext.h b/src/include/postgres_ext.h index 240ad4e93b..1ed3d694ee 100644 --- a/src/include/postgres_ext.h +++ b/src/include/postgres_ext.h @@ -70,4 +70,5 @@ typedef PG_INT64_TYPE pg_int64; #define PG_DIAG_SOURCE_LINE 'L' #define PG_DIAG_SOURCE_FUNCTION 'R' +#define cluster_block_size BLCKSZ #endif /* POSTGRES_EXT_H */ diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h index b379c76e27..b33ea1a261 100644 --- a/src/include/storage/bufmgr.h +++ b/src/include/storage/bufmgr.h @@ -323,7 +323,7 @@ BufferGetBlock(Buffer buffer) if (BufferIsLocal(buffer)) return LocalBufferBlockPointers[-buffer - 1]; else - return (Block) (BufferBlocks + ((Size) (buffer - 1)) * BLCKSZ); + return (Block) (BufferBlocks + ((Size) (buffer - 1)) * cluster_block_size); } /* @@ -341,7 +341,7 @@ static inline Size BufferGetPageSize(Buffer buffer) { AssertMacro(BufferIsValid(buffer)); - return (Size) BLCKSZ; + return (Size) cluster_block_size; } /* diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h index 424ecba028..20d8d7277a 100644 --- a/src/include/storage/bufpage.h +++ b/src/include/storage/bufpage.h @@ -325,7 +325,7 @@ static inline void PageValidateSpecialPointer(Page page) { Assert(page); - Assert(((PageHeader) page)->pd_special <= BLCKSZ); + Assert(((PageHeader) page)->pd_special <= cluster_block_size); Assert(((PageHeader) page)->pd_special >= SizeOfPageHeaderData); } @@ -476,14 +476,14 @@ do { \ PIV_LOG_WARNING | PIV_REPORT_STAT) /* - * Check that BLCKSZ is a multiple of sizeof(size_t). In + * Check that cluster_block_size is a multiple of sizeof(size_t). In * PageIsVerifiedExtended(), it is much faster to check if a page is * full of zeroes using the native word size. Note that this assertion * is kept within a header to make sure that StaticAssertDecl() works * across various combinations of platforms and compilers. 
*/ -StaticAssertDecl(BLCKSZ == ((BLCKSZ / sizeof(size_t)) * sizeof(size_t)), - "BLCKSZ has to be a multiple of sizeof(size_t)"); +StaticAssertDecl(cluster_block_size == ((cluster_block_size / sizeof(size_t)) * sizeof(size_t)), + "cluster_block_size has to be a multiple of sizeof(size_t)"); extern void PageInit(Page page, Size pageSize, Size specialSize); extern bool PageIsVerifiedExtended(Page page, BlockNumber blkno, int flags); diff --git a/src/include/storage/checksum_impl.h b/src/include/storage/checksum_impl.h index 7b157161a2..18951c36c7 100644 --- a/src/include/storage/checksum_impl.h +++ b/src/include/storage/checksum_impl.h @@ -111,7 +111,7 @@ typedef union { PageHeaderData phdr; - uint32 data[BLCKSZ / (sizeof(uint32) * N_SUMS)][N_SUMS]; + uint32 data[cluster_block_size / (sizeof(uint32) * N_SUMS)][N_SUMS]; } PGChecksummablePage; /* @@ -151,13 +151,13 @@ pg_checksum_block(const PGChecksummablePage *page) j; /* ensure that the size is compatible with the algorithm */ - Assert(sizeof(PGChecksummablePage) == BLCKSZ); + Assert(sizeof(PGChecksummablePage) == cluster_block_size); /* initialize partial checksums to their corresponding offsets */ memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets)); /* main checksum calculation */ - for (i = 0; i < (uint32) (BLCKSZ / (sizeof(uint32) * N_SUMS)); i++) + for (i = 0; i < (uint32) (cluster_block_size / (sizeof(uint32) * N_SUMS)); i++) for (j = 0; j < N_SUMS; j++) CHECKSUM_COMP(sums[j], page->data[i][j]); diff --git a/src/include/storage/fsm_internals.h b/src/include/storage/fsm_internals.h index 9e314c83fa..3e96d2335a 100644 --- a/src/include/storage/fsm_internals.h +++ b/src/include/storage/fsm_internals.h @@ -48,10 +48,10 @@ typedef FSMPageData *FSMPage; * Number of non-leaf and leaf nodes, and nodes in total, on an FSM page. * These definitions are internal to fsmpage.c. 
*/ -#define NodesPerPage (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \ +#define NodesPerPage (cluster_block_size - MAXALIGN(SizeOfPageHeaderData) - \ offsetof(FSMPageData, fp_nodes)) -#define NonLeafNodesPerPage (BLCKSZ / 2 - 1) +#define NonLeafNodesPerPage (cluster_block_size / 2 - 1) #define LeafNodesPerPage (NodesPerPage - NonLeafNodesPerPage) /* diff --git a/src/include/storage/large_object.h b/src/include/storage/large_object.h index db521f23eb..9fab334c7b 100644 --- a/src/include/storage/large_object.h +++ b/src/include/storage/large_object.h @@ -54,7 +54,7 @@ typedef struct LargeObjectDesc /* * Each "page" (tuple) of a large object can hold this much data * - * We could set this as high as BLCKSZ less some overhead, but it seems + * We could set this as high as cluster_block_size less some overhead, but it seems * better to make it a smaller value, so that not as much space is used * up when a page-tuple is updated. Note that the value is deliberately * chosen large enough to trigger the tuple toaster, so that we will @@ -65,15 +65,15 @@ typedef struct LargeObjectDesc * since clients will often be written to send data in power-of-2 blocks. * This avoids unnecessary tuple updates caused by partial-page writes. * - * NB: Changing LOBLKSIZE requires an initdb. + * NB: Changing cluster_loblksize requires an initdb. */ -#define LOBLKSIZE (BLCKSZ / 4) +#define cluster_loblksize (cluster_block_size / 4) /* * Maximum length in bytes for a large object. To make this larger, we'd * have to widen pg_largeobject.pageno as well as various internal variables. 
*/ -#define MAX_LARGE_OBJECT_SIZE ((int64) INT_MAX * LOBLKSIZE) +#define MAX_LARGE_OBJECT_SIZE ((int64) INT_MAX * cluster_loblksize) /* diff --git a/src/include/storage/off.h b/src/include/storage/off.h index 3540308069..7fbd45a084 100644 --- a/src/include/storage/off.h +++ b/src/include/storage/off.h @@ -25,7 +25,7 @@ typedef uint16 OffsetNumber; #define InvalidOffsetNumber ((OffsetNumber) 0) #define FirstOffsetNumber ((OffsetNumber) 1) -#define MaxOffsetNumber ((OffsetNumber) (BLCKSZ / sizeof(ItemIdData))) +#define MaxOffsetNumber ((OffsetNumber) (cluster_block_size / sizeof(ItemIdData))) /* ---------------- * support macros diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index 1426a353cd..08256699b4 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -368,14 +368,14 @@ typedef struct StdRdOptions * Returns the relation's desired space usage per page in bytes. */ #define RelationGetTargetPageUsage(relation, defaultff) \ - (BLCKSZ * RelationGetFillFactor(relation, defaultff) / 100) + (cluster_block_size * RelationGetFillFactor(relation, defaultff) / 100) /* * RelationGetTargetPageFreeSpace * Returns the relation's desired freespace per page in bytes. 
*/ #define RelationGetTargetPageFreeSpace(relation, defaultff) \ - (BLCKSZ * (100 - RelationGetFillFactor(relation, defaultff)) / 100) + (cluster_block_size * (100 - RelationGetFillFactor(relation, defaultff)) / 100) /* * RelationIsUsedAsCatalogTable diff --git a/src/test/modules/test_slru/test_slru.c b/src/test/modules/test_slru/test_slru.c index ae21444c47..1d64fe0e69 100644 --- a/src/test/modules/test_slru/test_slru.c +++ b/src/test/modules/test_slru/test_slru.c @@ -77,7 +77,7 @@ test_slru_page_write(PG_FUNCTION_ARGS) /* write given data to the page, up to the limit of the page */ strncpy(TestSlruCtl->shared->page_buffer[slotno], data, - BLCKSZ - 1); + cluster_block_size - 1); SimpleLruWritePage(TestSlruCtl, slotno); LWLockRelease(TestSLRULock); diff --git a/src/test/regress/expected/btree_index.out b/src/test/regress/expected/btree_index.out index 93ed5e8cc0..12dc4aae8a 100644 --- a/src/test/regress/expected/btree_index.out +++ b/src/test/regress/expected/btree_index.out @@ -320,7 +320,7 @@ CREATE UNIQUE INDEX dedup_unique ON dedup_unique_test_table (a) WITH (deduplicat CREATE UNIQUE INDEX plain_unique ON dedup_unique_test_table (a) WITH (deduplicate_items=off); -- Generate enough garbage tuples in index to ensure that even the unique index -- with deduplication enabled has to check multiple leaf pages during unique --- checking (at least with a BLCKSZ of 8192 or less) +-- checking (at least with a cluster_block_size of 8192 or less) DO $$ BEGIN FOR r IN 1..1350 LOOP @@ -331,7 +331,7 @@ END$$; -- Exercise the LP_DEAD-bit-set tuple deletion code with a posting list tuple. 
-- The implementation prefers deleting existing items to merging any duplicate -- tuples into a posting list, so we need an explicit test to make sure we get --- coverage (note that this test also assumes BLCKSZ is 8192 or less): +-- coverage (note that this test also assumes cluster_block_size is 8192 or less): DROP INDEX plain_unique; DELETE FROM dedup_unique_test_table WHERE a = 1; INSERT INTO dedup_unique_test_table SELECT i FROM generate_series(0,450) i; diff --git a/src/test/regress/expected/largeobject.out b/src/test/regress/expected/largeobject.out index bdcede6728..81bfe9887a 100644 --- a/src/test/regress/expected/largeobject.out +++ b/src/test/regress/expected/largeobject.out @@ -353,7 +353,7 @@ SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; 670800 (1 row) --- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block +-- with the default cluster_block_size, cluster_loblksize = 2048, so this positions us for a block -- edge case SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; lo_lseek diff --git a/src/test/regress/expected/largeobject_1.out b/src/test/regress/expected/largeobject_1.out index d700910c35..b78dc77f7a 100644 --- a/src/test/regress/expected/largeobject_1.out +++ b/src/test/regress/expected/largeobject_1.out @@ -353,7 +353,7 @@ SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; 680800 (1 row) --- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block +-- with the default cluster_block_size, cluster_loblksize = 2048, so this positions us for a block -- edge case SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; lo_lseek diff --git a/src/test/regress/sql/btree_index.sql b/src/test/regress/sql/btree_index.sql index 239f4a4755..f4e2ddf4f8 100644 --- a/src/test/regress/sql/btree_index.sql +++ b/src/test/regress/sql/btree_index.sql @@ -173,7 +173,7 @@ CREATE UNIQUE INDEX dedup_unique ON dedup_unique_test_table (a) WITH (deduplicat CREATE UNIQUE INDEX plain_unique ON dedup_unique_test_table (a) 
WITH (deduplicate_items=off); -- Generate enough garbage tuples in index to ensure that even the unique index -- with deduplication enabled has to check multiple leaf pages during unique --- checking (at least with a BLCKSZ of 8192 or less) +-- checking (at least with a cluster_block_size of 8192 or less) DO $$ BEGIN FOR r IN 1..1350 LOOP @@ -185,7 +185,7 @@ END$$; -- Exercise the LP_DEAD-bit-set tuple deletion code with a posting list tuple. -- The implementation prefers deleting existing items to merging any duplicate -- tuples into a posting list, so we need an explicit test to make sure we get --- coverage (note that this test also assumes BLCKSZ is 8192 or less): +-- coverage (note that this test also assumes cluster_block_size is 8192 or less): DROP INDEX plain_unique; DELETE FROM dedup_unique_test_table WHERE a = 1; INSERT INTO dedup_unique_test_table SELECT i FROM generate_series(0,450) i; diff --git a/src/test/regress/sql/largeobject.sql b/src/test/regress/sql/largeobject.sql index 800e4fcc6a..9dd258210b 100644 --- a/src/test/regress/sql/largeobject.sql +++ b/src/test/regress/sql/largeobject.sql @@ -191,7 +191,7 @@ UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS inte -- verify length of large object SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; --- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block +-- with the default cluster_block_size, cluster_loblksize = 2048, so this positions us for a block -- edge case SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm index a50f730260..f6a40ae730 100644 --- a/src/tools/msvc/Solution.pm +++ b/src/tools/msvc/Solution.pm @@ -212,7 +212,7 @@ sub GenerateFiles ALIGNOF_PG_INT128_TYPE => undef, ALIGNOF_SHORT => 2, AC_APPLE_UNIVERSAL_BUILD => undef, - BLCKSZ => 1024 * $self->{options}->{blocksize}, + cluster_block_size => 1024 * $self->{options}->{blocksize}, CONFIGURE_ARGS => '"' . 
$self->GetFakeConfigure() . '"', DEF_PGPORT => $port, DEF_PGPORT_STR => qq{"$port"}, -- 2.39.2