Re: HASH_BLOBS hazards (was Re: PATCH: logical_work_mem and logical streaming of large in-progress transactions) - Mailing list pgsql-hackers
From: Tom Lane
Subject: Re: HASH_BLOBS hazards (was Re: PATCH: logical_work_mem and logical streaming of large in-progress transactions)
Date:
Msg-id: 1128815.1607990120@sss.pgh.pa.us
In response to: Re: HASH_BLOBS hazards (was Re: PATCH: logical_work_mem and logical streaming of large in-progress transactions) (Tom Lane <tgl@sss.pgh.pa.us>)
Responses: Re: HASH_BLOBS hazards (was Re: PATCH: logical_work_mem and logical streaming of large in-progress transactions)
List: pgsql-hackers
Here's a rolled-up patch that does some further documentation work and
gets rid of the unnecessary memset's as well.

			regards, tom lane

diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 2dc9e44ae6..651227f510 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -2607,7 +2607,8 @@ createConnHash(void)
 ctl.keysize = NAMEDATALEN;
 ctl.entrysize = sizeof(remoteConnHashEnt);
- return hash_create("Remote Con hash", NUMCONN, &ctl, HASH_ELEM);
+ return hash_create("Remote Con hash", NUMCONN, &ctl,
+ HASH_ELEM | HASH_STRINGS);
 }
 static void
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 70cfdb2c9d..2f00344b7f 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -567,7 +567,6 @@ pgss_shmem_startup(void)
 pgss->stats.dealloc = 0;
 }
- memset(&info, 0, sizeof(info));
 info.keysize = sizeof(pgssHashKey);
 info.entrysize = sizeof(pgssEntry);
 pgss_hash = ShmemInitHash("pg_stat_statements hash",
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index ab3226287d..66581e5414 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -119,14 +119,11 @@ GetConnection(UserMapping *user, bool will_prep_stmt)
 {
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(ConnCacheKey);
 ctl.entrysize = sizeof(ConnCacheEntry);
- /* allocate ConnectionHash in the cache context */
- ctl.hcxt = CacheMemoryContext;
 ConnectionHash = hash_create("postgres_fdw connections", 8,
 &ctl,
- HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+ HASH_ELEM | HASH_BLOBS);
 /*
 * Register some callback functions that manage connection cleanup.
diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c
index 3433c19712..b4766dc5ff 100644
--- a/contrib/postgres_fdw/shippable.c
+++ b/contrib/postgres_fdw/shippable.c
@@ -93,7 +93,6 @@ InitializeShippableCache(void)
 HASHCTL ctl;
 /* Create the hash table. */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(ShippableCacheKey);
 ctl.entrysize = sizeof(ShippableCacheEntry);
 ShippableCacheHash =
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c
index 85986ec24a..e9a9741154 100644
--- a/contrib/tablefunc/tablefunc.c
+++ b/contrib/tablefunc/tablefunc.c
@@ -714,7 +714,6 @@ load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
 MemoryContext SPIcontext;
 /* initialize the category hash table */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = MAX_CATNAME_LEN;
 ctl.entrysize = sizeof(crosstab_HashEnt);
 ctl.hcxt = per_query_ctx;
@@ -726,7 +725,7 @@ load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
 crosstab_hash = hash_create("crosstab hash",
 INIT_CATS,
 &ctl,
- HASH_ELEM | HASH_CONTEXT);
+ HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
 /* Connect to SPI manager */
 if ((ret = SPI_connect()) < 0)
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 4ad67c88b4..217c199a14 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -76,7 +76,6 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
 * nodeBuffersTab hash is association between index blocks and it's
 * buffers.
 */
- memset(&hashCtl, 0, sizeof(hashCtl));
 hashCtl.keysize = sizeof(BlockNumber);
 hashCtl.entrysize = sizeof(GISTNodeBuffer);
 hashCtl.hcxt = CurrentMemoryContext;
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index a664ecf494..c77a189907 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1363,7 +1363,6 @@ _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
 bool found;
 /* Initialize hash tables used to track TIDs */
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(ItemPointerData);
 hash_ctl.entrysize = sizeof(ItemPointerData);
 hash_ctl.hcxt = CurrentMemoryContext;
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 39e33763df..65942cc428 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -266,7 +266,6 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 state->rs_cxt = rw_cxt;
 /* Initialize hash tables used to track update chains */
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(TidHashKey);
 hash_ctl.entrysize = sizeof(UnresolvedTupData);
 hash_ctl.hcxt = state->rs_cxt;
@@ -824,7 +823,6 @@ logical_begin_heap_rewrite(RewriteState state)
 state->rs_begin_lsn = GetXLogInsertRecPtr();
 state->rs_num_rewrite_mappings = 0;
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(TransactionId);
 hash_ctl.entrysize = sizeof(RewriteMappingFile);
 hash_ctl.hcxt = state->rs_cxt;
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 32a3099c1f..e0ca3859a9 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -113,7 +113,6 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
 /* create hash table when first needed */
 HASHCTL ctl;
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(xl_invalid_page_key);
 ctl.entrysize = sizeof(xl_invalid_page);
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 6a2c6685a0..f2e7bab62a 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -188,7 +188,6 @@ init_enum_blacklist(void)
 {
 HASHCTL hash_ctl;
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(Oid);
 hash_ctl.hcxt = TopTransactionContext;
diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c
index 17f37eb39f..5c3c78a0e6 100644
--- a/src/backend/catalog/pg_inherits.c
+++ b/src/backend/catalog/pg_inherits.c
@@ -171,7 +171,6 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 *rel_numparents;
 ListCell *l;
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(SeenRelsEntry);
 ctl.hcxt = CurrentMemoryContext;
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index c0763c63e2..e04afd9963 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -2375,7 +2375,6 @@ AddEventToPendingNotifies(Notification *n)
 ListCell *l;
 /* Create the hash table */
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Notification *);
 hash_ctl.entrysize = sizeof(NotificationHash);
 hash_ctl.hash = notification_hash;
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 4b18be5b27..89087a7be3 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -406,15 +406,13 @@ InitQueryHashTable(void)
 {
 HASHCTL hash_ctl;
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
-
 hash_ctl.keysize = NAMEDATALEN;
 hash_ctl.entrysize = sizeof(PreparedStatement);
 prepared_queries = hash_create("Prepared Queries",
 32,
 &hash_ctl,
- HASH_ELEM);
+ HASH_ELEM | HASH_STRINGS);
 }
 /*
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 632b34af61..fa2eea8af2 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -1087,7 +1087,6 @@ create_seq_hashtable(void)
 {
 HASHCTL ctl;
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(SeqTableData);
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index 86594bd056..97bfc8bd71 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -521,7 +521,6 @@ ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate,
 HTAB *htab;
 int i;
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(SubplanResultRelHashElem);
 ctl.hcxt = CurrentMemoryContext;
diff --git a/src/backend/nodes/extensible.c b/src/backend/nodes/extensible.c
index ab04459c55..3a6cfc44d3 100644
--- a/src/backend/nodes/extensible.c
+++ b/src/backend/nodes/extensible.c
@@ -47,11 +47,11 @@ RegisterExtensibleNodeEntry(HTAB **p_htable, const char *htable_label,
 {
 HASHCTL ctl;
- memset(&ctl, 0, sizeof(HASHCTL));
 ctl.keysize = EXTNODENAME_MAX_LEN;
 ctl.entrysize = sizeof(ExtensibleNodeEntry);
- *p_htable = hash_create(htable_label, 100, &ctl, HASH_ELEM);
+ *p_htable = hash_create(htable_label, 100, &ctl,
+ HASH_ELEM | HASH_STRINGS);
 }
 if (strlen(extnodename) >= EXTNODENAME_MAX_LEN)
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 0edd873dca..d6e83e5f8e 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -1982,7 +1982,6 @@ lookup_proof_cache(Oid pred_op, Oid clause_op, bool refute_it)
 /* First time through: initialize the hash table */
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(OprProofCacheKey);
 ctl.entrysize = sizeof(OprProofCacheEntry);
 OprProofCacheHash = hash_create("Btree proof lookup cache", 256,
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 76245c1ff3..9c9a738c80 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -400,7 +400,6 @@ build_join_rel_hash(PlannerInfo *root)
 ListCell *l;
 /* Create the hash table */
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Relids);
 hash_ctl.entrysize = sizeof(JoinHashEntry);
 hash_ctl.hash = bitmap_hash;
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index 6613a3a8f8..e72d3676f1 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -999,7 +999,6 @@ find_oper_cache_entry(OprCacheKey *key)
 /* First time through: initialize the hash table */
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(OprCacheKey);
 ctl.entrysize = sizeof(OprCacheEntry);
 OprCacheHash = hash_create("Operator lookup cache", 256,
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index 9a292290ed..5b0a15ac0b 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -286,13 +286,13 @@ CreatePartitionDirectory(MemoryContext mcxt)
 PartitionDirectory pdir;
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(HASHCTL));
+ pdir = palloc(sizeof(PartitionDirectoryData));
+ pdir->pdir_mcxt = mcxt;
+
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(PartitionDirectoryEntry);
 ctl.hcxt = mcxt;
- pdir = palloc(sizeof(PartitionDirectoryData));
- pdir->pdir_mcxt = mcxt;
 pdir->pdir_hash = hash_create("partition directory", 256, &ctl,
 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 7e28944d2f..ed127a1032 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -2043,7 +2043,6 @@ do_autovacuum(void)
 pg_class_desc = CreateTupleDescCopy(RelationGetDescr(classRel));
 /* create hash table for toast <-> main relid mapping */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(av_relation);
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 429c8010ef..a62c6d4d0a 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -1161,7 +1161,6 @@ CompactCheckpointerRequestQueue(void)
 skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);
 /* Initialize temporary hash table */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(CheckpointerRequest);
 ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
 ctl.hcxt = CurrentMemoryContext;
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 7c75a25d21..6b60f293e9 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -1265,7 +1265,6 @@ pgstat_collect_oids(Oid catalogid, AttrNumber anum_oid)
 HeapTuple tup;
 Snapshot snapshot;
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(Oid);
 hash_ctl.hcxt = CurrentMemoryContext;
@@ -1815,7 +1814,6 @@ pgstat_init_function_usage(FunctionCallInfo fcinfo,
 /* First time through - initialize function stat table */
 HASHCTL hash_ctl;
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(PgStat_BackendFunctionEntry);
 pgStatFunctions = hash_create("Function stat entries",
@@ -1975,7 +1973,6 @@ get_tabstat_entry(Oid rel_id, bool isshared)
 {
 HASHCTL ctl;
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(TabStatHashEntry);
@@ -4994,7 +4991,6 @@ reset_dbentry_counters(PgStat_StatDBEntry *dbentry)
 dbentry->stat_reset_timestamp = GetCurrentTimestamp();
 dbentry->stats_timestamp = 0;
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
 dbentry->tables = hash_create("Per-database table",
@@ -5423,7 +5419,6 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
 /*
 * Create the DB hashtable
 */
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(PgStat_StatDBEntry);
 hash_ctl.hcxt = pgStatLocalContext;
@@ -5608,7 +5603,6 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
 break;
 }
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
 hash_ctl.hcxt = pgStatLocalContext;
diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c
index 07aa52977f..f4dbbbe2dd 100644
--- a/src/backend/replication/logical/relation.c
+++ b/src/backend/replication/logical/relation.c
@@ -111,7 +111,6 @@ logicalrep_relmap_init(void)
 ALLOCSET_DEFAULT_SIZES);
 /* Initialize the relation hash table. */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(LogicalRepRelId);
 ctl.entrysize = sizeof(LogicalRepRelMapEntry);
 ctl.hcxt = LogicalRepRelMapContext;
@@ -120,7 +119,6 @@ logicalrep_relmap_init(void)
 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 /* Initialize the type hash table. */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(LogicalRepTyp);
 ctl.hcxt = LogicalRepRelMapContext;
@@ -606,7 +604,6 @@ logicalrep_partmap_init(void)
 ALLOCSET_DEFAULT_SIZES);
 /* Initialize the relation hash table. */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid); /* partition OID */
 ctl.entrysize = sizeof(LogicalRepPartMapEntry);
 ctl.hcxt = LogicalRepPartMapContext;
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 15dc51a94d..7359fa9df2 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -1619,8 +1619,6 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
 if (!rbtxn_has_catalog_changes(txn) || dlist_is_empty(&txn->tuplecids))
 return;
- memset(&hash_ctl, 0, sizeof(hash_ctl));
-
 hash_ctl.keysize = sizeof(ReorderBufferTupleCidKey);
 hash_ctl.entrysize = sizeof(ReorderBufferTupleCidEnt);
 hash_ctl.hcxt = rb->context;
@@ -4116,7 +4114,6 @@ ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
 Assert(txn->toast_hash == NULL);
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(ReorderBufferToastEnt);
 hash_ctl.hcxt = rb->context;
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index 1904f3471c..6259606537 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -372,7 +372,6 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
 {
 HASHCTL ctl;
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(struct tablesync_start_time_mapping);
 last_start_times = hash_create("Logical replication table sync worker start times",
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 9c997aed83..49d25b02d7 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -867,22 +867,18 @@ static void
 init_rel_sync_cache(MemoryContext cachectx)
 {
 HASHCTL ctl;
- MemoryContext old_ctxt;
 if (RelationSyncCache != NULL)
 return;
 /* Make a new hash table for the cache */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(RelationSyncEntry);
 ctl.hcxt = cachectx;
- old_ctxt = MemoryContextSwitchTo(cachectx);
 RelationSyncCache = hash_create("logical replication output relation cache",
 128, &ctl,
 HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
- (void) MemoryContextSwitchTo(old_ctxt);
 Assert(RelationSyncCache != NULL);
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index ad0d1a9abc..c5e8707151 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2505,7 +2505,6 @@ InitBufferPoolAccess(void)
 memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(int32);
 hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 6ffd7b3306..cd3475e9e1 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -465,7 +465,6 @@ InitLocalBuffers(void)
 }
 /* Create the lookup hash table */
- MemSet(&info, 0, sizeof(info));
 info.keysize = sizeof(BufferTag);
 info.entrysize = sizeof(LocalBufferLookupEnt);
diff --git a/src/backend/storage/file/reinit.c b/src/backend/storage/file/reinit.c
index 0c2094f766..8700f7f19a 100644
--- a/src/backend/storage/file/reinit.c
+++ b/src/backend/storage/file/reinit.c
@@ -30,7 +30,7 @@ static void ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname,
 typedef struct
 {
- char oid[OIDCHARS + 1];
+ Oid reloid; /* hash key */
 } unlogged_relation_entry;
 /*
@@ -172,10 +172,11 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
 * need to be reset. Otherwise, this cleanup operation would be
 * O(n^2).
 */
- memset(&ctl, 0, sizeof(ctl));
- ctl.keysize = sizeof(unlogged_relation_entry);
+ ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(unlogged_relation_entry);
- hash = hash_create("unlogged hash", 32, &ctl, HASH_ELEM);
+ ctl.hcxt = CurrentMemoryContext;
+ hash = hash_create("unlogged relation OIDs", 32, &ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 /* Scan the directory. */
 dbspace_dir = AllocateDir(dbspacedirname);
@@ -198,9 +199,8 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
 * Put the OID portion of the name into the hash table, if it
 * isn't already.
 */
- memset(ent.oid, 0, sizeof(ent.oid));
- memcpy(ent.oid, de->d_name, oidchars);
- hash_search(hash, &ent, HASH_ENTER, NULL);
+ ent.reloid = atooid(de->d_name);
+ (void) hash_search(hash, &ent, HASH_ENTER, NULL);
 }
 /* Done with the first pass. */
@@ -224,7 +224,6 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
 {
 ForkNumber forkNum;
 int oidchars;
- bool found;
 unlogged_relation_entry ent;
 /* Skip anything that doesn't look like a relation data file. */
@@ -238,14 +237,10 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
 /*
 * See whether the OID portion of the name shows up in the hash
- * table.
+ * table. If so, nuke it!
 */
- memset(ent.oid, 0, sizeof(ent.oid));
- memcpy(ent.oid, de->d_name, oidchars);
- hash_search(hash, &ent, HASH_FIND, &found);
-
- /* If so, nuke it! */
- if (found)
+ ent.reloid = atooid(de->d_name);
+ if (hash_search(hash, &ent, HASH_FIND, NULL))
 {
 snprintf(rm_path, sizeof(rm_path), "%s/%s",
 dbspacedirname, de->d_name);
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 97716f6aef..b0fc9f160d 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -292,7 +292,6 @@ void
 InitShmemIndex(void)
 {
 HASHCTL info;
- int hash_flags;
 /*
 * Create the shared memory shmem index.
@@ -304,11 +303,11 @@ InitShmemIndex(void)
 */
 info.keysize = SHMEM_INDEX_KEYSIZE;
 info.entrysize = sizeof(ShmemIndexEnt);
- hash_flags = HASH_ELEM;
 ShmemIndex = ShmemInitHash("ShmemIndex",
 SHMEM_INDEX_SIZE, SHMEM_INDEX_SIZE,
- &info, hash_flags);
+ &info,
+ HASH_ELEM | HASH_STRINGS);
 }
 /*
@@ -329,6 +328,11 @@ InitShmemIndex(void)
 * whose maximum size is certain, this should be equal to max_size; that
 * ensures that no run-time out-of-shared-memory failures can occur.
 *
+ * *infoP and hash_flags should specify at least the entry sizes and key
+ * comparison semantics (see hash_create()). Flag bits and values specific
+ * to shared-memory hash tables are added here, except that callers may
+ * choose to specify HASH_PARTITION and/or HASH_FIXED_SIZE.
+ *
 * Note: before Postgres 9.0, this function returned NULL for some failure
 * cases. Now, it always throws error instead, so callers need not check
 * for NULL.
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 52b2809dac..4ea3cf1f5c 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -81,7 +81,6 @@ InitRecoveryTransactionEnvironment(void)
 * Initialize the hash table for tracking the list of locks held by each
 * transaction.
 */
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(TransactionId);
 hash_ctl.entrysize = sizeof(RecoveryLockListsEntry);
 RecoveryLockLists = hash_create("RecoveryLockLists",
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index d86566f455..53472dd21e 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -419,7 +419,6 @@ InitLocks(void)
 * Allocate hash table for LOCK structs. This stores per-locked-object
 * information.
 */
- MemSet(&info, 0, sizeof(info));
 info.keysize = sizeof(LOCKTAG);
 info.entrysize = sizeof(LOCK);
 info.num_partitions = NUM_LOCK_PARTITIONS;
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 108e652179..26bcce9735 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -342,7 +342,6 @@ init_lwlock_stats(void)
 ALLOCSET_DEFAULT_SIZES);
 MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(lwlock_stats_key);
 ctl.entrysize = sizeof(lwlock_stats);
 ctl.hcxt = lwlock_stats_cxt;
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 8a365b400c..e42e131543 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -1096,7 +1096,6 @@ InitPredicateLocks(void)
 * Allocate hash table for PREDICATELOCKTARGET structs. This stores
 * per-predicate-lock-target information.
 */
- MemSet(&info, 0, sizeof(info));
 info.keysize = sizeof(PREDICATELOCKTARGETTAG);
 info.entrysize = sizeof(PREDICATELOCKTARGET);
 info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
@@ -1129,7 +1128,6 @@ InitPredicateLocks(void)
 * Allocate hash table for PREDICATELOCK structs. This stores per
 * xact-lock-of-a-target information.
 */
- MemSet(&info, 0, sizeof(info));
 info.keysize = sizeof(PREDICATELOCKTAG);
 info.entrysize = sizeof(PREDICATELOCK);
 info.hash = predicatelock_hash;
@@ -1212,7 +1210,6 @@ InitPredicateLocks(void)
 * Allocate hash table for SERIALIZABLEXID structs. This stores per-xid
 * information for serializable transactions which have accessed data.
 */
- MemSet(&info, 0, sizeof(info));
 info.keysize = sizeof(SERIALIZABLEXIDTAG);
 info.entrysize = sizeof(SERIALIZABLEXID);
@@ -1853,7 +1850,6 @@ CreateLocalPredicateLockHash(void)
 /* Initialize the backend-local hash table of parent locks */
 Assert(LocalPredicateLockHash == NULL);
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
 hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
 LocalPredicateLockHash = hash_create("Local predicate lock",
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index dcc09df0c7..072bdd118f 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -154,7 +154,6 @@ smgropen(RelFileNode rnode, BackendId backend)
 /* First time through: initialize the hash table */
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(RelFileNodeBackend);
 ctl.entrysize = sizeof(SMgrRelationData);
 SMgrRelationHash = hash_create("smgr relation table", 400,
diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c
index 1d635d596c..a49588f6b9 100644
--- a/src/backend/storage/sync/sync.c
+++ b/src/backend/storage/sync/sync.c
@@ -150,7 +150,6 @@ InitSync(void)
 ALLOCSET_DEFAULT_SIZES);
 MemoryContextAllowInCriticalSection(pendingOpsCxt, true);
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(FileTag);
 hash_ctl.entrysize = sizeof(PendingFsyncEntry);
 hash_ctl.hcxt = pendingOpsCxt;
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index 2eed0cd137..19e9611a3a 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -180,7 +180,6 @@ compute_tsvector_stats(VacAttrStats *stats,
 * worry about overflowing the initial size. Also we don't need to pay any
 * attention to locking and memory management.
 */
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(LexemeHashKey);
 hash_ctl.entrysize = sizeof(TrackItem);
 hash_ctl.hash = lexeme_hash;
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index 4912cabc61..cb2a834193 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -277,7 +277,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 * worry about overflowing the initial size. Also we don't need to pay any
 * attention to locking and memory management.
 */
- MemSet(&elem_hash_ctl, 0, sizeof(elem_hash_ctl));
 elem_hash_ctl.keysize = sizeof(Datum);
 elem_hash_ctl.entrysize = sizeof(TrackItem);
 elem_hash_ctl.hash = element_hash;
@@ -289,7 +288,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
 /* hashtable for array distinct elements counts */
- MemSet(&count_hash_ctl, 0, sizeof(count_hash_ctl));
 count_hash_ctl.keysize = sizeof(int);
 count_hash_ctl.entrysize = sizeof(DECountItem);
 count_hash_ctl.hcxt = CurrentMemoryContext;
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 12557ce3af..7a25415078 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -3439,14 +3439,13 @@ get_json_object_as_hash(char *json, int len, const char *funcname)
 JsonLexContext *lex = makeJsonLexContextCstringLen(json, len, GetDatabaseEncoding(), true);
 JsonSemAction *sem;
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = NAMEDATALEN;
 ctl.entrysize = sizeof(JsonHashEntry);
 ctl.hcxt = CurrentMemoryContext;
 tab = hash_create("json object hashtable",
 100,
 &ctl,
- HASH_ELEM | HASH_CONTEXT);
+ HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
 state = palloc0(sizeof(JHashState));
 sem = palloc0(sizeof(JsonSemAction));
@@ -3831,14 +3830,13 @@ populate_recordset_object_start(void *state)
 return;
 /* Object at level 1: set up a new hash table for this object */
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = NAMEDATALEN;
 ctl.entrysize = sizeof(JsonHashEntry);
 ctl.hcxt = CurrentMemoryContext;
 _state->json_hash = hash_create("json object hashtable",
 100,
 &ctl,
- HASH_ELEM | HASH_CONTEXT);
+ HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
 }
 static void
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index b6d05ac98d..c39d67645c 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -1297,7 +1297,6 @@ lookup_collation_cache(Oid collation, bool set_flags)
 /* First time through, initialize the hash table */
 HASHCTL ctl;
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(collation_cache_entry);
 collation_cache = hash_create("Collation cache", 100, &ctl,
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 02b1a3868f..5ab134a853 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -2540,7 +2540,6 @@ ri_InitHashTables(void)
 {
 HASHCTL ctl;
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(RI_ConstraintInfo);
 ri_constraint_cache = hash_create("RI constraint cache",
@@ -2552,14 +2551,12 @@ ri_InitHashTables(void)
 InvalidateConstraintCacheCallBack,
 (Datum) 0);
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(RI_QueryKey);
 ctl.entrysize = sizeof(RI_QueryHashEntry);
 ri_query_cache = hash_create("RI query cache",
 RI_INIT_QUERYHASHSIZE,
 &ctl, HASH_ELEM | HASH_BLOBS);
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(RI_CompareKey);
 ctl.entrysize = sizeof(RI_CompareHashEntry);
 ri_compare_cache = hash_create("RI compare cache",
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index ad582f99a5..7d4443e807 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -3464,14 +3464,14 @@ set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
 * We use a hash table to hold known names, so that this process is O(N)
 * not O(N^2) for N names.
 */
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = NAMEDATALEN;
 hash_ctl.entrysize = sizeof(NameHashEntry);
 hash_ctl.hcxt = CurrentMemoryContext;
 names_hash = hash_create("set_rtable_names names",
 list_length(dpns->rtable),
 &hash_ctl,
- HASH_ELEM | HASH_CONTEXT);
+ HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+
 /* Preload the hash table with names appearing in parent_namespaces */
 foreach(lc, parent_namespaces)
 {
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 05ac366b40..934a84e03f 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -79,7 +79,6 @@ InitializeAttoptCache(void)
 HASHCTL ctl;
 /* Initialize the hash table. */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(AttoptCacheKey);
 ctl.entrysize = sizeof(AttoptCacheEntry);
 AttoptCacheHash =
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index 0427795395..0877bc7e0e 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -118,7 +118,6 @@ BuildEventTriggerCache(void)
 EventTriggerCacheState = ETCS_REBUILD_STARTED;
 /* Create new hash table. */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(EventTriggerEvent);
 ctl.entrysize = sizeof(EventTriggerCacheEntry);
 ctl.hcxt = EventTriggerCacheContext;
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 66393becfb..3bd5e18042 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1607,7 +1607,6 @@ LookupOpclassInfo(Oid operatorClassOid,
 /* First time through: initialize the opclass cache */
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(OpClassCacheEnt);
 OpClassCache = hash_create("Operator class cache", 64,
@@ -3775,7 +3774,6 @@ RelationCacheInitialize(void)
 /*
 * create hashtable that indexes the relcache
 */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(RelIdCacheEnt);
 RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
diff --git a/src/backend/utils/cache/relfilenodemap.c b/src/backend/utils/cache/relfilenodemap.c
index 0dbdbff603..38e6379974 100644
--- a/src/backend/utils/cache/relfilenodemap.c
+++ b/src/backend/utils/cache/relfilenodemap.c
@@ -110,17 +110,15 @@ InitializeRelfilenodeMap(void)
 relfilenode_skey[0].sk_attno = Anum_pg_class_reltablespace;
 relfilenode_skey[1].sk_attno = Anum_pg_class_relfilenode;
- /* Initialize the hash table. */
- MemSet(&ctl, 0, sizeof(ctl));
- ctl.keysize = sizeof(RelfilenodeMapKey);
- ctl.entrysize = sizeof(RelfilenodeMapEntry);
- ctl.hcxt = CacheMemoryContext;
-
 /*
 * Only create the RelfilenodeMapHash now, so we don't end up partially
 * initialized when fmgr_info_cxt() above ERRORs out with an out of memory
 * error.
 */
+ ctl.keysize = sizeof(RelfilenodeMapKey);
+ ctl.entrysize = sizeof(RelfilenodeMapEntry);
+ ctl.hcxt = CacheMemoryContext;
+
 RelfilenodeMapHash =
 hash_create("RelfilenodeMap cache", 64, &ctl,
 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index e0c3c1b1c1..c8387e2541 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -79,7 +79,6 @@ InitializeTableSpaceCache(void)
 HASHCTL ctl;
 /* Initialize the hash table. */
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(TableSpaceCacheEntry);
 TableSpaceCacheHash =
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index f9f7912cb8..a2867fac7d 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -117,7 +117,6 @@ lookup_ts_parser_cache(Oid prsId)
 /* First time through: initialize the hash table */
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(TSParserCacheEntry);
 TSParserCacheHash = hash_create("Tsearch parser cache", 4,
@@ -215,7 +214,6 @@ lookup_ts_dictionary_cache(Oid dictId)
 /* First time through: initialize the hash table */
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(TSDictionaryCacheEntry);
 TSDictionaryCacheHash = hash_create("Tsearch dictionary cache", 8,
@@ -365,7 +363,6 @@ init_ts_config_cache(void)
 {
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(TSConfigCacheEntry);
 TSConfigCacheHash = hash_create("Tsearch configuration cache", 16,
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 5883fde367..1e331098c0 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -341,7 +341,6 @@ lookup_type_cache(Oid type_id, int flags)
 /* First time through: initialize the hash table */
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(Oid);
 ctl.entrysize = sizeof(TypeCacheEntry);
 TypeCacheHash = hash_create("Type information cache", 64,
@@ -1874,7 +1873,6 @@ assign_record_type_typmod(TupleDesc tupDesc)
 /* First time through: initialize the hash table */
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(TupleDesc); /* just the pointer */
 ctl.entrysize = sizeof(RecordCacheEntry);
 ctl.hash = record_type_typmod_hash;
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index bd779fdaf7..adb31e109f 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -680,13 +680,12 @@ find_rendezvous_variable(const char *varName)
 {
 HASHCTL ctl;
- MemSet(&ctl, 0, sizeof(ctl));
 ctl.keysize = NAMEDATALEN;
 ctl.entrysize = sizeof(rendezvousHashEntry);
 rendezvousHash = hash_create("Rendezvous variable hash",
 16,
 &ctl,
- HASH_ELEM);
+ HASH_ELEM | HASH_STRINGS);
 }
 /* Find or create the hashtable entry for this varName */
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 2681b7fbc6..fa5f7ac615 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -565,7 +565,6 @@ record_C_func(HeapTuple procedureTuple,
 {
 HASHCTL hash_ctl;
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(CFuncHashTabEntry);
 CFuncHash = hash_create("CFuncHash",
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index d14d875c93..fbd849b8f7 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -30,11 +30,12 @@
 * dynahash.c provides support for these types of lookup keys:
 *
 * 1. Null-terminated C strings (truncated if necessary to fit in keysize),
- * compared as though by strcmp(). This is the default behavior.
+ * compared as though by strcmp(). This is selected by specifying the
+ * HASH_STRINGS flag to hash_create.
 *
 * 2. Arbitrary binary data of size keysize, compared as though by memcmp().
 * (Caller must ensure there are no undefined padding bits in the keys!)
- * This is selected by specifying HASH_BLOBS flag to hash_create.
+ * This is selected by specifying the HASH_BLOBS flag to hash_create.
 *
 * 3. More complex key behavior can be selected by specifying user-supplied
 * hashing, comparison, and/or key-copying functions. At least a hashing
@@ -47,8 +48,8 @@
 * locks.
 * - Shared memory hashes are allocated in a fixed size area at startup and
 * are discoverable by name from other processes.
- * - Because entries don't need to be moved in the case of hash conflicts, has
- * better performance for large entries
+ * - Because entries don't need to be moved in the case of hash conflicts,
+ * dynahash has better performance for large entries.
 * - Guarantees stable pointers to entries.
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
@@ -316,6 +317,28 @@ string_compare(const char *key1, const char *key2, Size keysize)
 * *info: additional table parameters, as indicated by flags
 * flags: bitmask indicating which parameters to take from *info
 *
+ * The flags value *must* include HASH_ELEM. (Formerly, this was nominally
+ * optional, but the default keysize and entrysize values were useless.)
+ * The flags value must also include exactly one of HASH_STRINGS, HASH_BLOBS,
+ * or HASH_FUNCTION, to define the key hashing semantics (C strings,
+ * binary blobs, or custom, respectively). Callers specifying a custom
+ * hash function will likely also want to use HASH_COMPARE, and perhaps
+ * also HASH_KEYCOPY, to control key comparison and copying.
+ * Another often-used flag is HASH_CONTEXT, to allocate the hash table
+ * under info->hcxt rather than under TopMemoryContext; the default
+ * behavior is only suitable for session-lifespan hash tables.
+ * Other flags bits are special-purpose and seldom used, except for those
+ * associated with shared-memory hash tables, for which see ShmemInitHash().
+ *
+ * Fields in *info are read only when the associated flags bit is set.
+ * It is not necessary to initialize other fields of *info.
+ * Neither tabname nor *info need persist after the hash_create() call.
+ *
+ * Note: It is deprecated for callers of hash_create() to explicitly specify
+ * string_hash, tag_hash, uint32_hash, or oid_hash. Just set HASH_BLOBS or
+ * HASH_STRINGS. Use HASH_FUNCTION only when you want something other than
+ * one of these.
+ *
 * Note: for a shared-memory hashtable, nelem needs to be a pretty good
 * estimate, since we can't expand the table on the fly. But an unshared
 * hashtable can be expanded on-the-fly, so it's better for nelem to be
@@ -323,11 +346,19 @@ string_compare(const char *key1, const char *key2, Size keysize)
 * large nelem will penalize hash_seq_search speed without buying much.
 */
 HTAB *
-hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
+hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
 {
 HTAB *hashp;
 HASHHDR *hctl;
+ /*
+ * Hash tables now allocate space for key and data, but you have to say
+ * how much space to allocate.
+ */
+ Assert(flags & HASH_ELEM);
+ Assert(info->keysize > 0);
+ Assert(info->entrysize >= info->keysize);
+
 /*
 * For shared hash tables, we have a local hash header (HTAB struct) that
 * we allocate in TopMemoryContext; all else is in shared memory.
@@ -370,28 +401,43 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 * Select the appropriate hash function (see comments at head of file).
 */
 if (flags & HASH_FUNCTION)
+ {
+ Assert(!(flags & (HASH_BLOBS | HASH_STRINGS)));
 hashp->hash = info->hash;
+ }
 else if (flags & HASH_BLOBS)
 {
+ Assert(!(flags & HASH_STRINGS));
 /* We can optimize hashing for common key sizes */
- Assert(flags & HASH_ELEM);
 if (info->keysize == sizeof(uint32))
 hashp->hash = uint32_hash;
 else
 hashp->hash = tag_hash;
 }
 else
- hashp->hash = string_hash; /* default hash function */
+ {
+ /*
+ * string_hash used to be considered the default hash method, and in a
+ * non-assert build it effectively still is. But we now consider it
+ * an assertion error to not say HASH_STRINGS explicitly. To help
+ * catch mistaken usage of HASH_STRINGS, we also insist on a
+ * reasonably long string length: if the keysize is only 4 or 8 bytes,
+ * it's almost certainly an integer or pointer not a string.
+ */
+ Assert(flags & HASH_STRINGS);
+ Assert(info->keysize > 8);
+
+ hashp->hash = string_hash;
+ }
 /*
 * If you don't specify a match function, it defaults to string_compare if
- * you used string_hash (either explicitly or by default) and to memcmp
- * otherwise.
+ * you used string_hash, and to memcmp otherwise.
 *
 * Note: explicitly specifying string_hash is deprecated, because this
 * might not work for callers in loadable modules on some platforms due to
 * referencing a trampoline instead of the string_hash function proper.
- * Just let it default, eh?
+ * Specify HASH_STRINGS instead.
 */
 if (flags & HASH_COMPARE)
 hashp->match = info->match;
@@ -505,16 +551,9 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 hctl->dsize = info->dsize;
 }
- /*
- * hash table now allocates space for key and data but you have to say how
- * much space to allocate
- */
- if (flags & HASH_ELEM)
- {
- Assert(info->entrysize >= info->keysize);
- hctl->keysize = info->keysize;
- hctl->entrysize = info->entrysize;
- }
+ /* remember the entry sizes, too */
+ hctl->keysize = info->keysize;
+ hctl->entrysize = info->entrysize;
 /* make local copies of heavily-used constant fields */
 hashp->keysize = hctl->keysize;
@@ -593,10 +632,6 @@ hdefault(HTAB *hashp)
 hctl->dsize = DEF_DIRSIZE;
 hctl->nsegs = 0;
- /* rather pointless defaults for key & entry size */
- hctl->keysize = sizeof(char *);
- hctl->entrysize = 2 * sizeof(char *);
-
 hctl->num_partitions = 0; /* not partitioned */
 /* table has no fixed maximum size */
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index ec6f80ee99..283dfe2d9e 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -119,7 +119,7 @@ EnablePortalManager(void)
 * create, initially */
 PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
- &ctl, HASH_ELEM);
+ &ctl, HASH_ELEM | HASH_STRINGS);
 }
 /*
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index 4ee9ef0ffe..9626f98100 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -223,7 +223,6 @@ GetComboCommandId(CommandId cmin, CommandId cmax)
 sizeComboCids = CCID_ARRAY_SIZE;
 usedComboCids = 0;
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(ComboCidKeyData);
 hash_ctl.entrysize = sizeof(ComboCidEntryData);
 hash_ctl.hcxt = TopTransactionContext;
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index bebf89b3c4..13c6602217 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -64,25 +64,36 @@ typedef struct HTAB HTAB;
 /* Only those fields indicated by hash_flags need be set */
 typedef struct HASHCTL
 {
+ /* Used if HASH_PARTITION flag is set: */
 long num_partitions; /* # partitions (must be power of 2) */
+ /* Used if HASH_SEGMENT flag is set: */
 long ssize; /* segment size */
+ /* Used if HASH_DIRSIZE flag is set: */
 long dsize; /* (initial) directory size */
 long max_dsize; /* limit to dsize if dir size is limited */
+ /* Used if HASH_ELEM flag is set (which is now required): */
 Size keysize; /* hash key length in bytes */
 Size entrysize; /* total user element size in bytes */
+ /* Used if HASH_FUNCTION flag is set: */
 HashValueFunc hash; /* hash function */
+ /* Used if HASH_COMPARE flag is set: */
 HashCompareFunc match; /* key comparison function */
+ /* Used if HASH_KEYCOPY flag is set: */
 HashCopyFunc keycopy; /* key copying function */
+ /* Used if HASH_ALLOC flag is set: */
 HashAllocFunc alloc; /* memory allocator */
+ /* Used if HASH_CONTEXT flag is set: */
 MemoryContext hcxt; /* memory context to use for allocations */
+ /* Used if HASH_SHARED_MEM flag is set: */
 HASHHDR *hctl; /* location of header in shared mem */
 } HASHCTL;
-/* Flags to indicate which parameters are supplied */
+/* Flag bits for hash_create; most indicate which parameters are supplied */
 #define HASH_PARTITION 0x0001 /* Hashtable is used w/partitioned locking */
 #define HASH_SEGMENT 0x0002 /* Set segment size */
 #define HASH_DIRSIZE 0x0004 /* Set directory size (initial and max) */
-#define HASH_ELEM 0x0010 /* Set keysize and entrysize */
+#define HASH_ELEM 0x0008 /* Set keysize and entrysize (now required!) */
+#define HASH_STRINGS 0x0010 /* Select support functions for string keys */
 #define HASH_BLOBS 0x0020 /* Select support functions for binary keys */
 #define HASH_FUNCTION 0x0040 /* Set user defined hash function */
 #define HASH_COMPARE 0x0080 /* Set user defined comparison function */
@@ -93,7 +104,6 @@ typedef struct HASHCTL
 #define HASH_ATTACH 0x1000 /* Do not initialize hctl */
 #define HASH_FIXED_SIZE 0x2000 /* Initial size is a hard limit */
-
 /* max_dsize value to indicate expansible directory */
 #define NO_MAX_DSIZE (-1)
@@ -116,13 +126,9 @@ typedef struct
 /*
 * prototypes for functions in dynahash.c
- *
- * Note: It is deprecated for callers of hash_create to explicitly specify
- * string_hash, tag_hash, uint32_hash, or oid_hash. Just set HASH_BLOBS or
- * not. Use HASH_FUNCTION only when you want something other than those.
 */
 extern HTAB *hash_create(const char *tabname, long nelem,
- HASHCTL *info, int flags);
+ const HASHCTL *info, int flags);
 extern void hash_destroy(HTAB *hashp);
 extern void hash_stats(const char *where, HTAB *hashp);
 extern void *hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action,
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 4de756455d..6299adf71a 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -458,7 +458,6 @@ _PG_init(void)
 /*
 * Create hash tables.
 */
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(plperl_interp_desc);
 plperl_interp_hash = hash_create("PL/Perl interpreters",
@@ -466,7 +465,6 @@ _PG_init(void)
 &hash_ctl,
 HASH_ELEM | HASH_BLOBS);
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(plperl_proc_key);
 hash_ctl.entrysize = sizeof(plperl_proc_ptr);
 plperl_proc_hash = hash_create("PL/Perl procedures",
@@ -580,13 +578,12 @@ select_perl_context(bool trusted)
 {
 HASHCTL hash_ctl;
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = NAMEDATALEN;
 hash_ctl.entrysize = sizeof(plperl_query_entry);
 interp_desc->query_hash = hash_create("PL/Perl queries",
 32,
 &hash_ctl,
- HASH_ELEM);
+ HASH_ELEM | HASH_STRINGS);
 }
 /*
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index b610b28d70..555da952e1 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -2567,7 +2567,6 @@ plpgsql_HashTableInit(void)
 /* don't allow double-initialization */
 Assert(plpgsql_HashTable == NULL);
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(PLpgSQL_func_hashkey);
 ctl.entrysize = sizeof(plpgsql_HashEnt);
 plpgsql_HashTable = hash_create("PLpgSQL function hash",
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index ccbc50fc45..112f6ab0ae 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -4058,7 +4058,6 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 {
 estate->simple_eval_estate = simple_eval_estate;
 /* Private cast hash just lives in function's main context */
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(plpgsql_CastHashKey);
 ctl.entrysize = sizeof(plpgsql_CastHashEntry);
 ctl.hcxt = CurrentMemoryContext;
@@ -4077,7 +4076,6 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
 shared_cast_context = AllocSetContextCreate(TopMemoryContext,
 "PLpgSQL cast info",
 ALLOCSET_DEFAULT_SIZES);
- memset(&ctl, 0, sizeof(ctl));
 ctl.keysize = sizeof(plpgsql_CastHashKey);
 ctl.entrysize = sizeof(plpgsql_CastHashEntry);
 ctl.hcxt = shared_cast_context;
diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c
index 7f54d093ac..0365acc95b 100644
--- a/src/pl/plpython/plpy_plpymodule.c
+++ b/src/pl/plpython/plpy_plpymodule.c
@@ -214,7 +214,6 @@ PLy_add_exceptions(PyObject *plpy)
 PLy_exc_spi_error = PLy_create_exception("plpy.SPIError", NULL, NULL,
 "SPIError", plpy);
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(int);
 hash_ctl.entrysize = sizeof(PLyExceptionEntry);
 PLy_spi_exceptions = hash_create("PL/Python SPI exceptions", 256,
diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c
index 1f05c633ef..b7c0b5cebe 100644
--- a/src/pl/plpython/plpy_procedure.c
+++ b/src/pl/plpython/plpy_procedure.c
@@ -34,7 +34,6 @@ init_procedure_caches(void)
 {
 HASHCTL hash_ctl;
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(PLyProcedureKey);
 hash_ctl.entrysize = sizeof(PLyProcedureEntry);
 PLy_procedure_cache = hash_create("PL/Python procedures", 32, &hash_ctl,
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index a3a2dc8e89..e11837559d 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -439,7 +439,6 @@ _PG_init(void)
 /************************************************************
 * Create the hash table for working interpreters
 ************************************************************/
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(Oid);
 hash_ctl.entrysize = sizeof(pltcl_interp_desc);
 pltcl_interp_htab = hash_create("PL/Tcl interpreters",
@@ -450,7 +449,6 @@ _PG_init(void)
 /************************************************************
 * Create the hash table for function lookup
 ************************************************************/
- memset(&hash_ctl, 0, sizeof(hash_ctl));
 hash_ctl.keysize = sizeof(pltcl_proc_key);
 hash_ctl.entrysize = sizeof(pltcl_proc_ptr);
 pltcl_proc_htab = hash_create("PL/Tcl functions",
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index 3f0fb51e91..4a360f5077 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -203,15 +203,13 @@ init_timezone_hashtable(void)
 {
 HASHCTL hash_ctl;
- MemSet(&hash_ctl, 0, sizeof(hash_ctl));
-
 hash_ctl.keysize = TZ_STRLEN_MAX + 1;
 hash_ctl.entrysize = sizeof(pg_tz_cache);
 timezone_cache = hash_create("Timezones",
 4,
 &hash_ctl,
- HASH_ELEM);
+ HASH_ELEM | HASH_STRINGS);
 if (!timezone_cache)
 return false;
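
For anyone following along, here is a rough illustration of what a caller looks like under the revised conventions. This is only a sketch, not part of the patch; the table, struct, and function names are made up, while the flags and HASHCTL fields are the ones documented in the new hash_create() comment above.

    #include "postgres.h"
    #include "utils/hsearch.h"

    /* Hypothetical entry type: the key (an Oid) must come first. */
    typedef struct ExampleCacheEntry
    {
    	Oid			relid;		/* hash key */
    	int			hits;		/* payload */
    } ExampleCacheEntry;

    static HTAB *ExampleCache = NULL;

    static void
    init_example_cache(void)
    {
    	HASHCTL		ctl;

    	/* No memset/MemSet needed: only fields selected by the flags are read. */
    	ctl.keysize = sizeof(Oid);
    	ctl.entrysize = sizeof(ExampleCacheEntry);

    	/* HASH_ELEM is now required; HASH_BLOBS selects binary-key hashing. */
    	ExampleCache = hash_create("example cache", 128, &ctl,
    							   HASH_ELEM | HASH_BLOBS);
    }

A table keyed by NAMEDATALEN strings would instead pass HASH_ELEM | HASH_STRINGS, and a caller with its own hash function would say HASH_FUNCTION (plus HASH_COMPARE as needed), per the rules spelled out in the patched hash_create() comment.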