More Perl cleanups - Mailing list pgsql-hackers

From Dagfinn Ilmari Mannsåker
Subject More Perl cleanups
Date
Msg-id 87jz8rzf3h.fsf@wibble.ilmari.org
Responses Re: More Perl cleanups
List pgsql-hackers
Hi Hackers,

In some recent-ish commits (ce1b0f9d, fd4c4ede and cc2c9fa6) we
converted a lot of the TAP tests to use long command-line options and
fat commas (=>) to separate the options from their arguments, so that
perltidy doesn't break lines between them.
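
For example, the change is roughly this (a sketch in the style of the
patch, not an excerpt from it):

    # before: perltidy is free to wrap between the option and its value
    [ 'pg_receivewal', '--slot', $slot_name, '--create-slot' ]

    # after: the fat comma keeps the option and its value together
    [ 'pg_receivewal', '--slot' => $slot_name, '--create-slot' ]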

However, those patches were nowhere near complete, so here's a follow-up
to fix all the cases I could find.  While I was there, I failed to
resist the urge to do some other minor tidy-ups that I think make things
neater, but that perltidy has no opinion on.  I also eliminated some
variables that were only initialised and then used once.
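
A hypothetical example of the latter (not taken from the patch):

    # before: $result is assigned once and read once
    my $result = $node->safe_psql('postgres', 'SELECT 1');
    is($result, '1', 'query works');

    # after: use the expression directly
    is($node->safe_psql('postgres', 'SELECT 1'), '1', 'query works');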

The second patch might be more controversial: it eliminates unnecessary
quoting of hash keys, both inside curly braces and before fat commas
(except where a hash has a mix of keys that do and don't need quoting).
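
To illustrate the rule (a made-up example, not from the patch):

    # quotes dropped: keys that are plain identifiers
    $node->init(allows_streaming => 1);
    my $method = $tc->{compression_method};

    # quotes kept: 'base.tar.gz' needs them, so its neighbour keeps
    # them too for consistency
    my %archive = (
        'base.tar.gz' => 1,
        'backup_manifest' => 1,
    );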

- ilmari

From 1b02c5e26a15b12693efcaabb538f220263c4230 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dagfinn=20Ilmari=20Manns=C3=A5ker?= <ilmari@ilmari.org>
Date: Thu, 30 Jan 2025 12:52:01 +0000
Subject: [PATCH 2/2] perl: remove pointless quotes from hash keys

Both inside curly brackets and before fat commas

Some nominally-unnecessary quotes are left in place where there's a
mix of keys that need and don't need quoting in the same hash.
---
 contrib/auto_explain/t/001_auto_explain.pl    |  2 +-
 contrib/basebackup_to_shell/t/001_basic.pl    |  6 +-
 doc/src/sgml/generate-keywords-table.pl       | 14 +--
 src/backend/catalog/Catalog.pm                | 14 +--
 src/backend/parser/check_keywords.pl          |  8 +-
 src/backend/snowball/snowball_create.pl       |  4 +-
 src/backend/utils/Gen_fmgrtab.pl              |  4 +-
 src/backend/utils/mb/Unicode/UCS_to_most.pl   | 54 +++++-----
 src/bin/pg_basebackup/t/010_pg_basebackup.pl  |  2 +-
 .../t/011_in_place_tablespace.pl              |  2 +-
 src/bin/pg_basebackup/t/020_pg_receivewal.pl  |  8 +-
 src/bin/pg_basebackup/t/030_pg_recvlogical.pl |  4 +-
 src/bin/pg_combinebackup/t/008_promote.pl     |  2 +-
 src/bin/pg_dump/t/002_pg_dump.pl              | 10 +-
 src/bin/pg_upgrade/t/002_pg_upgrade.pl        |  2 +-
 src/bin/pg_verifybackup/t/003_corruption.pl   | 98 +++++++++----------
 src/bin/pg_verifybackup/t/008_untar.pl        | 54 +++++-----
 src/bin/pg_verifybackup/t/009_extract.pl      | 40 ++++----
 src/bin/pg_verifybackup/t/010_client_untar.pl | 68 ++++++-------
 src/interfaces/ecpg/preproc/parse.pl          | 50 +++++-----
 src/interfaces/libpq/t/001_uri.pl             |  2 +-
 src/pl/plperl/plc_perlboot.pl                 |  4 +-
 src/test/authentication/t/001_password.pl     | 22 ++---
 src/test/authentication/t/002_saslprep.pl     |  2 +-
 .../authentication/t/004_file_inclusion.pl    |  8 +-
 src/test/ldap/LdapServer.pm                   |  4 +-
 src/test/ldap/t/001_auth.pl                   | 42 ++++----
 src/test/ldap/t/002_bindpasswd.pl             |  2 +-
 .../t/001_mutated_bindpasswd.pl               |  2 +-
 .../modules/oauth_validator/t/001_server.pl   | 10 +-
 .../modules/oauth_validator/t/002_client.pl   |  6 +-
 .../modules/oauth_validator/t/OAuth/Server.pm | 18 ++--
 src/test/modules/test_pg_dump/t/001_base.pl   |  2 +-
 .../perl/PostgreSQL/Test/AdjustUpgrade.pm     |  2 +-
 .../perl/PostgreSQL/Test/BackgroundPsql.pm    | 10 +-
 src/test/perl/PostgreSQL/Test/Cluster.pm      | 18 ++--
 src/test/perl/PostgreSQL/Test/Kerberos.pm     |  6 +-
 src/test/perl/PostgreSQL/Test/Utils.pm        |  4 +-
 .../postmaster/t/002_connection_limits.pl     |  2 +-
 src/test/recovery/t/001_stream_rep.pl         |  2 +-
 src/test/recovery/t/006_logical_decoding.pl   |  4 +-
 .../t/010_logical_decoding_timelines.pl       | 10 +-
 src/test/recovery/t/021_row_visibility.pl     |  2 +-
 src/test/recovery/t/032_relfilenode_reuse.pl  |  2 +-
 .../t/035_standby_logical_decoding.pl         | 12 +--
 src/test/ssl/t/002_scram.pl                   |  4 +-
 src/test/ssl/t/003_sslinfo.pl                 |  6 +-
 src/test/subscription/t/027_nosuperuser.pl    |  6 +-
 src/tools/win32tzlist.pl                      | 14 +--
 49 files changed, 337 insertions(+), 337 deletions(-)

diff --git a/contrib/auto_explain/t/001_auto_explain.pl b/contrib/auto_explain/t/001_auto_explain.pl
index 80c0c19af58..25252604b7d 100644
--- a/contrib/auto_explain/t/001_auto_explain.pl
+++ b/contrib/auto_explain/t/001_auto_explain.pl
@@ -28,7 +28,7 @@ sub query_log
 }
 
 my $node = PostgreSQL::Test::Cluster->new('main');
-$node->init('auth_extra' => [ '--create-role' => 'regress_user1' ]);
+$node->init(auth_extra => [ '--create-role' => 'regress_user1' ]);
 $node->append_conf('postgresql.conf',
     "session_preload_libraries = 'auto_explain'");
 $node->append_conf('postgresql.conf', "auto_explain.log_min_duration = 0");
diff --git a/contrib/basebackup_to_shell/t/001_basic.pl b/contrib/basebackup_to_shell/t/001_basic.pl
index 3ee4603bd3a..68ba69e034b 100644
--- a/contrib/basebackup_to_shell/t/001_basic.pl
+++ b/contrib/basebackup_to_shell/t/001_basic.pl
@@ -10,7 +10,7 @@ use Test::More;
 # For testing purposes, we just want basebackup_to_shell to write standard
 # input to a file.  However, Windows doesn't have "cat" or any equivalent, so
 # we use "gzip" for this purpose.
-my $gzip = $ENV{'GZIP_PROGRAM'};
+my $gzip = $ENV{GZIP_PROGRAM};
 if (!defined $gzip || $gzip eq '')
 {
     plan skip_all => 'gzip not available';
@@ -24,8 +24,8 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
 # Make sure pg_hba.conf is set up to allow connections from backupuser.
 # This is only needed on Windows machines that don't use UNIX sockets.
 $node->init(
-    'allows_streaming' => 1,
-    'auth_extra' => [ '--create-role' => 'backupuser' ]);
+    allows_streaming => 1,
+    auth_extra => [ '--create-role' => 'backupuser' ]);
 
 $node->append_conf('postgresql.conf',
     "shared_preload_libraries = 'basebackup_to_shell'");
diff --git a/doc/src/sgml/generate-keywords-table.pl b/doc/src/sgml/generate-keywords-table.pl
index 76c4689872f..2b3ee8793c6 100644
--- a/doc/src/sgml/generate-keywords-table.pl
+++ b/doc/src/sgml/generate-keywords-table.pl
@@ -43,7 +43,7 @@ while (<$fh>)
 {
     if (/^PG_KEYWORD\("(\w+)", \w+, (\w+)_KEYWORD\, (\w+)\)/)
     {
-        $keywords{ uc $1 }{'pg'}{ lc $2 } = 1;
+        $keywords{ uc $1 }{pg}{ lc $2 } = 1;
         $as_keywords{ uc $1 } = 1 if $3 eq 'AS_LABEL';
     }
 }
@@ -94,19 +94,19 @@ foreach my $word (sort keys %keywords)
     print "    <entry><token>$printword</token></entry>\n";
 
     print "    <entry>";
-    if ($keywords{$word}{pg}{'unreserved'})
+    if ($keywords{$word}{pg}{unreserved})
     {
         print "non-reserved";
     }
-    elsif ($keywords{$word}{pg}{'col_name'})
+    elsif ($keywords{$word}{pg}{col_name})
     {
         print "non-reserved (cannot be function or type)";
     }
-    elsif ($keywords{$word}{pg}{'type_func_name'})
+    elsif ($keywords{$word}{pg}{type_func_name})
     {
         print "reserved (can be function or type)";
     }
-    elsif ($keywords{$word}{pg}{'reserved'})
+    elsif ($keywords{$word}{pg}{reserved})
     {
         print "reserved";
     }
@@ -119,11 +119,11 @@ foreach my $word (sort keys %keywords)
     foreach my $ver (@sql_versions)
     {
         print "    <entry>";
-        if ($keywords{$word}{$ver}{'reserved'})
+        if ($keywords{$word}{$ver}{reserved})
         {
             print "reserved";
         }
-        elsif ($keywords{$word}{$ver}{'nonreserved'})
+        elsif ($keywords{$word}{$ver}{nonreserved})
         {
             print "non-reserved";
         }
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index 5a912549b82..527d9af38ef 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -28,13 +28,13 @@ sub ParseHeader
     # There are a few types which are given one name in the C source, but a
     # different name at the SQL level.  These are enumerated here.
     my %RENAME_ATTTYPE = (
-        'int16' => 'int2',
-        'int32' => 'int4',
-        'int64' => 'int8',
-        'Oid' => 'oid',
-        'NameData' => 'name',
-        'TransactionId' => 'xid',
-        'XLogRecPtr' => 'pg_lsn');
+        int16 => 'int2',
+        int32 => 'int4',
+        int64 => 'int8',
+        Oid => 'oid',
+        NameData => 'name',
+        TransactionId => 'xid',
+        XLogRecPtr => 'pg_lsn');
 
     my %catalog;
     my $declaring_attributes = 0;
diff --git a/src/backend/parser/check_keywords.pl b/src/backend/parser/check_keywords.pl
index 2f25b2a1071..177bc5d99e6 100644
--- a/src/backend/parser/check_keywords.pl
+++ b/src/backend/parser/check_keywords.pl
@@ -47,10 +47,10 @@ $, = ' ';     # set output field separator
 $\ = "\n";    # set output record separator
 
 my %keyword_categories;
-$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD';
-$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD';
-$keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD';
-$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD';
+$keyword_categories{unreserved_keyword} = 'UNRESERVED_KEYWORD';
+$keyword_categories{col_name_keyword} = 'COL_NAME_KEYWORD';
+$keyword_categories{type_func_name_keyword} = 'TYPE_FUNC_NAME_KEYWORD';
+$keyword_categories{reserved_keyword} = 'RESERVED_KEYWORD';
 
 open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename");
 
diff --git a/src/backend/snowball/snowball_create.pl b/src/backend/snowball/snowball_create.pl
index dffa8feb769..1f25baa83af 100644
--- a/src/backend/snowball/snowball_create.pl
+++ b/src/backend/snowball/snowball_create.pl
@@ -50,8 +50,8 @@ our @languages = qw(
 # @languages.
 
 our %ascii_languages = (
-    'hindi' => 'english',
-    'russian' => 'english',);
+    hindi => 'english',
+    russian => 'english',);
 
 GetOptions(
     'depfile' => \$depfile,
diff --git a/src/backend/utils/Gen_fmgrtab.pl b/src/backend/utils/Gen_fmgrtab.pl
index 247e1c6ab4c..c4db6480674 100644
--- a/src/backend/utils/Gen_fmgrtab.pl
+++ b/src/backend/utils/Gen_fmgrtab.pl
@@ -208,8 +208,8 @@ foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
 # Create the fmgr_builtins table, collect data for fmgr_builtin_oid_index
 print $tfh "\nconst FmgrBuiltin fmgr_builtins[] = {\n";
 my %bmap;
-$bmap{'t'} = 'true';
-$bmap{'f'} = 'false';
+$bmap{t} = 'true';
+$bmap{f} = 'false';
 my @fmgr_builtin_oid_index;
 my $last_builtin_oid = 0;
 my $fmgr_count = 0;
diff --git a/src/backend/utils/mb/Unicode/UCS_to_most.pl b/src/backend/utils/mb/Unicode/UCS_to_most.pl
index b0009692521..4470ed9cd38 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_most.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_most.pl
@@ -23,33 +23,33 @@ use convutils;
 my $this_script = 'src/backend/utils/mb/Unicode/UCS_to_most.pl';
 
 my %filename = (
-    'WIN866' => 'CP866.TXT',
-    'WIN874' => 'CP874.TXT',
-    'WIN1250' => 'CP1250.TXT',
-    'WIN1251' => 'CP1251.TXT',
-    'WIN1252' => 'CP1252.TXT',
-    'WIN1253' => 'CP1253.TXT',
-    'WIN1254' => 'CP1254.TXT',
-    'WIN1255' => 'CP1255.TXT',
-    'WIN1256' => 'CP1256.TXT',
-    'WIN1257' => 'CP1257.TXT',
-    'WIN1258' => 'CP1258.TXT',
-    'ISO8859_2' => '8859-2.TXT',
-    'ISO8859_3' => '8859-3.TXT',
-    'ISO8859_4' => '8859-4.TXT',
-    'ISO8859_5' => '8859-5.TXT',
-    'ISO8859_6' => '8859-6.TXT',
-    'ISO8859_7' => '8859-7.TXT',
-    'ISO8859_8' => '8859-8.TXT',
-    'ISO8859_9' => '8859-9.TXT',
-    'ISO8859_10' => '8859-10.TXT',
-    'ISO8859_13' => '8859-13.TXT',
-    'ISO8859_14' => '8859-14.TXT',
-    'ISO8859_15' => '8859-15.TXT',
-    'ISO8859_16' => '8859-16.TXT',
-    'KOI8R' => 'KOI8-R.TXT',
-    'KOI8U' => 'KOI8-U.TXT',
-    'GBK' => 'CP936.TXT');
+    WIN866 => 'CP866.TXT',
+    WIN874 => 'CP874.TXT',
+    WIN1250 => 'CP1250.TXT',
+    WIN1251 => 'CP1251.TXT',
+    WIN1252 => 'CP1252.TXT',
+    WIN1253 => 'CP1253.TXT',
+    WIN1254 => 'CP1254.TXT',
+    WIN1255 => 'CP1255.TXT',
+    WIN1256 => 'CP1256.TXT',
+    WIN1257 => 'CP1257.TXT',
+    WIN1258 => 'CP1258.TXT',
+    ISO8859_2 => '8859-2.TXT',
+    ISO8859_3 => '8859-3.TXT',
+    ISO8859_4 => '8859-4.TXT',
+    ISO8859_5 => '8859-5.TXT',
+    ISO8859_6 => '8859-6.TXT',
+    ISO8859_7 => '8859-7.TXT',
+    ISO8859_8 => '8859-8.TXT',
+    ISO8859_9 => '8859-9.TXT',
+    ISO8859_10 => '8859-10.TXT',
+    ISO8859_13 => '8859-13.TXT',
+    ISO8859_14 => '8859-14.TXT',
+    ISO8859_15 => '8859-15.TXT',
+    ISO8859_16 => '8859-16.TXT',
+    KOI8R => 'KOI8-R.TXT',
+    KOI8U => 'KOI8-U.TXT',
+    GBK => 'CP936.TXT');
 
 # make maps for all encodings if not specified
 my @charsets = (scalar(@ARGV) > 0) ? @ARGV : sort keys(%filename);
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 89ff26b6314..7cdd4442755 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -470,7 +470,7 @@ SKIP:
     $node2->init_from_backup(
         $node, 'tarbackup2',
         tar_program => $tar,
-        'tablespace_map' => { $tblspcoid => $realRepTsDir });
+        tablespace_map => { $tblspcoid => $realRepTsDir });
 
     $node2->start;
     my $result = $node2->safe_psql('postgres', 'SELECT * FROM test1');
diff --git a/src/bin/pg_basebackup/t/011_in_place_tablespace.pl b/src/bin/pg_basebackup/t/011_in_place_tablespace.pl
index 9e53dada4fa..ec942e54eee 100644
--- a/src/bin/pg_basebackup/t/011_in_place_tablespace.pl
+++ b/src/bin/pg_basebackup/t/011_in_place_tablespace.pl
@@ -17,7 +17,7 @@ my @pg_basebackup_defs =
 
 # Set up an instance.
 my $node = PostgreSQL::Test::Cluster->new('main');
-$node->init('allows_streaming' => 1);
+$node->init(allows_streaming => 1);
 $node->start();
 
 # Create an in-place tablespace.
diff --git a/src/bin/pg_basebackup/t/020_pg_receivewal.pl b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
index 4be96affd7b..499b6e5d298 100644
--- a/src/bin/pg_basebackup/t/020_pg_receivewal.pl
+++ b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
@@ -58,12 +58,12 @@ $primary->command_ok(
     [ 'pg_receivewal', '--slot' => $slot_name, '--create-slot' ],
     'creating a replication slot');
 my $slot = $primary->slot($slot_name);
-is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
-is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
+is($slot->{slot_type}, 'physical', 'physical replication slot was created');
+is($slot->{restart_lsn}, '', 'restart LSN of new slot is null');
 $primary->command_ok(
     [ 'pg_receivewal', '--slot' => $slot_name, '--drop-slot' ],
     'dropping a replication slot');
-is($primary->slot($slot_name)->{'slot_type'},
+is($primary->slot($slot_name)->{slot_type},
     '', 'replication slot was removed');
 
 # Generate some WAL.  Use --synchronous at the same time to add more
@@ -318,7 +318,7 @@ $primary->wait_for_catchup($standby);
 # Get a walfilename from before the promotion to make sure it is archived
 # after promotion
 my $standby_slot = $standby->slot($archive_slot);
-my $replication_slot_lsn = $standby_slot->{'restart_lsn'};
+my $replication_slot_lsn = $standby_slot->{restart_lsn};
 
 # pg_walfile_name() is not supported while in recovery, so use the primary
 # to build the segment name.  Both nodes are on the same timeline, so this
diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
index a6e10600161..c5b165cfe13 100644
--- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
+++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
@@ -53,7 +53,7 @@ $node->command_ok(
     'slot created');
 
 my $slot = $node->slot('test');
-isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
+isnt($slot->{restart_lsn}, '', 'restart lsn is defined for new slot');
 
 $node->psql('postgres', 'CREATE TABLE test_table(x integer)');
 $node->psql('postgres',
@@ -95,7 +95,7 @@ $node->command_ok(
     'slot with two-phase created');
 
 $slot = $node->slot('test');
-isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
+isnt($slot->{restart_lsn}, '', 'restart lsn is defined for new slot');
 
 $node->safe_psql('postgres',
     "BEGIN; INSERT INTO test_table values (11); PREPARE TRANSACTION 'test'");
diff --git a/src/bin/pg_combinebackup/t/008_promote.pl b/src/bin/pg_combinebackup/t/008_promote.pl
index 732f6397103..3a15983f4a1 100644
--- a/src/bin/pg_combinebackup/t/008_promote.pl
+++ b/src/bin/pg_combinebackup/t/008_promote.pl
@@ -52,7 +52,7 @@ EOM
 # then stop recovery at some arbitrary LSN, not just when it hits the end of
 # WAL, so use a recovery target.
 my $node2 = PostgreSQL::Test::Cluster->new('node2');
-$node2->init_from_backup($node1, 'backup1', 'has_streaming' => 1);
+$node2->init_from_backup($node1, 'backup1', has_streaming => 1);
 $node2->append_conf('postgresql.conf', <<EOM);
 recovery_target_lsn = '$lsn'
 recovery_target_action = 'pause'
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index c7bffc1b045..03c2d50740c 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -119,7 +119,7 @@ my %pgdump_runs = (
         # Give coverage for manually compressed blobs.toc files during
         # restore.
         compress_cmd => {
-            program => $ENV{'GZIP_PROGRAM'},
+            program => $ENV{GZIP_PROGRAM},
             args => [ '-f', "$tempdir/compression_gzip_dir/blobs_*.toc", ],
         },
         # Verify that only data files were compressed
@@ -147,7 +147,7 @@ my %pgdump_runs = (
         ],
         # Decompress the generated file to run through the tests.
         compress_cmd => {
-            program => $ENV{'GZIP_PROGRAM'},
+            program => $ENV{GZIP_PROGRAM},
             args => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ],
         },
     },
@@ -215,7 +215,7 @@ my %pgdump_runs = (
         ],
         # Decompress the generated file to run through the tests.
         compress_cmd => {
-            program => $ENV{'LZ4'},
+            program => $ENV{LZ4},
             args => [
                 '-d', '-f',
                 "$tempdir/compression_lz4_plain.sql.lz4",
@@ -263,7 +263,7 @@ my %pgdump_runs = (
         # Give coverage for manually compressed blobs.toc files during
         # restore.
         compress_cmd => {
-            program => $ENV{'ZSTD'},
+            program => $ENV{ZSTD},
             args => [
                 '-z', '-f',
                 '--rm', "$tempdir/compression_zstd_dir/blobs_*.toc",
@@ -295,7 +295,7 @@ my %pgdump_runs = (
         ],
         # Decompress the generated file to run through the tests.
         compress_cmd => {
-            program => $ENV{'ZSTD'},
+            program => $ENV{ZSTD},
             args => [
                 '-d', '-f',
                 "$tempdir/compression_zstd_plain.sql.zst", "-o",
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index 00051b85035..9438e407bc9 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -164,7 +164,7 @@ push @initdb_params, ('--lc-collate', $original_datcollate);
 push @initdb_params, ('--lc-ctype', $original_datctype);
 
 # add --locale-provider, if supported
-my %provider_name = ('b' => 'builtin', 'i' => 'icu', 'c' => 'libc');
+my %provider_name = (b => 'builtin', i => 'icu', c => 'libc');
 if ($oldnode->pg_version >= 15)
 {
     push @initdb_params,
diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl
index 84f23b8bc3d..2f280905a22 100644
--- a/src/bin/pg_verifybackup/t/003_corruption.pl
+++ b/src/bin/pg_verifybackup/t/003_corruption.pl
@@ -34,86 +34,86 @@ EOM
 
 my @scenario = (
     {
-        'name' => 'extra_file',
-        'mutilate' => \&mutilate_extra_file,
-        'fails_like' =>
+        name => 'extra_file',
+        mutilate => \&mutilate_extra_file,
+        fails_like =>
           qr/extra_file.*present (on disk|in "[^"]+") but not in the manifest/
     },
     {
-        'name' => 'extra_tablespace_file',
-        'mutilate' => \&mutilate_extra_tablespace_file,
-        'fails_like' =>
+        name => 'extra_tablespace_file',
+        mutilate => \&mutilate_extra_tablespace_file,
+        fails_like =>
           qr/extra_ts_file.*present (on disk|in "[^"]+") but not in the manifest/
     },
     {
-        'name' => 'missing_file',
-        'mutilate' => \&mutilate_missing_file,
-        'fails_like' =>
+        name => 'missing_file',
+        mutilate => \&mutilate_missing_file,
+        fails_like =>
           qr/pg_xact\/0000.*present in the manifest but not (on disk|in "[^"]+")/
     },
     {
-        'name' => 'missing_tablespace',
-        'mutilate' => \&mutilate_missing_tablespace,
-        'fails_like' =>
+        name => 'missing_tablespace',
+        mutilate => \&mutilate_missing_tablespace,
+        fails_like =>
           qr/pg_tblspc.*present in the manifest but not (on disk|in "[^"]+")/
     },
     {
-        'name' => 'append_to_file',
-        'mutilate' => \&mutilate_append_to_file,
-        'fails_like' =>
+        name => 'append_to_file',
+        mutilate => \&mutilate_append_to_file,
+        fails_like =>
           qr/has size \d+ (on disk|in "[^"]+") but size \d+ in the manifest/
     },
     {
-        'name' => 'truncate_file',
-        'mutilate' => \&mutilate_truncate_file,
-        'fails_like' =>
+        name => 'truncate_file',
+        mutilate => \&mutilate_truncate_file,
+        fails_like =>
           qr/has size 0 (on disk|in "[^"]+") but size \d+ in the manifest/
     },
     {
-        'name' => 'replace_file',
-        'mutilate' => \&mutilate_replace_file,
-        'fails_like' => qr/checksum mismatch for file/
+        name => 'replace_file',
+        mutilate => \&mutilate_replace_file,
+        fails_like => qr/checksum mismatch for file/
     },
     {
-        'name' => 'system_identifier',
-        'mutilate' => \&mutilate_system_identifier,
-        'fails_like' =>
+        name => 'system_identifier',
+        mutilate => \&mutilate_system_identifier,
+        fails_like =>
           qr/manifest system identifier is .*, but control file has/
     },
     {
-        'name' => 'bad_manifest',
-        'mutilate' => \&mutilate_bad_manifest,
-        'fails_like' => qr/manifest checksum mismatch/
+        name => 'bad_manifest',
+        mutilate => \&mutilate_bad_manifest,
+        fails_like => qr/manifest checksum mismatch/
     },
     {
-        'name' => 'open_file_fails',
-        'mutilate' => \&mutilate_open_file_fails,
-        'fails_like' => qr/could not open file/,
-        'needs_unix_permissions' => 1
+        name => 'open_file_fails',
+        mutilate => \&mutilate_open_file_fails,
+        fails_like => qr/could not open file/,
+        needs_unix_permissions => 1
     },
     {
-        'name' => 'open_directory_fails',
-        'mutilate' => \&mutilate_open_directory_fails,
-        'cleanup' => \&cleanup_open_directory_fails,
-        'fails_like' => qr/could not open directory/,
-        'needs_unix_permissions' => 1
+        name => 'open_directory_fails',
+        mutilate => \&mutilate_open_directory_fails,
+        cleanup => \&cleanup_open_directory_fails,
+        fails_like => qr/could not open directory/,
+        needs_unix_permissions => 1
     },
     {
-        'name' => 'search_directory_fails',
-        'mutilate' => \&mutilate_search_directory_fails,
-        'cleanup' => \&cleanup_search_directory_fails,
-        'fails_like' => qr/could not stat file or directory/,
-        'needs_unix_permissions' => 1
+        name => 'search_directory_fails',
+        mutilate => \&mutilate_search_directory_fails,
+        cleanup => \&cleanup_search_directory_fails,
+        fails_like => qr/could not stat file or directory/,
+        needs_unix_permissions => 1
     });
 
 for my $scenario (@scenario)
 {
-    my $name = $scenario->{'name'};
+    my $name = $scenario->{name};
 
   SKIP:
     {
         skip "unix-style permissions not supported on Windows", 4
-          if ($scenario->{'needs_unix_permissions'}
+          if ($scenario->{needs_unix_permissions}
             && ($windows_os || $Config::Config{osname} eq 'cygwin'));
 
         # Take a backup and check that it verifies OK.
@@ -137,23 +137,23 @@ for my $scenario (@scenario)
             "intact backup verified");
 
         # Mutilate the backup in some way.
-        $scenario->{'mutilate'}->($backup_path);
+        $scenario->{mutilate}->($backup_path);
 
         # Now check that the backup no longer verifies.
         command_fails_like(
             [ 'pg_verifybackup', $backup_path ],
-            $scenario->{'fails_like'},
+            $scenario->{fails_like},
             "corrupt backup fails verification: $name");
 
         # Run cleanup hook, if provided.
-        $scenario->{'cleanup'}->($backup_path)
-          if exists $scenario->{'cleanup'};
+        $scenario->{cleanup}->($backup_path)
+          if exists $scenario->{cleanup};
 
         # Turn it into a tar-format backup and see if we can still detect the
         # same problem, unless the scenario needs UNIX permissions or we don't
         # have a TAR program available. Note that this destructively modifies
         # the backup directory.
-        if (   !$scenario->{'needs_unix_permissions'}
+        if (   !$scenario->{needs_unix_permissions}
             || !defined $tar
             || $tar eq '')
         {
@@ -197,7 +197,7 @@ for my $scenario (@scenario)
             # here, because pg_waldump can't yet read WAL from a tarfile.
             command_fails_like(
                 [ 'pg_verifybackup', '--no-parse-wal', $tar_backup_path ],
-                $scenario->{'fails_like'},
+                $scenario->{fails_like},
                 "corrupt backup fails verification: $name");
 
             # Use rmtree to reclaim space.
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index deed3ec247d..a98a28424fe 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -35,48 +35,48 @@ my $extract_path = $primary->backup_dir . '/extracted-backup';
 
 my @test_configuration = (
     {
-        'compression_method' => 'none',
-        'backup_flags' => [],
-        'backup_archive' => [ 'base.tar', "$tsoid.tar" ],
-        'enabled' => 1
+        compression_method => 'none',
+        backup_flags => [],
+        backup_archive => [ 'base.tar', "$tsoid.tar" ],
+        enabled => 1
     },
     {
-        'compression_method' => 'gzip',
-        'backup_flags' => [ '--compress', 'server-gzip' ],
-        'backup_archive' => [ 'base.tar.gz', "$tsoid.tar.gz" ],
-        'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+        compression_method => 'gzip',
+        backup_flags => [ '--compress', 'server-gzip' ],
+        backup_archive => [ 'base.tar.gz', "$tsoid.tar.gz" ],
+        enabled => check_pg_config("#define HAVE_LIBZ 1")
     },
     {
-        'compression_method' => 'lz4',
-        'backup_flags' => [ '--compress', 'server-lz4' ],
-        'backup_archive' => [ 'base.tar.lz4', "$tsoid.tar.lz4" ],
-        'enabled' => check_pg_config("#define USE_LZ4 1")
+        compression_method => 'lz4',
+        backup_flags => [ '--compress', 'server-lz4' ],
+        backup_archive => [ 'base.tar.lz4', "$tsoid.tar.lz4" ],
+        enabled => check_pg_config("#define USE_LZ4 1")
     },
     {
-        'compression_method' => 'zstd',
-        'backup_flags' => [ '--compress', 'server-zstd' ],
-        'backup_archive' => [ 'base.tar.zst', "$tsoid.tar.zst" ],
-        'enabled' => check_pg_config("#define USE_ZSTD 1")
+        compression_method => 'zstd',
+        backup_flags => [ '--compress', 'server-zstd' ],
+        backup_archive => [ 'base.tar.zst', "$tsoid.tar.zst" ],
+        enabled => check_pg_config("#define USE_ZSTD 1")
     },
     {
-        'compression_method' => 'zstd',
-        'backup_flags' => [ '--compress', 'server-zstd:level=1,long' ],
-        'backup_archive' => [ 'base.tar.zst', "$tsoid.tar.zst" ],
-        'enabled' => check_pg_config("#define USE_ZSTD 1")
+        compression_method => 'zstd',
+        backup_flags => [ '--compress', 'server-zstd:level=1,long' ],
+        backup_archive => [ 'base.tar.zst', "$tsoid.tar.zst" ],
+        enabled => check_pg_config("#define USE_ZSTD 1")
     });
 
 for my $tc (@test_configuration)
 {
-    my $method = $tc->{'compression_method'};
+    my $method = $tc->{compression_method};
 
   SKIP:
     {
         skip "$method compression not supported by this build", 3
-          if !$tc->{'enabled'};
+          if !$tc->{enabled};
         skip "no decompressor available for $method", 3
-          if exists $tc->{'decompress_program'}
-          && (!defined $tc->{'decompress_program'}
-            || $tc->{'decompress_program'} eq '');
+          if exists $tc->{decompress_program}
+          && (!defined $tc->{decompress_program}
+            || $tc->{decompress_program} eq '');
 
         # Take a server-side backup.
         $primary->command_ok(
@@ -85,7 +85,7 @@ for my $tc (@test_configuration)
                 '--checkpoint' => 'fast',
                 '--target' => "server:$backup_path",
                 '--wal-method' => 'fetch',
-                @{ $tc->{'backup_flags'} },
+                @{ $tc->{backup_flags} },
             ],
             "server side backup, compression $method");
 
@@ -94,7 +94,7 @@ for my $tc (@test_configuration)
         my $backup_files = join(',',
             sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
         my $expected_backup_files =
-          join(',', sort ('backup_manifest', @{ $tc->{'backup_archive'} }));
+          join(',', sort ('backup_manifest', @{ $tc->{backup_archive} }));
         is($backup_files, $expected_backup_files,
             "found expected backup files, compression $method");
 
diff --git a/src/bin/pg_verifybackup/t/009_extract.pl b/src/bin/pg_verifybackup/t/009_extract.pl
index 25605291217..04d378415d3 100644
--- a/src/bin/pg_verifybackup/t/009_extract.pl
+++ b/src/bin/pg_verifybackup/t/009_extract.pl
@@ -16,42 +16,42 @@ $primary->start;
 
 my @test_configuration = (
     {
-        'compression_method' => 'none',
-        'backup_flags' => [],
-        'enabled' => 1
+        compression_method => 'none',
+        backup_flags => [],
+        enabled => 1
     },
     {
-        'compression_method' => 'gzip',
-        'backup_flags' => [ '--compress', 'server-gzip:5' ],
-        'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+        compression_method => 'gzip',
+        backup_flags => [ '--compress', 'server-gzip:5' ],
+        enabled => check_pg_config("#define HAVE_LIBZ 1")
     },
     {
-        'compression_method' => 'lz4',
-        'backup_flags' => [ '--compress', 'server-lz4:5' ],
-        'enabled' => check_pg_config("#define USE_LZ4 1")
+        compression_method => 'lz4',
+        backup_flags => [ '--compress', 'server-lz4:5' ],
+        enabled => check_pg_config("#define USE_LZ4 1")
     },
     {
-        'compression_method' => 'zstd',
-        'backup_flags' => [ '--compress', 'server-zstd:5' ],
-        'enabled' => check_pg_config("#define USE_ZSTD 1")
+        compression_method => 'zstd',
+        backup_flags => [ '--compress', 'server-zstd:5' ],
+        enabled => check_pg_config("#define USE_ZSTD 1")
     },
     {
-        'compression_method' => 'parallel zstd',
-        'backup_flags' => [ '--compress', 'server-zstd:workers=3' ],
-        'enabled' => check_pg_config("#define USE_ZSTD 1"),
-        'possibly_unsupported' =>
+        compression_method => 'parallel zstd',
+        backup_flags => [ '--compress', 'server-zstd:workers=3' ],
+        enabled => check_pg_config("#define USE_ZSTD 1"),
+        possibly_unsupported =>
           qr/could not set compression worker count to 3: Unsupported parameter/
     });
 
 for my $tc (@test_configuration)
 {
     my $backup_path = $primary->backup_dir . '/' . 'extract_backup';
-    my $method = $tc->{'compression_method'};
+    my $method = $tc->{compression_method};
 
   SKIP:
     {
         skip "$method compression not supported by this build", 2
-          if !$tc->{'enabled'};
+          if !$tc->{enabled};
 
         # A backup with a valid compression method should work.
         my $backup_stdout = '';
@@ -77,8 +77,8 @@ for my $tc (@test_configuration)
             print "# standard error was:\n$backup_stderr";
         }
         if (  !$backup_result
-            && $tc->{'possibly_unsupported'}
-            && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+            && $tc->{possibly_unsupported}
+            && $backup_stderr =~ /$tc->{possibly_unsupported}/)
         {
             skip "compression with $method not supported by this build", 2;
         }
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index d8d2b06c7ee..acc3dfdfe20 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -20,56 +20,56 @@ my $extract_path = $primary->backup_dir . '/extracted-backup';
 
 my @test_configuration = (
     {
-        'compression_method' => 'none',
-        'backup_flags' => [],
-        'backup_archive' => 'base.tar',
-        'enabled' => 1
+        compression_method => 'none',
+        backup_flags => [],
+        backup_archive => 'base.tar',
+        enabled => 1
     },
     {
-        'compression_method' => 'gzip',
-        'backup_flags' => [ '--compress', 'client-gzip:5' ],
-        'backup_archive' => 'base.tar.gz',
-        'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+        compression_method => 'gzip',
+        backup_flags => [ '--compress', 'client-gzip:5' ],
+        backup_archive => 'base.tar.gz',
+        enabled => check_pg_config("#define HAVE_LIBZ 1")
     },
     {
-        'compression_method' => 'lz4',
-        'backup_flags' => [ '--compress', 'client-lz4:5' ],
-        'backup_archive' => 'base.tar.lz4',
-        'enabled' => check_pg_config("#define USE_LZ4 1")
+        compression_method => 'lz4',
+        backup_flags => [ '--compress', 'client-lz4:5' ],
+        backup_archive => 'base.tar.lz4',
+        enabled => check_pg_config("#define USE_LZ4 1")
     },
     {
-        'compression_method' => 'zstd',
-        'backup_flags' => [ '--compress', 'client-zstd:5' ],
-        'backup_archive' => 'base.tar.zst',
-        'enabled' => check_pg_config("#define USE_ZSTD 1")
+        compression_method => 'zstd',
+        backup_flags => [ '--compress', 'client-zstd:5' ],
+        backup_archive => 'base.tar.zst',
+        enabled => check_pg_config("#define USE_ZSTD 1")
     },
     {
-        'compression_method' => 'zstd',
-        'backup_flags' => [ '--compress', 'client-zstd:level=1,long' ],
-        'backup_archive' => 'base.tar.zst',
-        'enabled' => check_pg_config("#define USE_ZSTD 1")
+        compression_method => 'zstd',
+        backup_flags => [ '--compress', 'client-zstd:level=1,long' ],
+        backup_archive => 'base.tar.zst',
+        enabled => check_pg_config("#define USE_ZSTD 1")
     },
     {
-        'compression_method' => 'parallel zstd',
-        'backup_flags' => [ '--compress', 'client-zstd:workers=3' ],
-        'backup_archive' => 'base.tar.zst',
-        'enabled' => check_pg_config("#define USE_ZSTD 1"),
-        'possibly_unsupported' =>
+        compression_method => 'parallel zstd',
+        backup_flags => [ '--compress', 'client-zstd:workers=3' ],
+        backup_archive => 'base.tar.zst',
+        enabled => check_pg_config("#define USE_ZSTD 1"),
+        possibly_unsupported =>
           qr/could not set compression worker count to 3: Unsupported parameter/
     });
 
 for my $tc (@test_configuration)
 {
-    my $method = $tc->{'compression_method'};
+    my $method = $tc->{compression_method};
 
   SKIP:
     {
         skip "$method compression not supported by this build", 3
-          if !$tc->{'enabled'};
+          if !$tc->{enabled};
         skip "no decompressor available for $method", 3
-          if exists $tc->{'decompress_program'}
-          && (!defined $tc->{'decompress_program'}
-            || $tc->{'decompress_program'} eq '');
+          if exists $tc->{decompress_program}
+          && (!defined $tc->{decompress_program}
+            || $tc->{decompress_program} eq '');
 
         # Take a client-side backup.
         my $backup_stdout = '';
@@ -81,7 +81,7 @@ for my $tc (@test_configuration)
                 '--wal-method' => 'fetch',
                 '--checkpoint' => 'fast',
                 '--format' => 'tar',
-                @{ $tc->{'backup_flags'} }
+                @{ $tc->{backup_flags} }
             ],
             '>' => \$backup_stdout,
             '2>' => \$backup_stderr);
@@ -94,8 +94,8 @@ for my $tc (@test_configuration)
             print "# standard error was:\n$backup_stderr";
         }
         if (  !$backup_result
-            && $tc->{'possibly_unsupported'}
-            && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+            && $tc->{possibly_unsupported}
+            && $backup_stderr =~ /$tc->{possibly_unsupported}/)
         {
             skip "compression with $method not supported by this build", 3;
         }
@@ -108,7 +108,7 @@ for my $tc (@test_configuration)
         my $backup_files = join(',',
             sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
         my $expected_backup_files =
-          join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+          join(',', sort ('backup_manifest', $tc->{backup_archive}));
         is($backup_files, $expected_backup_files,
             "found expected backup files, compression $method");
 
diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl
index f22ca213c21..257174e277e 100644
--- a/src/interfaces/ecpg/preproc/parse.pl
+++ b/src/interfaces/ecpg/preproc/parse.pl
@@ -39,12 +39,12 @@ GetOptions(
 
 # Substitutions to apply to tokens whenever they are seen in a rule.
 my %replace_token = (
-    'BCONST' => 'ecpg_bconst',
-    'FCONST' => 'ecpg_fconst',
-    'Sconst' => 'ecpg_sconst',
-    'XCONST' => 'ecpg_xconst',
-    'IDENT' => 'ecpg_ident',
-    'PARAM' => 'ecpg_param',);
+    BCONST => 'ecpg_bconst',
+    FCONST => 'ecpg_fconst',
+    Sconst => 'ecpg_sconst',
+    XCONST => 'ecpg_xconst',
+    IDENT => 'ecpg_ident',
+    PARAM => 'ecpg_param',);
 
 my %replace_token_used;
 
@@ -53,24 +53,24 @@ my %replace_token_used;
 # for that nonterminal.  (In either case, ecpg.trailer had better provide
 # a substitute rule, since the default won't do.)
 my %replace_types = (
-    'PrepareStmt' => '<prep>',
-    'ExecuteStmt' => '<exec>',
-    'opt_array_bounds' => '<index>',
+    PrepareStmt => '<prep>',
+    ExecuteStmt => '<exec>',
+    opt_array_bounds => '<index>',
 
     # "ignore" means: do not create type and rules for this nonterminal
-    'parse_toplevel' => 'ignore',
-    'stmtmulti' => 'ignore',
-    'CreateAsStmt' => 'ignore',
-    'DeallocateStmt' => 'ignore',
-    'ColId' => 'ignore',
-    'type_function_name' => 'ignore',
-    'ColLabel' => 'ignore',
-    'Sconst' => 'ignore',
-    'opt_distinct_clause' => 'ignore',
-    'PLpgSQL_Expr' => 'ignore',
-    'PLAssignStmt' => 'ignore',
-    'plassign_target' => 'ignore',
-    'plassign_equals' => 'ignore',);
+    parse_toplevel => 'ignore',
+    stmtmulti => 'ignore',
+    CreateAsStmt => 'ignore',
+    DeallocateStmt => 'ignore',
+    ColId => 'ignore',
+    type_function_name => 'ignore',
+    ColLabel => 'ignore',
+    Sconst => 'ignore',
+    opt_distinct_clause => 'ignore',
+    PLpgSQL_Expr => 'ignore',
+    PLAssignStmt => 'ignore',
+    plassign_target => 'ignore',
+    plassign_equals => 'ignore',);
 
 my %replace_types_used;
 
@@ -565,7 +565,7 @@ sub emit_rule_action
     # Emit the addons entry's code block.
     # We have an array to add to the buffer, we'll add it directly instead of
     # calling add_to_buffer, which does not know about arrays.
-    push(@{ $buff{'rules'} }, @{ $rec->{lines} });
+    push(@{ $buff{rules} }, @{ $rec->{lines} });
 
     if ($rectype eq 'addon')
     {
@@ -686,8 +686,8 @@ sub emit_rule
 =top
     load ecpg.addons into %addons hash.  The result is something like
     %addons = {
-        'stmt ClosePortalStmt' => { 'type' => 'block', 'lines' => [ "{", "if (INFORMIX_MODE)" ..., "}" ], 'used' => 0 },
-        'stmt ViewStmt' => { 'type' => 'rule', 'lines' => [ "| ECPGAllocateDescr", ... ], 'used' => 0 }
+        'stmt ClosePortalStmt' => { type => 'block', lines => [ "{", "if (INFORMIX_MODE)" ..., "}" ], used => 0 },
+        'stmt ViewStmt' => { type => 'rule', lines => [ "| ECPGAllocateDescr", ... ], used => 0 }
     }
 
 =cut
diff --git a/src/interfaces/libpq/t/001_uri.pl b/src/interfaces/libpq/t/001_uri.pl
index b0edcb3be88..bc797cc85f5 100644
--- a/src/interfaces/libpq/t/001_uri.pl
+++ b/src/interfaces/libpq/t/001_uri.pl
@@ -264,7 +264,7 @@ sub test_uri
 
     ($uri, $expect{stdout}, $expect{stderr}, %envvars) = @$_;
 
-    $expect{'exit'} = $expect{stderr} eq '';
+    $expect{exit} = $expect{stderr} eq '';
     %ENV = (%ENV, %envvars);
 
     my $cmd = [ 'libpq_uri_regress', $uri ];
diff --git a/src/pl/plperl/plc_perlboot.pl b/src/pl/plperl/plc_perlboot.pl
index 28a1a4cd6f2..6c1100981fa 100644
--- a/src/pl/plperl/plc_perlboot.pl
+++ b/src/pl/plperl/plc_perlboot.pl
@@ -116,12 +116,12 @@ sub ::encode_array_constructor
     sub to_str
     {
         my $self = shift;
-        return ::encode_typed_literal($self->{'array'}, $self->{'typeoid'});
+        return ::encode_typed_literal($self->{array}, $self->{typeoid});
     }
 
     sub to_arr
     {
-        return shift->{'array'};
+        return shift->{array};
     }
 
     1;
diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl
index 8269c470b59..00fa0cf4c43 100644
--- a/src/test/authentication/t/001_password.pl
+++ b/src/test/authentication/t/001_password.pl
@@ -142,7 +142,7 @@ $node->safe_psql(
     'postgres',
     "CREATE TABLE sysuser_data (n) AS SELECT NULL FROM generate_series(1, 10);
      GRANT ALL ON sysuser_data TO scram_role;");
-$ENV{"PGPASSWORD"} = 'pass';
+$ENV{PGPASSWORD} = 'pass';
 
 # Create a role that contains a comma to stress the parsing.
 $node->safe_psql('postgres',
@@ -465,10 +465,10 @@ $node->connect_fails(
     expected_stderr => qr/server requested SCRAM-SHA-256 authentication/);
 
 # Test that bad passwords are rejected.
-$ENV{"PGPASSWORD"} = 'badpass';
+$ENV{PGPASSWORD} = 'badpass';
 test_conn($node, 'user=scram_role', 'scram-sha-256', 2,
     log_unlike => [qr/connection authenticated:/]);
-$ENV{"PGPASSWORD"} = 'pass';
+$ENV{PGPASSWORD} = 'pass';
 
 # For "md5" method, all users should be able to connect (SCRAM
 # authentication will be performed for the user with a SCRAM secret.)
@@ -550,19 +550,19 @@ is($res, 't',
 # Tests for channel binding without SSL.
 # Using the password authentication method; channel binding can't work
 reset_pg_hba($node, 'all', 'all', 'password');
-$ENV{"PGCHANNELBINDING"} = 'require';
+$ENV{PGCHANNELBINDING} = 'require';
 test_conn($node, 'user=scram_role', 'scram-sha-256', 2);
 # SSL not in use; channel binding still can't work
 reset_pg_hba($node, 'all', 'all', 'scram-sha-256');
-$ENV{"PGCHANNELBINDING"} = 'require';
+$ENV{PGCHANNELBINDING} = 'require';
 test_conn($node, 'user=scram_role', 'scram-sha-256', 2);
 
 # Test .pgpass processing; but use a temp file, don't overwrite the real one!
 my $pgpassfile = "${PostgreSQL::Test::Utils::tmp_check}/pgpass";
 
-delete $ENV{"PGPASSWORD"};
-delete $ENV{"PGCHANNELBINDING"};
-$ENV{"PGPASSFILE"} = $pgpassfile;
+delete $ENV{PGPASSWORD};
+delete $ENV{PGCHANNELBINDING};
+$ENV{PGPASSFILE} = $pgpassfile;
 
 unlink($pgpassfile);
 append_to_file(
@@ -633,7 +633,7 @@ test_conn(
     2, log_unlike => [qr/connection authenticated:/]);
 
 unlink($pgpassfile);
-delete $ENV{"PGPASSFILE"};
+delete $ENV{PGPASSFILE};
 
 note "Authentication tests with specific HBA policies on roles";
 
@@ -648,7 +648,7 @@ CREATE ROLE regress_member LOGIN SUPERUSER IN ROLE regress_regression_group PASS
 CREATE ROLE regress_not_member LOGIN SUPERUSER PASSWORD 'pass';});
 
 # Test role with exact matching, no members allowed.
-$ENV{"PGPASSWORD"} = 'pass';
+$ENV{PGPASSWORD} = 'pass';
 reset_pg_hba($node, 'all', 'regress_regression_group', 'scram-sha-256');
 test_conn(
     $node,
@@ -704,7 +704,7 @@ test_conn(
     ]);
 
 # Test role membership is respected for samerole
-$ENV{"PGDATABASE"} = 'regress_regression_group';
+$ENV{PGDATABASE} = 'regress_regression_group';
 reset_pg_hba($node, 'samerole', 'all', 'scram-sha-256');
 test_conn(
     $node,
diff --git a/src/test/authentication/t/002_saslprep.pl b/src/test/authentication/t/002_saslprep.pl
index cdf0f965252..0a38471e339 100644
--- a/src/test/authentication/t/002_saslprep.pl
+++ b/src/test/authentication/t/002_saslprep.pl
@@ -46,7 +46,7 @@ sub test_login
     my $testname =
       "authentication $status_string for role $role with password $password";
 
-    $ENV{"PGPASSWORD"} = $password;
+    $ENV{PGPASSWORD} = $password;
     if ($expected_res eq 0)
     {
         $node->connect_ok($connstr, $testname);
diff --git a/src/test/authentication/t/004_file_inclusion.pl b/src/test/authentication/t/004_file_inclusion.pl
index b9d3663542d..4f6a94a542c 100644
--- a/src/test/authentication/t/004_file_inclusion.pl
+++ b/src/test/authentication/t/004_file_inclusion.pl
@@ -21,7 +21,7 @@ if (!$use_unix_sockets)
 # are used to respectively track pg_hba_file_rules.rule_number and
 # pg_ident_file_mappings.map_number, which are the global counters associated
 # to each view tracking the priority of each entry processed.
-my %line_counters = ('hba_rule' => 0, 'ident_rule' => 0);
+my %line_counters = (hba_rule => 0, ident_rule => 0);
 
 # Add some data to the given HBA configuration file, generating the contents
 # expected to match pg_hba_file_rules.
@@ -61,7 +61,7 @@ sub add_hba_line
     return '' if ($entry =~ qr/^include/);
 
     # Increment pg_hba_file_rules.rule_number and save it.
-    $globline = ++$line_counters{'hba_rule'};
+    $globline = ++$line_counters{hba_rule};
 
     # Generate the expected pg_hba_file_rules line
     @tokens = split(/ /, $entry);
@@ -119,7 +119,7 @@ sub add_ident_line
     return '' if ($entry =~ qr/^include/);
 
     # Increment pg_ident_file_mappings.map_number and get it.
-    $globline = ++$line_counters{'ident_rule'};
+    $globline = ++$line_counters{ident_rule};
 
     # Generate the expected pg_ident_file_mappings line
     @tokens = split(/ /, $entry);
@@ -213,7 +213,7 @@ add_hba_line($node, $hba_file, 'local @../dbnames.conf all reject');
 $node->append_conf('dbnames.conf', "db1");
 $node->append_conf('dbnames.conf', "db3");
 $hba_expected .= "\n"
-  . $line_counters{'hba_rule'} . "|"
+  . $line_counters{hba_rule} . "|"
   . basename($hba_file) . "|"
   . $line_counters{$hba_file}
   . '|local|{db1,db3}|{all}|reject||';
diff --git a/src/test/ldap/LdapServer.pm b/src/test/ldap/LdapServer.pm
index 58619a3db0a..3782f5f3ce6 100644
--- a/src/test/ldap/LdapServer.pm
+++ b/src/test/ldap/LdapServer.pm
@@ -290,8 +290,8 @@ sub _ldapenv
 {
     my $self = shift;
     my %env = %ENV;
-    $env{'LDAPURI'} = $self->{url};
-    $env{'LDAPBINDDN'} = $self->{rootdn};
+    $env{LDAPURI} = $self->{url};
+    $env{LDAPBINDDN} = $self->{rootdn};
     return %env;
 }
 
diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl
index 352b0fc1fa7..45025403b14 100644
--- a/src/test/ldap/t/001_auth.pl
+++ b/src/test/ldap/t/001_auth.pl
@@ -41,7 +41,7 @@ my ($ldap_server, $ldap_port, $ldaps_port, $ldap_url,
 ) = $ldap->prop(qw(server port s_port url s_url basedn rootdn));
 
 # don't bother to check the server's cert (though perhaps we should)
-$ENV{'LDAPTLS_REQCERT'} = "never";
+$ENV{LDAPTLS_REQCERT} = "never";
 
 note "setting up PostgreSQL instance";
 
@@ -82,7 +82,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access(
     $node, 'test0', 2,
     'simple bind authentication fails if user not found in LDAP',
@@ -92,7 +92,7 @@ test_access(
     'simple bind authentication fails with wrong password',
     log_unlike => [qr/connection authenticated:/]);
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access(
     $node, 'test1', 0,
     'simple bind authentication succeeds',
@@ -114,12 +114,12 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access($node, 'test0', 2,
     'search+bind authentication fails if user not found in LDAP');
 test_access($node, 'test1', 2,
     'search+bind authentication fails with wrong password');
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access(
     $node, 'test1', 0,
     'search+bind authentication succeeds',
@@ -135,12 +135,12 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access($node, 'test0', 2,
     'search+bind authentication fails if user not found in LDAP');
 test_access($node, 'test1', 2,
     'search+bind authentication fails with wrong password');
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'search+bind authentication succeeds');
 
 note "LDAP URLs";
@@ -151,13 +151,13 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access($node, 'test0', 2,
     'simple bind with LDAP URL authentication fails if user not found in LDAP'
 );
 test_access($node, 'test1', 2,
     'simple bind with LDAP URL authentication fails with wrong password');
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0,
     'simple bind with LDAP URL authentication succeeds');
 
@@ -166,13 +166,13 @@ $node->append_conf('pg_hba.conf',
     qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn?uid?sub"});
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access($node, 'test0', 2,
     'search+bind with LDAP URL authentication fails if user not found in LDAP'
 );
 test_access($node, 'test1', 2,
     'search+bind with LDAP URL authentication fails with wrong password');
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0,
     'search+bind with LDAP URL authentication succeeds');
 
@@ -184,14 +184,14 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access(
     $node, 'test1', 0,
     'search filter finds by uid',
     log_like => [
         qr/connection authenticated: identity="uid=test1,dc=example,dc=net" method=ldap/
     ],);
-$ENV{"PGPASSWORD"} = 'secret2';
+$ENV{PGPASSWORD} = 'secret2';
 test_access(
     $node,
     'test2@example.net',
@@ -209,9 +209,9 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'search filter finds by uid');
-$ENV{"PGPASSWORD"} = 'secret2';
+$ENV{PGPASSWORD} = 'secret2';
 test_access($node, 'test2@example.net', 0, 'search filter finds by mail');
 
 # This is not documented: You can combine ldapurl and other ldap*
@@ -223,7 +223,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'combined LDAP URL and search filter');
 
 note "diagnostic message";
@@ -235,7 +235,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 2, 'any attempt fails due to bad search pattern');
 
 note "TLS";
@@ -247,7 +247,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'StartTLS');
 
 # request LDAPS with ldapscheme=ldaps
@@ -257,7 +257,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'LDAPS');
 
 # request LDAPS with ldapurl=ldaps://...
@@ -267,7 +267,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'LDAPS with URL');
 
 # bad combination of LDAPS and StartTLS
@@ -277,7 +277,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 2, 'bad combination of LDAPS and StartTLS');
 
 done_testing();
diff --git a/src/test/ldap/t/002_bindpasswd.pl b/src/test/ldap/t/002_bindpasswd.pl
index f8beba2b279..c5384d32088 100644
--- a/src/test/ldap/t/002_bindpasswd.pl
+++ b/src/test/ldap/t/002_bindpasswd.pl
@@ -78,7 +78,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 2,
     'search+bind authentication fails with wrong ldapbindpasswd');
 
diff --git a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
index 9b062e1c800..30a3018b35b 100644
--- a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
+++ b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
@@ -71,7 +71,7 @@ sub test_access
 
 note "use ldapbindpasswd";
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 
 unlink($node->data_dir . '/pg_hba.conf');
 $node->append_conf('pg_hba.conf',
diff --git a/src/test/modules/oauth_validator/t/001_server.pl b/src/test/modules/oauth_validator/t/001_server.pl
index 6fa59fbeb25..97dbac68ef5 100644
--- a/src/test/modules/oauth_validator/t/001_server.pl
+++ b/src/test/modules/oauth_validator/t/001_server.pl
@@ -173,20 +173,20 @@ $user = "test";
 foreach my $c (@cases)
 {
     my $connstr =
-      "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id=f02c6361-0635
require_auth=$c->{'require_auth'}";
+      "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id=f02c6361-0635
require_auth=$c->{require_auth}";
 
-    if (defined $c->{'failure'})
+    if (defined $c->{failure})
     {
         $node->connect_fails(
             $connstr,
-            "require_auth=$c->{'require_auth'} fails",
-            expected_stderr => $c->{'failure'});
+            "require_auth=$c->{require_auth} fails",
+            expected_stderr => $c->{failure});
     }
     else
     {
         $node->connect_ok(
             $connstr,
-            "require_auth=$c->{'require_auth'} succeeds",
+            "require_auth=$c->{require_auth} succeeds",
             expected_stderr =>
               qr@Visit https://example\.com/ and enter the code: postgresuser@
         );
diff --git a/src/test/modules/oauth_validator/t/002_client.pl b/src/test/modules/oauth_validator/t/002_client.pl
index ab83258d736..c3453674b89 100644
--- a/src/test/modules/oauth_validator/t/002_client.pl
+++ b/src/test/modules/oauth_validator/t/002_client.pl
@@ -146,9 +146,9 @@ my @cases = (
 foreach my $c (@cases)
 {
     test(
-        "hook misbehavior: $c->{'flag'}",
-        flags => [ $c->{'flag'} ],
-        expected_stderr => $c->{'expected_error'});
+        "hook misbehavior: $c->{flag}",
+        flags => [ $c->{flag} ],
+        expected_stderr => $c->{expected_error});
 }
 
 done_testing();
diff --git a/src/test/modules/oauth_validator/t/OAuth/Server.pm b/src/test/modules/oauth_validator/t/OAuth/Server.pm
index 655b2870b0b..71586b86a62 100644
--- a/src/test/modules/oauth_validator/t/OAuth/Server.pm
+++ b/src/test/modules/oauth_validator/t/OAuth/Server.pm
@@ -74,7 +74,7 @@ sub port
 {
     my $self = shift;
 
-    return $self->{'port'};
+    return $self->{port};
 }
 
 =pod
@@ -102,9 +102,9 @@ sub run
     die "server did not advertise a valid port"
       unless Scalar::Util::looks_like_number($port);
 
-    $self->{'pid'} = $pid;
-    $self->{'port'} = $port;
-    $self->{'child'} = $read_fh;
+    $self->{pid} = $pid;
+    $self->{port} = $port;
+    $self->{child} = $read_fh;
 
     note("OAuth provider (PID $pid) is listening on port $port\n");
 }
@@ -121,14 +121,14 @@ sub stop
 {
     my $self = shift;
 
-    note("Sending SIGTERM to OAuth provider PID: $self->{'pid'}\n");
+    note("Sending SIGTERM to OAuth provider PID: $self->{pid}\n");
 
-    kill(15, $self->{'pid'});
-    $self->{'pid'} = undef;
+    kill(15, $self->{pid});
+    $self->{pid} = undef;
 
     # Closing the popen() handle waits for the process to exit.
-    close($self->{'child'});
-    $self->{'child'} = undef;
+    close($self->{child});
+    $self->{child} = undef;
 }
 
 =pod
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
index 3c3c6db3512..e67fec299f4 100644
--- a/src/test/modules/test_pg_dump/t/001_base.pl
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -869,7 +869,7 @@ my %tests = (
 # Create a PG instance to test actually dumping from
 
 my $node = PostgreSQL::Test::Cluster->new('main');
-$node->init('auth_extra' => [ '--create-role' => 'regress_dump_login_role' ]);
+$node->init(auth_extra => [ '--create-role' => 'regress_dump_login_role' ]);
 $node->start;
 
 my $port = $node->port;
diff --git a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
index 81a8f44aa9f..702982dd3e6 100644
--- a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
+++ b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
@@ -188,7 +188,7 @@ sub adjust_database_contents
         }
 
         # this table had OIDs too, but we'll just drop it
-        if ($old_version >= 10 && $dbnames{'contrib_regression_postgres_fdw'})
+        if ($old_version >= 10 && $dbnames{contrib_regression_postgres_fdw})
         {
             _add_st(
                 $result,
diff --git a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
index 60bbd5dd445..3c2a6751f39 100644
--- a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
+++ b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
@@ -86,11 +86,11 @@ sub new
     my $class = shift;
     my ($interactive, $psql_params, $timeout, $wait) = @_;
     my $psql = {
-        'stdin' => '',
-        'stdout' => '',
-        'stderr' => '',
-        'query_timer_restart' => undef,
-        'query_cnt' => 1,
+        stdin => '',
+        stdout => '',
+        stderr => '',
+        query_timer_restart => undef,
+        query_cnt => 1,
     };
     my $run;
 
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 0750915a9a8..d8690fce351 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -1019,7 +1019,7 @@ sub init_from_backup
         PostgreSQL::Test::RecursiveCopy::copypath(
             $backup_path,
             $data_path,
-            'filterfn' => sub {
+            filterfn => sub {
                 my ($path) = @_;
                 if ($path =~ /^pg_tblspc\/(\d+)$/
                     && exists $params{tablespace_map}{$1})
@@ -1936,7 +1936,7 @@ END
         $node->teardown_node(fail_ok => 1);
 
         # skip clean if we are requested to retain the basedir
-        next if defined $ENV{'PG_TEST_NOCLEAN'};
+        next if defined $ENV{PG_TEST_NOCLEAN};
 
         # clean basedir on clean test invocation
         $node->clean_node
@@ -2977,11 +2977,11 @@ sub lsn
 {
     my ($self, $mode) = @_;
     my %modes = (
-        'insert' => 'pg_current_wal_insert_lsn()',
-        'flush' => 'pg_current_wal_flush_lsn()',
-        'write' => 'pg_current_wal_lsn()',
-        'receive' => 'pg_last_wal_receive_lsn()',
-        'replay' => 'pg_last_wal_replay_lsn()');
+        insert => 'pg_current_wal_insert_lsn()',
+        flush => 'pg_current_wal_flush_lsn()',
+        write => 'pg_current_wal_lsn()',
+        receive => 'pg_last_wal_receive_lsn()',
+        replay => 'pg_last_wal_replay_lsn()');
 
     $mode = '<undef>' if !defined($mode);
     croak "unknown mode for 'lsn': '$mode', valid modes are "
@@ -3233,7 +3233,7 @@ sub wait_for_catchup
     my ($self, $standby_name, $mode, $target_lsn) = @_;
     $mode = defined($mode) ? $mode : 'replay';
     my %valid_modes =
-      ('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1);
+      (sent => 1, write => 1, flush => 1, replay => 1);
     croak "unknown mode $mode for 'wait_for_catchup', valid modes are "
       . join(', ', keys(%valid_modes))
       unless exists($valid_modes{$mode});
@@ -3715,7 +3715,7 @@ sub create_logical_slot_on_standby
 
     $handle->finish();
 
-    is($self->slot($slot_name)->{'slot_type'},
+    is($self->slot($slot_name)->{slot_type},
         'logical', $slot_name . ' on standby created')
       or die "could not create slot" . $slot_name;
 }
diff --git a/src/test/perl/PostgreSQL/Test/Kerberos.pm b/src/test/perl/PostgreSQL/Test/Kerberos.pm
index b72dd2fbaf4..fa58936f75c 100644
--- a/src/test/perl/PostgreSQL/Test/Kerberos.pm
+++ b/src/test/perl/PostgreSQL/Test/Kerberos.pm
@@ -184,9 +184,9 @@ $realm = {
       or BAIL_OUT("could not create directory \"$kdc_datadir\"");
 
     # Ensure that we use test's config and cache files, not global ones.
-    $ENV{'KRB5_CONFIG'} = $krb5_conf;
-    $ENV{'KRB5_KDC_PROFILE'} = $kdc_conf;
-    $ENV{'KRB5CCNAME'} = $krb5_cache;
+    $ENV{KRB5_CONFIG} = $krb5_conf;
+    $ENV{KRB5_KDC_PROFILE} = $kdc_conf;
+    $ENV{KRB5CCNAME} = $krb5_cache;
 
     my $service_principal = "$ENV{with_krb_srvnam}/$host";
 
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index d1ad131eadf..70761506f67 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -294,7 +294,7 @@ sub tempdir
     return File::Temp::tempdir(
         $prefix . '_XXXX',
         DIR => $tmp_check,
-        CLEANUP => not defined $ENV{'PG_TEST_NOCLEAN'});
+        CLEANUP => not defined $ENV{PG_TEST_NOCLEAN});
 }
 
 =pod
@@ -310,7 +310,7 @@ sub tempdir_short
 {
 
     return File::Temp::tempdir(
-        CLEANUP => not defined $ENV{'PG_TEST_NOCLEAN'});
+        CLEANUP => not defined $ENV{PG_TEST_NOCLEAN});
 }
 
 =pod
diff --git a/src/test/postmaster/t/002_connection_limits.pl b/src/test/postmaster/t/002_connection_limits.pl
index 85f5ef03dec..325a00efd47 100644
--- a/src/test/postmaster/t/002_connection_limits.pl
+++ b/src/test/postmaster/t/002_connection_limits.pl
@@ -13,7 +13,7 @@ use Test::More;
 # Initialize the server with specific low connection limits
 my $node = PostgreSQL::Test::Cluster->new('primary');
 $node->init(
-    'auth_extra' => [
+    auth_extra => [
         '--create-role' =>
           'regress_regular,regress_reserved,regress_superuser',
     ]);
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index ccd8417d449..1c860025ac8 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -378,7 +378,7 @@ sub get_slot_xmins
     ]) or die "Timed out waiting for slot xmins to advance";
 
     my $slotinfo = $node->slot($slotname);
-    return ($slotinfo->{'xmin'}, $slotinfo->{'catalog_xmin'});
+    return ($slotinfo->{xmin}, $slotinfo->{catalog_xmin});
 }
 
 # There's no hot standby feedback and there are no logical slots on either peer
diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl
index a5678bc4dc4..572aa877600 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -161,7 +161,7 @@ SKIP:
     is($node_primary->psql('postgres', 'DROP DATABASE otherdb'),
         3, 'dropping a DB with active logical slots fails');
     $pg_recvlogical->kill_kill;
-    is($node_primary->slot('otherdb_slot')->{'slot_name'},
+    is($node_primary->slot('otherdb_slot')->{slot_name},
         undef, 'logical slot still exists');
 }
 
@@ -171,7 +171,7 @@ $node_primary->poll_query_until('otherdb',
 
 is($node_primary->psql('postgres', 'DROP DATABASE otherdb'),
     0, 'dropping a DB with inactive logical slots succeeds');
-is($node_primary->slot('otherdb_slot')->{'slot_name'},
+is($node_primary->slot('otherdb_slot')->{slot_name},
     undef, 'logical slot was actually dropped with DB');
 
 # Test logical slot advancing and its durability.
diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl b/src/test/recovery/t/010_logical_decoding_timelines.pl
index 08615f1fca8..5954b3afe22 100644
--- a/src/test/recovery/t/010_logical_decoding_timelines.pl
+++ b/src/test/recovery/t/010_logical_decoding_timelines.pl
@@ -94,7 +94,7 @@ is( $node_replica->safe_psql(
         'postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']),
     '',
     'dropped DB dropme on standby');
-is($node_primary->slot('dropme_slot')->{'slot_name'},
+is($node_primary->slot('dropme_slot')->{slot_name},
     undef, 'logical slot was actually dropped on standby');
 
 # Back to testing failover...
@@ -123,14 +123,14 @@ $node_primary->poll_query_until(
     ]) or die "slot's catalog_xmin never became set";
 
 my $phys_slot = $node_primary->slot('phys_slot');
-isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of primary');
-isnt($phys_slot->{'catalog_xmin'},
+isnt($phys_slot->{xmin}, '', 'xmin assigned on physical slot of primary');
+isnt($phys_slot->{catalog_xmin},
     '', 'catalog_xmin assigned on physical slot of primary');
 
 # Ignore wrap-around here, we're on a new cluster:
 cmp_ok(
-    $phys_slot->{'xmin'}, '>=',
-    $phys_slot->{'catalog_xmin'},
+    $phys_slot->{xmin}, '>=',
+    $phys_slot->{catalog_xmin},
     'xmin on physical slot must not be lower than catalog_xmin');
 
 $node_primary->safe_psql('postgres', 'CHECKPOINT');
diff --git a/src/test/recovery/t/021_row_visibility.pl b/src/test/recovery/t/021_row_visibility.pl
index 42740745bfd..2fd3aa6e20d 100644
--- a/src/test/recovery/t/021_row_visibility.pl
+++ b/src/test/recovery/t/021_row_visibility.pl
@@ -48,7 +48,7 @@ $psql_primary{run} = IPC::Run::start(
     '2>' => \$psql_primary{stderr},
     $psql_timeout);
 
-my %psql_standby = ('stdin' => '', 'stdout' => '', 'stderr' => '');
+my %psql_standby = (stdin => '', stdout => '', stderr => '');
 $psql_standby{run} = IPC::Run::start(
     [
         'psql', '--no-psqlrc', '--no-align',
diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl
index 492ef115ba4..0ed966906a4 100644
--- a/src/test/recovery/t/032_relfilenode_reuse.pl
+++ b/src/test/recovery/t/032_relfilenode_reuse.pl
@@ -46,7 +46,7 @@ $psql_primary{run} = IPC::Run::start(
     '2>' => \$psql_primary{stderr},
     $psql_timeout);
 
-my %psql_standby = ('stdin' => '', 'stdout' => '', 'stderr' => '');
+my %psql_standby = (stdin => '', stdout => '', stderr => '');
 $psql_standby{run} = IPC::Run::start(
     [
         'psql', '--no-psqlrc', '--no-align',
diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl
index c31cab06f1c..d34ed576e1f 100644
--- a/src/test/recovery/t/035_standby_logical_decoding.pl
+++ b/src/test/recovery/t/035_standby_logical_decoding.pl
@@ -122,9 +122,9 @@ sub check_slots_dropped
 {
     my ($slot_prefix, $slot_user_handle) = @_;
 
-    is($node_standby->slot($slot_prefix . 'inactiveslot')->{'slot_type'},
+    is($node_standby->slot($slot_prefix . 'inactiveslot')->{slot_type},
         '', 'inactiveslot on standby dropped');
-    is($node_standby->slot($slot_prefix . 'activeslot')->{'slot_type'},
+    is($node_standby->slot($slot_prefix . 'activeslot')->{slot_type},
         '', 'activeslot on standby dropped');
 
     check_pg_recvlogical_stderr($slot_user_handle, "conflict with recovery");
@@ -328,9 +328,9 @@ $node_subscriber->init;
 $node_subscriber->start;
 
 my %psql_subscriber = (
-    'subscriber_stdin' => '',
-    'subscriber_stdout' => '',
-    'subscriber_stderr' => '');
+    subscriber_stdin => '',
+    subscriber_stdout => '',
+    subscriber_stderr => '');
 $psql_subscriber{run} = IPC::Run::start(
     [
         'psql', '--no-psqlrc', '--no-align',
@@ -886,7 +886,7 @@ is( $node_standby->safe_psql(
 
 check_slots_dropped('drop_db', $handle);
 
-is($node_standby->slot('otherslot')->{'slot_type'},
+is($node_standby->slot('otherslot')->{slot_type},
     'logical', 'otherslot on standby not dropped');
 
 # Cleanup : manually drop the slot that was not dropped.
diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl
index fffc51f4047..9e4947f4e3c 100644
--- a/src/test/ssl/t/002_scram.pl
+++ b/src/test/ssl/t/002_scram.pl
@@ -71,8 +71,8 @@ my $md5_works = ($node->psql('postgres', "select md5('')") == 0);
 $ssl_server->configure_test_server_for_ssl(
     $node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
     "scram-sha-256",
-    'password' => "pass",
-    'password_enc' => "scram-sha-256");
+    password => "pass",
+    password_enc => "scram-sha-256");
 switch_server_cert($node, certfile => 'server-cn-only');
 $ENV{PGPASSWORD} = "pass";
 $common_connstr =
diff --git a/src/test/ssl/t/003_sslinfo.pl b/src/test/ssl/t/003_sslinfo.pl
index b9eae8d641b..63a455d1fc3 100644
--- a/src/test/ssl/t/003_sslinfo.pl
+++ b/src/test/ssl/t/003_sslinfo.pl
@@ -186,9 +186,9 @@ foreach my $c (@cases)
     $result = $node->safe_psql(
         "trustdb",
         "SELECT ssl_client_cert_present();",
-        connstr => "$common_connstr dbname=trustdb $c->{'opts'}");
-    is($result, $c->{'present'},
-        "ssl_client_cert_present() for $c->{'opts'}");
+        connstr => "$common_connstr dbname=trustdb $c->{opts}");
+    is($result, $c->{present},
+        "ssl_client_cert_present() for $c->{opts}");
 }
 
 done_testing();
diff --git a/src/test/subscription/t/027_nosuperuser.pl b/src/test/subscription/t/027_nosuperuser.pl
index 36af1c16e7f..b33677ff3dc 100644
--- a/src/test/subscription/t/027_nosuperuser.pl
+++ b/src/test/subscription/t/027_nosuperuser.pl
@@ -374,8 +374,8 @@ SKIP:
     $node_subscriber1->wait_for_subscription_sync($node_publisher1,
         'regress_test_sub');
 
-    my $save_pgpassword = $ENV{"PGPASSWORD"};
-    $ENV{"PGPASSWORD"} = 'secret';
+    my $save_pgpassword = $ENV{PGPASSWORD};
+    $ENV{PGPASSWORD} = 'secret';
 
     # Setup pg_hba configuration so that logical replication connection without
     # password is not allowed.
@@ -404,7 +404,7 @@ SKIP:
         'subscription whose owner is a non-superuser must specify password parameter of the connection string'
     );
 
-    $ENV{"PGPASSWORD"} = $save_pgpassword;
+    $ENV{PGPASSWORD} = $save_pgpassword;
 
     # It should succeed after including the password parameter of the connection
     # string.
diff --git a/src/tools/win32tzlist.pl b/src/tools/win32tzlist.pl
index 706b1f78f80..ef8a84b694c 100755
--- a/src/tools/win32tzlist.pl
+++ b/src/tools/win32tzlist.pl
@@ -59,9 +59,9 @@ foreach my $keyname (@subkeys)
       unless ($vals{Std} && $vals{Dlt} && $vals{Display});
     push @system_zones,
       {
-        'std' => $vals{Std}->[2],
-        'dlt' => $vals{Dlt}->[2],
-        'display' => clean_displayname($vals{Display}->[2]),
+        std => $vals{Std}->[2],
+        dlt => $vals{Dlt}->[2],
+        display => clean_displayname($vals{Display}->[2]),
       };
 }
 
@@ -90,10 +90,10 @@ while ($pgtz =~
 {
     push @file_zones,
       {
-        'display' => clean_displayname($1),
-        'std' => $2,
-        'dlt' => $3,
-        'match' => $4,
+        display => clean_displayname($1),
+        std => $2,
+        dlt => $3,
+        match => $4,
       };
 }
 
-- 
2.43.0
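
To spell out the rule the hunks above rely on, here is a minimal
standalone Perl sketch (the hash and key names are invented for
illustration and are not taken from the tree): identifier-like barewords
are auto-quoted both inside curly braces and on the left-hand side of a
fat comma, so quoting them is redundant, while keys containing other
characters still need explicit quotes.

use strict;
use warnings;

# Illustrative only; none of these keys come from the patch.
my %opts = (
    timeout   => 30,       # identifier-like key: the fat comma auto-quotes it
    retries   => 2,
    'dry-run' => 1,        # contains a hyphen: quotes are still required
);

print $opts{timeout}, "\n";      # a bareword inside {} is auto-quoted as well
print $opts{'dry-run'}, "\n";    # a non-identifier key must stay quoted

Running it prints 30 and 1.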

