Re: hash_create(): check return code - Mailing list pgsql-patches

From Neil Conway
Subject Re: hash_create(): check return code
Date
Msg-id 417B6768.7060107@samurai.com
Whole thread Raw
In response to Re: hash_create(): check return code  (Tom Lane <tgl@sss.pgh.pa.us>)
Responses Re: hash_create(): check return code
List pgsql-patches
Tom Lane wrote:
> With all due respect to Jan, that coding seems 100% bogus.  elog(ERROR)
> will work (it had better, because pgstat.c certainly calls routines that
> might do it) and the insistence on using exit() rather than proc_exit()
> is just plain wrong anyway.

Attached is a patch that makes the following cleanups:

- use elog(ERROR) in pgstat.c to indicate fatal errors, rather than
elog(LOG) followed by exit(1)

- use elog(ERROR) to indicate an error in hash_create() rather than
returning a NULL pointer

- adjust callers of hash_create() to assume the return value is non-NULL

There was one case in pgstat.c where I had to wrap the hash_create()
call in a PG_TRY() block to ensure the file handle is closed: it appears
this code can be invoked by a regular backend, in which case elog(ERROR)
won't necessarily close the file handle.

Barring any objections, I'll apply this to HEAD on Monday.

-Neil
--- contrib/dblink/dblink.c
+++ contrib/dblink/dblink.c
@@ -2043,19 +2043,11 @@
 createConnHash(void)
 {
     HASHCTL        ctl;
-    HTAB       *ptr;

     ctl.keysize = NAMEDATALEN;
     ctl.entrysize = sizeof(remoteConnHashEnt);

-    ptr = hash_create("Remote Con hash", NUMCONN, &ctl, HASH_ELEM);
-
-    if (!ptr)
-        ereport(ERROR,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-                 errmsg("out of memory")));
-
-    return (ptr);
+    return hash_create("Remote Con hash", NUMCONN, &ctl, HASH_ELEM);
 }

 static void
--- src/backend/commands/prepare.c
+++ src/backend/commands/prepare.c
@@ -264,9 +264,6 @@
                                    32,
                                    &hash_ctl,
                                    HASH_ELEM);
-
-    if (!prepared_queries)
-        elog(ERROR, "could not create hash table");
 }

 /*
--- src/backend/executor/execGrouping.c
+++ src/backend/executor/execGrouping.c
@@ -322,10 +322,6 @@
     hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets,
                                      &hash_ctl,
                 HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
-    if (hashtable->hashtab == NULL)
-        ereport(ERROR,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-                 errmsg("out of memory")));

     return hashtable;
 }
--- src/backend/executor/nodeIndexscan.c
+++ src/backend/executor/nodeIndexscan.c
@@ -1045,10 +1045,6 @@
                                     nbuckets,
                                     &hash_ctl,
                                HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
-    if (node->iss_DupHash == NULL)
-        ereport(ERROR,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-                 errmsg("out of memory")));
 }

 int
--- src/backend/postmaster/pgstat.c
+++ src/backend/postmaster/pgstat.c
@@ -1405,12 +1405,9 @@
      * good.
      */
     if (pgpipe(pgStatPipe) < 0)
-    {
-        ereport(LOG,
+        ereport(ERROR,
                 (errcode_for_socket_access(),
              errmsg("could not create pipe for statistics buffer: %m")));
-        exit(1);
-    }

 #ifdef EXEC_BACKEND
     /* child becomes collector process */
@@ -1420,9 +1417,8 @@
 #endif
     {
         case -1:
-            ereport(LOG,
+            ereport(ERROR,
                     (errmsg("could not fork statistics collector: %m")));
-            exit(1);

 #ifndef EXEC_BACKEND
         case 0:
@@ -1529,14 +1528,6 @@
     hash_ctl.hash = tag_hash;
     pgStatBeDead = hash_create("Dead Backends", PGSTAT_BE_HASH_SIZE,
                                &hash_ctl, HASH_ELEM | HASH_FUNCTION);
-    if (pgStatBeDead == NULL)
-    {
-        /* assume the problem is out-of-memory */
-        ereport(LOG,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-             errmsg("out of memory in statistics collector --- abort")));
-        exit(1);
-    }

     /*
      * Create the known backends table
@@ -1544,12 +1536,9 @@
     pgStatBeTable = (PgStat_StatBeEntry *) malloc(
                                sizeof(PgStat_StatBeEntry) * MaxBackends);
     if (pgStatBeTable == NULL)
-    {
-        ereport(LOG,
+        ereport(ERROR,
                 (errcode(ERRCODE_OUT_OF_MEMORY),
              errmsg("out of memory in statistics collector --- abort")));
-        exit(1);
-    }
     memset(pgStatBeTable, 0, sizeof(PgStat_StatBeEntry) * MaxBackends);

     readPipe = pgStatPipe[0];
@@ -1605,10 +1602,9 @@
         {
             if (errno == EINTR)
                 continue;
-            ereport(LOG,
+            ereport(ERROR,
                     (errcode_for_socket_access(),
                  errmsg("select() failed in statistics collector: %m")));
-            exit(1);
         }

         /*
@@ -1647,10 +1646,9 @@
                 {
                     if (errno == EINTR)
                         continue;
-                    ereport(LOG,
+                    ereport(ERROR,
                             (errcode_for_socket_access(),
                              errmsg("could not read from statistics collector pipe: %m")));
-                    exit(1);
                 }
                 if (len == 0)    /* EOF on the pipe! */
                 {
@@ -1670,9 +1669,8 @@
                          * sync with the buffer process somehow. Abort so
                          * that we can restart both processes.
                          */
-                        ereport(LOG,
+                        ereport(ERROR,
                           (errmsg("invalid statistics message length")));
-                        exit(1);
                     }
                 }
             }
@@ -1815,24 +1814,18 @@
      * the collector falls behind.
      */
     if (!set_noblock(writePipe))
-    {
-        ereport(LOG,
+        ereport(ERROR,
                 (errcode_for_socket_access(),
                  errmsg("could not set statistics collector pipe to nonblocking mode: %m")));
-        exit(1);
-    }

     /*
      * Allocate the message buffer
      */
     msgbuffer = (char *) malloc(PGSTAT_RECVBUFFERSZ);
     if (msgbuffer == NULL)
-    {
-        ereport(LOG,
+        ereport(ERROR,
                 (errcode(ERRCODE_OUT_OF_MEMORY),
              errmsg("out of memory in statistics collector --- abort")));
-        exit(1);
-    }

     /*
      * Loop forever
@@ -1887,10 +1881,9 @@
         {
             if (errno == EINTR)
                 continue;
-            ereport(LOG,
+            ereport(ERROR,
                     (errcode_for_socket_access(),
                      errmsg("select() failed in statistics buffer: %m")));
-            exit(1);
         }

         /*
@@ -1902,12 +1901,9 @@
             len = recv(pgStatSock, (char *) &input_buffer,
                        sizeof(PgStat_Msg), 0);
             if (len < 0)
-            {
-                ereport(LOG,
+                ereport(ERROR,
                         (errcode_for_socket_access(),
                        errmsg("could not read statistics message: %m")));
-                exit(1);
-            }

             /*
              * We ignore messages that are smaller than our common header
@@ -1968,10 +1965,9 @@
             {
                 if (errno == EINTR || errno == EAGAIN)
                     continue;    /* not enough space in pipe */
-                ereport(LOG,
+                ereport(ERROR,
                         (errcode_for_socket_access(),
                          errmsg("could not write to statistics collector pipe: %m")));
-                exit(1);
             }
             /* NB: len < xfr is okay */
             msg_send += len;
@@ -2093,12 +2092,9 @@
                                            (void *) &(msg->m_databaseid),
                                                  HASH_ENTER, &found);
     if (dbentry == NULL)
-    {
-        ereport(LOG,
+        ereport(ERROR,
                 (errcode(ERRCODE_OUT_OF_MEMORY),
              errmsg("out of memory in statistics collector --- abort")));
-        exit(1);
-    }

     /*
      * If not found, initialize the new one.
@@ -2123,14 +2120,6 @@
                                       PGSTAT_TAB_HASH_SIZE,
                                       &hash_ctl,
                                       HASH_ELEM | HASH_FUNCTION);
-        if (dbentry->tables == NULL)
-        {
-            /* assume the problem is out-of-memory */
-            ereport(LOG,
-                    (errcode(ERRCODE_OUT_OF_MEMORY),
-             errmsg("out of memory in statistics collector --- abort")));
-            exit(1);
-        }
     }

     /*
@@ -2179,12 +2171,10 @@
                                                        HASH_ENTER,
                                                        &found);
             if (deadbe == NULL)
-            {
-                ereport(LOG,
+                ereport(ERROR,
                         (errcode(ERRCODE_OUT_OF_MEMORY),
                          errmsg("out of memory in statistics collector --- abort")));
-                exit(1);
-            }
+
             if (!found)
             {
                 deadbe->backendid = i + 1;
@@ -2256,12 +2254,9 @@
                 if (hash_search(pgStatDBHash,
                                 (void *) &(dbentry->databaseid),
                                 HASH_REMOVE, NULL) == NULL)
-                {
-                    ereport(LOG,
+                    ereport(ERROR,
                             (errmsg("database hash table corrupted "
                                     "during cleanup --- abort")));
-                    exit(1);
-                }
             }

             /*
@@ -2294,12 +2291,11 @@
                                     (void *) &(tabentry->tableid),
                                     HASH_REMOVE, NULL) == NULL)
                     {
-                        ereport(LOG,
+                        ereport(ERROR,
                                 (errmsg("tables hash table for "
                                         "database %u corrupted during "
                                         "cleanup --- abort",
                                         dbentry->databaseid)));
-                        exit(1);
                     }
                 }
                 continue;
@@ -2374,10 +2373,9 @@
                             (void *) &(deadbe->procpid),
                             HASH_REMOVE, NULL) == NULL)
             {
-                ereport(LOG,
+                ereport(ERROR,
                       (errmsg("dead-server-process hash table corrupted "
                               "during cleanup --- abort")));
-                exit(1);
             }
         }
     }
@@ -2436,21 +2435,6 @@
     hash_ctl.hcxt = use_mcxt;
     *dbhash = hash_create("Databases hash", PGSTAT_DB_HASH_SIZE, &hash_ctl,
                           HASH_ELEM | HASH_FUNCTION | mcxt_flags);
-    if (*dbhash == NULL)
-    {
-        /* assume the problem is out-of-memory */
-        if (pgStatRunningInCollector)
-        {
-            ereport(LOG,
-                    (errcode(ERRCODE_OUT_OF_MEMORY),
-             errmsg("out of memory in statistics collector --- abort")));
-            exit(1);
-        }
-        /* in backend, can do normal error */
-        ereport(ERROR,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-                 errmsg("out of memory")));
-    }

     /*
      * Initialize the number of known backends to zero, just in case we do
@@ -2512,20 +2497,10 @@
                                                              &found);
                 if (dbentry == NULL)
                 {
-                    if (pgStatRunningInCollector)
-                    {
-                        ereport(LOG,
-                                (errcode(ERRCODE_OUT_OF_MEMORY),
-                                 errmsg("out of memory in statistics collector --- abort")));
-                        exit(1);
-                    }
-                    else
-                    {
-                        fclose(fpin);
-                        ereport(ERROR,
-                                (errcode(ERRCODE_OUT_OF_MEMORY),
-                                 errmsg("out of memory")));
-                    }
+                    fclose(fpin);
+                    ereport(ERROR,
+                            (errcode(ERRCODE_OUT_OF_MEMORY),
+                             errmsg("out of memory")));
                 }
                 if (found)
                 {
@@ -2551,26 +2541,19 @@
                 hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
                 hash_ctl.hash = tag_hash;
                 hash_ctl.hcxt = use_mcxt;
-                dbentry->tables = hash_create("Per-database table",
-                                              PGSTAT_TAB_HASH_SIZE,
-                                              &hash_ctl,
-                                 HASH_ELEM | HASH_FUNCTION | mcxt_flags);
-                if (dbentry->tables == NULL)
+                PG_TRY();
                 {
-                    /* assume the problem is out-of-memory */
-                    if (pgStatRunningInCollector)
-                    {
-                        ereport(LOG,
-                                (errcode(ERRCODE_OUT_OF_MEMORY),
-                                 errmsg("out of memory in statistics collector --- abort")));
-                        exit(1);
-                    }
-                    /* in backend, can do normal error */
+                    dbentry->tables = hash_create("Per-database table",
+                                                  PGSTAT_TAB_HASH_SIZE,
+                                                  &hash_ctl,
+                                                  HASH_ELEM | HASH_FUNCTION | mcxt_flags);
+                }
+                PG_CATCH();
+                {
                     fclose(fpin);
-                    ereport(ERROR,
-                            (errcode(ERRCODE_OUT_OF_MEMORY),
-                             errmsg("out of memory")));
+                    PG_RE_THROW();
                 }
+                PG_END_TRY();

                 /*
                  * Arrange that following 'T's add entries to this
@@ -2609,14 +2602,6 @@
                                                      HASH_ENTER, &found);
                 if (tabentry == NULL)
                 {
-                    if (pgStatRunningInCollector)
-                    {
-                        ereport(LOG,
-                                (errcode(ERRCODE_OUT_OF_MEMORY),
-                                 errmsg("out of memory in statistics collector --- abort")));
-                        exit(1);
-                    }
-                    /* in backend, can do normal error */
                     fclose(fpin);
                     ereport(ERROR,
                             (errcode(ERRCODE_OUT_OF_MEMORY),
@@ -2860,12 +2852,9 @@
                                               (void *) &(tabmsg[i].t_id),
                                                      HASH_ENTER, &found);
         if (tabentry == NULL)
-        {
-            ereport(LOG,
+            ereport(ERROR,
                     (errcode(ERRCODE_OUT_OF_MEMORY),
              errmsg("out of memory in statistics collector --- abort")));
-            exit(1);
-        }

         if (!found)
         {
@@ -3040,12 +3037,4 @@
                                   PGSTAT_TAB_HASH_SIZE,
                                   &hash_ctl,
                                   HASH_ELEM | HASH_FUNCTION);
-    if (dbentry->tables == NULL)
-    {
-        /* assume the problem is out-of-memory */
-        ereport(LOG,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-             errmsg("out of memory in statistics collector --- abort")));
-        exit(1);
-    }
 }
--- src/backend/storage/lmgr/lock.c
+++ src/backend/storage/lmgr/lock.c
@@ -335,9 +335,6 @@
                                                     &info,
                                                     hash_flags);

-    if (!LockMethodLocalHash[lockmethodid])
-        elog(FATAL, "could not initialize lock table \"%s\"", tabName);
-
     pfree(shmemName);

     return lockmethodid;
--- src/backend/storage/smgr/md.c
+++ src/backend/storage/smgr/md.c
@@ -132,10 +132,6 @@
                                       100L,
                                       &hash_ctl,
                                HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
-        if (pendingOpsTable == NULL)
-            ereport(FATAL,
-                    (errcode(ERRCODE_OUT_OF_MEMORY),
-                     errmsg("out of memory")));
     }

     return true;
--- src/backend/utils/fmgr/fmgr.c
+++ src/backend/utils/fmgr/fmgr.c
@@ -515,10 +515,6 @@
                                 100,
                                 &hash_ctl,
                                 HASH_ELEM | HASH_FUNCTION);
-        if (CFuncHash == NULL)
-            ereport(ERROR,
-                    (errcode(ERRCODE_OUT_OF_MEMORY),
-                     errmsg("out of memory")));
     }

     entry = (CFuncHashTabEntry *)
--- src/backend/utils/hash/dynahash.c
+++ src/backend/utils/hash/dynahash.c
@@ -120,8 +120,6 @@

     /* Initialize the hash header */
     hashp = (HTAB *) MEM_ALLOC(sizeof(HTAB));
-    if (!hashp)
-        return NULL;
     MemSet(hashp, 0, sizeof(HTAB));

     hashp->tabname = (char *) MEM_ALLOC(strlen(tabname) + 1);
@@ -175,7 +173,9 @@
     {
         hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
         if (!hashp->hctl)
-            return NULL;
+            ereport(ERROR,
+                    (errcode(ERRCODE_OUT_OF_MEMORY),
+                     errmsg("out of memory")));
     }

     hdefault(hashp);
@@ -231,7 +233,7 @@
     if (!init_htab(hashp, nelem))
     {
         hash_destroy(hashp);
-        return NULL;
+        elog(ERROR, "failed to initialize hash table");
     }

     /*
@@ -243,7 +243,9 @@
         if (!element_alloc(hashp, (int) nelem))
         {
             hash_destroy(hashp);
-            return NULL;
+            ereport(ERROR,
+                    (errcode(ERRCODE_OUT_OF_MEMORY),
+                     errmsg("out of memory")));
         }
     }


pgsql-patches by date:

Previous
From: Nicolai Tufar
Date:
Subject: (Turkish) New translation: psql
Next
From: Tom Lane
Date:
Subject: Re: hash_create(): check return code