Re: PostgreSQL insert speed tests - Mailing list pgsql-general

From Sezai YILMAZ
Subject Re: PostgreSQL insert speed tests
Date
Msg-id 40405184.8060700@pro-g.com.tr
In response to Re: PostgreSQL insert speed tests  (Tom Lane <tgl@sss.pgh.pa.us>)
Responses Re: PostgreSQL insert speed tests  (Shridhar Daithankar <shridhar@frodo.hserus.net>)
Re: PostgreSQL insert speed tests  (Tom Lane <tgl@sss.pgh.pa.us>)
List pgsql-general
Tom Lane wrote:

>Sezai YILMAZ <sezai.yilmaz@pro-g.com.tr> writes:
>
>
>>I changed the three hash indexes to btree.
>>The performance is increased about 2 times (in PostgreSQL 7.3.4  1905
>>rows/s).
>>Concurrent inserts now work.
>>
>>
>Concurrent inserts should work with hash indexes in 7.4, though not 7.3.
>
>
I noticed this condition. I do not get deadlocks with 7.4 on the schema
with hash indexes. 7.4 solves that problem, but it is very slow.
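
For reference, the switch from hash to btree mentioned above amounts to
recreating the three indexes; a sketch, using the index names from the
attached .SQL script:

drop index agentid_ndx;
drop index ownerid_ndx;
drop index hostid_ndx;
create index agentid_ndx on logs using btree (agentid);
create index ownerid_ndx on logs using btree (ownerid);
create index hostid_ndx on logs using btree (hostid);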

>The slowdown you report probably is due to the rewrite of hash indexing
>to allow more concurrency --- the locking algorithm is more complex than
>it used to be.  I am surprised that the effect is so large though.
>Could you make your test program available?
>
>
The test program and .SQL script are attached.

Compile and link scenarios:

without transaction blocks (each INSERT commits as its own transaction)
$ gcc -o tester tester.c -lpq

with the default of 400 inserts per transaction block
$ gcc -DTRANSACTION -o tester tester.c -lpq

with 200 inserts per transaction block
$ gcc -DTRANSACTION -DINSERTPERTRANSACTION=200 -o tester tester.c -lpq

I run the concurrent tests by starting separate tester programs from
different xterm windows.
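
For example, four concurrent testers can also be started from a single
shell (a sketch, equivalent to launching one tester in each of four xterm
windows):

$ for i in 1 2 3 4; do ./tester & done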

>>Changed indexes are more suitable for hash type.
>>
>>
>
>Are they?  How many distinct values are there in those columns?
>I suspect that your test may be stressing the case where only a few hash
>buckets are used and each bucket chain gets to be very long.
>
>
The biggest one has 200 distinct values; the others have 5 and 10. More
information is in "tester.c", where the INSERT query string is built.
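
After a run, the actual distributions can be verified with a query such as:

select count(distinct agentid), count(distinct ownerid),
       count(distinct hostid) from logs;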

Regards,

-sezai
create table logs (
    logid serial primary key,
    ctime integer not null,
    stime integer not null,
    itime integer not null,
    agentid integer not null,
    subagentid integer not null,
    ownerid integer not null,
    hostid integer not null,
    appname varchar(64) default null,
    logbody varchar(1024) not null
);

create index ctime_ndx on logs using btree (ctime);
create index stime_ndx on logs using btree (stime);
create index itime_ndx on logs using btree (itime);
create index agentid_ndx on logs using hash (agentid);
create index ownerid_ndx on logs using hash (ownerid);
create index hostid_ndx on logs using hash (hostid);
#include <stdio.h>
#include <stdlib.h>    /* for exit() */
#include <string.h>
#include <time.h>
#include <libpq-fe.h>

#ifndef INSERTPERTRANSACTION
#define INSERTPERTRANSACTION 400
#endif

void exit_nicely(PGconn *conn)
{
    PQfinish(conn);
    exit(1);
}

int main(int argc, char **argv)
{
    char query[2048];
    char *pghost,
         *pgport,
         *pguser,
         *pgpass,
         *pgoptions,
         *pgtty;
    char *dbName;
    int i;

    PGconn     *conn;
    PGresult   *res;

    pghost = NULL;
    pgport = NULL;
    pgoptions = NULL;
    pgtty = NULL;
    pguser = "postgres";
    pgpass = NULL;
    dbName = "pgtest";


    conn = PQsetdbLogin(pghost, pgport, pgoptions, pgtty, dbName,
                        pguser, pgpass);

    /*
     * check to see that the backend connection was successfully made
     */
    if (PQstatus(conn) == CONNECTION_BAD)
    {
        fprintf(stderr, "Connection to database '%s' failed.\n", dbName);
        fprintf(stderr, "%s", PQerrorMessage(conn));
        exit_nicely(conn);
    }


    /* Do 200000 inserts for this instance */
    for (i = 0; i < 200000; i++)
    {
#ifdef TRANSACTION
        /* First insert in transaction block begins, BEGIN TRANSACTION */
        if (!(i % INSERTPERTRANSACTION)) {
            /* start a transaction block */
            res = PQexec(conn, "BEGIN");
            if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
            {
                fprintf(stderr, "BEGIN command failed\n");
                PQclear(res);
                exit_nicely(conn);
            }

            /*
             * PQclear the PGresult whenever it is no longer needed, to
             * avoid memory leaks
             */
            PQclear(res);
        }
#endif

        snprintf(query, 2048,
            "INSERT INTO LOGS (CTIME, STIME, ITIME, AGENTID, SUBAGENTID, "
            "OWNERID, HOSTID, APPNAME, LOGBODY) VALUES "
            "('%d', '%d', '%d', '%d', '%d', '%d', '%d', '%s', '%s');",
            (int) time(NULL),         /* simulate CTIME epoch */
            (int) time(NULL) + 23,    /* simulate STIME epoch */
            (int) time(NULL) + 45,    /* simulate ITIME epoch */
            (int) time(NULL) % 5,     /* simulate 5 different AGENTID */
            (int) time(NULL) % 15,    /* simulate 15 different SUBAGENTID */
            (int) time(NULL) % 200,   /* simulate 200 different OWNERID */
            (int) time(NULL) % 10,    /* simulate 10 different HOSTID */
            argv[0],
            "The dummy log for testing PostgreSQL 7.3.4 "
            "speed under heavy conditions."
            );

        /* execute the INSERT statement */
        res = PQexec(conn, query);
        if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
        {
            fprintf(stderr, "INSERT command failed\n");
            fprintf(stderr, "Backend Says:%s\n", PQresultErrorMessage(res));
            PQclear(res);
            exit_nicely(conn);
        }

        /*
         * PQclear the PGresult whenever it is no longer needed, to
         * avoid memory leaks
         */
        PQclear(res);

#ifdef TRANSACTION
        /* The last insert in this transaction, it is time to COMMIT */
        if ((i % INSERTPERTRANSACTION) == (INSERTPERTRANSACTION - 1)) {
            /* commit the transaction block */
            res = PQexec(conn, "COMMIT");
            if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
            {
                fprintf(stderr, "COMMIT command failed\n");
                PQclear(res);
                exit_nicely(conn);
            }

            /*
             * PQclear the PGresult whenever it is no longer needed, to
             * avoid memory leaks
             */
            PQclear(res);
        }
#endif

        if (!(i % 1000)) { /* display time information every 1000 inserts */
            fprintf(stderr, "%8d\t %d\n", i, (int) time(NULL));
        }

    }

#ifdef TRANSACTION
    /* commit the final partial transaction block, if any */
    if ((i % INSERTPERTRANSACTION) != 0) {
        printf("COMMIT TRANSACTION\n");
        /* commit the remaining inserts */
        res = PQexec(conn, "COMMIT");
        if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
        {
            fprintf(stderr, "COMMIT command failed\n");
            PQclear(res);
            exit_nicely(conn);
        }

        /*
         * PQclear the PGresult whenever it is no longer needed, to
         * avoid memory leaks
         */
        PQclear(res);
    }
#endif

    /* close the connection and return success (exit_nicely() exits with 1) */
    PQfinish(conn);
    return 0;
}
