JDBC: Updated LONGVARBINARY upload patch - Mailing list pgsql-jdbc

From Fernando Nasser
Subject JDBC: Updated LONGVARBINARY upload patch
Date
Msg-id 3ED61341.8020100@redhat.com
List pgsql-jdbc
Here is the updated (for today's cvs tip) patch for LONGVARBINARY uploading.
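
For anyone who wants to try it, the new path is driven through the ordinary
setBinaryStream() call on a PreparedStatement.  A minimal sketch (the table,
column and file names here are only placeholders):

import java.io.File;
import java.io.FileInputStream;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class UploadSketch
{
    public static void main(String[] args) throws Exception
    {
        Class.forName("org.postgresql.Driver");
        Connection con =
            DriverManager.getConnection("jdbc:postgresql:test", "user", "password");

        File f = new File("image.png");
        FileInputStream fis = new FileInputStream(f);

        PreparedStatement ps =
            con.prepareStatement("INSERT INTO images (name, data) VALUES (?, ?)");
        ps.setString(1, f.getName());
        // LONGVARBINARY path: with this patch the stream is staged in a
        // large object and copied into the bytea column by the backend.
        ps.setBinaryStream(2, fis, (int) f.length());
        ps.executeUpdate();

        ps.close();
        fis.close();
        con.close();
    }
}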


--
Fernando Nasser
Red Hat Canada Ltd.                     E-Mail:  fnasser@redhat.com
2323 Yonge Street, Suite #300
Toronto, Ontario   M4P 2C9
? org/postgresql/util/PGLOInputStream.java
Index: org/postgresql/core/QueryExecutor.java
===================================================================
RCS file: /projects/cvsroot/pgsql-server/src/interfaces/jdbc/org/postgresql/core/QueryExecutor.java,v
retrieving revision 1.22
diff -c -p -r1.22 QueryExecutor.java
*** org/postgresql/core/QueryExecutor.java    29 May 2003 03:21:32 -0000    1.22
--- org/postgresql/core/QueryExecutor.java    29 May 2003 14:00:10 -0000
*************** import java.io.IOException;
*** 17,22 ****
--- 17,23 ----
  import java.sql.*;
  import org.postgresql.Driver;
  import org.postgresql.util.PSQLException;
+ import org.postgresql.util.PGLOInputStream;

  public class QueryExecutor
  {
*************** public class QueryExecutor
*** 82,87 ****
--- 83,89 ----
      private Field[] fields = null;
      private Vector tuples = new Vector();
      private boolean binaryCursor = false;
+     private boolean wasAutoCommit = false;
      private String status = null;
      private int update_count = 1;
      private long insert_oid = 0;
*************** public class QueryExecutor
*** 280,285 ****
--- 282,301 ----
                  }

              }
+
+             // Regardless of success or failure, get rid of any LO staging area created
+             for ( int i = 0; i < m_binds.length ; i++ )
+             {
+                 if ( m_binds[i].getClass() == PGLOInputStream.class )
+                 {
+                     // Close and unlink (delete) LO
+                     ((PGLOInputStream)m_binds[i]).dematerialize();
+                 }
+             }
+
+             // If we started the transaction, end it
+             if (wasAutoCommit)
+                 connection.setAutoCommit(true);

              // did we get an error during this query?
              if ( errorMessage != null )
*************** public class QueryExecutor
*** 350,355 ****
--- 366,389 ----
          {
              if ( m_binds[i] == null )
                  throw new PSQLException("postgresql.prep.param", new Integer(i + 1));
+         }
+         // Now loop again and materialize all Streams into LOs
+         wasAutoCommit = false;
+         for ( int i = 0; i < m_binds.length ; i++ )
+         {
+             // Check if it is a LONGVARBINARY, which uses a
+             // LO as staging area for upload
+             if ( m_binds[i].getClass() == PGLOInputStream.class )
+             {
+                 // Make sure we are in a transaction
+                 if (connection.getAutoCommit())
+                 {
+                     connection.setAutoCommit(false);
+                     wasAutoCommit = true;
+                 }
+                 // Create and open LO
+                 ((PGLOInputStream)m_binds[i]).materialize();
+             }
          }
          try
          {
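
The autocommit juggling added above follows the usual pattern for work that
must run inside a transaction (the large object API is one such case): open a
transaction only if the connection is in autocommit mode, and restore
autocommit afterwards, which also commits.  Outside the driver, in plain JDBC
terms, the pattern looks like this (a generic sketch, not code from the
patch):

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class TxWrapSketch
{
    // Runs one statement inside a transaction, opening and ending the
    // transaction ourselves only when the caller left autocommit on.
    public static void runInTransaction(Connection con, String sql) throws SQLException
    {
        boolean wasAutoCommit = false;
        if (con.getAutoCommit())
        {
            con.setAutoCommit(false);
            wasAutoCommit = true;
        }
        try
        {
            Statement stmt = con.createStatement();
            stmt.execute(sql);
            stmt.close();
        }
        finally
        {
            if (wasAutoCommit)
                con.setAutoCommit(true);   // per JDBC, this commits the open transaction
        }
    }
}
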
Index: org/postgresql/jdbc1/AbstractJdbc1Connection.java
===================================================================
RCS file: /projects/cvsroot/pgsql-server/src/interfaces/jdbc/org/postgresql/jdbc1/AbstractJdbc1Connection.java,v
retrieving revision 1.19
diff -c -p -r1.19 AbstractJdbc1Connection.java
*** org/postgresql/jdbc1/AbstractJdbc1Connection.java    29 May 2003 03:21:32 -0000    1.19
--- org/postgresql/jdbc1/AbstractJdbc1Connection.java    29 May 2003 14:00:11 -0000
*************** public abstract class AbstractJdbc1Conne
*** 778,784 ****
          //We also set the client encoding so that the driver only needs
          //to deal with utf8.  We can only do this in 7.3 because multibyte
          //support is now always included
!         if (haveMinimumServerVersion("7.3"))
          {
              BaseResultSet acRset =
                  execSQL("set client_encoding = 'UNICODE'; show autocommit");
--- 778,784 ----
          //We also set the client encoding so that the driver only needs
          //to deal with utf8.  We can only do this in 7.3 because multibyte
          //support is now always included
!         if (haveMinimumServerVersion("7.3") && !haveMinimumServerVersion("7.4"))
          {
              BaseResultSet acRset =
                  execSQL("set client_encoding = 'UNICODE'; show autocommit");
*************** public abstract class AbstractJdbc1Conne
*** 798,803 ****
--- 798,813 ----
              {
                  execSQL("set autocommit = on; commit;");
              }
+         }
+         // In 7.4 the backend no longer has autocommit, but we still need
+         // to set the client encoding.
+         else if (haveMinimumServerVersion("7.4"))
+         {
+             BaseResultSet acRset =
+                 execSQL("set client_encoding = 'UNICODE'");
+
+             //set encoding to be unicode
+             encoding = Encoding.getEncoding("UNICODE", null);
          }

          // Initialise object handling
Index: org/postgresql/jdbc1/AbstractJdbc1Statement.java
===================================================================
RCS file: /projects/cvsroot/pgsql-server/src/interfaces/jdbc/org/postgresql/jdbc1/AbstractJdbc1Statement.java,v
retrieving revision 1.24
diff -c -p -r1.24 AbstractJdbc1Statement.java
*** org/postgresql/jdbc1/AbstractJdbc1Statement.java    29 May 2003 04:52:44 -0000    1.24
--- org/postgresql/jdbc1/AbstractJdbc1Statement.java    29 May 2003 14:00:12 -0000
*************** public abstract class AbstractJdbc1State
*** 1355,1382 ****
              //As the spec/javadoc for this method indicate this is to be used for
              //large binary values (i.e. LONGVARBINARY)    PG doesn't have a separate
              //long binary datatype, but with toast the bytea datatype is capable of
!             //handling very large values.  Thus the implementation ends up calling
!             //setBytes() since there is no current way to stream the value to the server
!             byte[] l_bytes = new byte[length];
!             int l_bytesRead;
!             try
              {
!                 l_bytesRead = x.read(l_bytes, 0, length);
!             }
!             catch (IOException l_ioe)
!             {
!                 throw new PSQLException("postgresql.unusual", l_ioe);
!             }
!             if (l_bytesRead == length)
!             {
!                 setBytes(parameterIndex, l_bytes);
              }
              else
              {
!                 //the stream contained less data than they said
!                 byte[] l_bytes2 = new byte[l_bytesRead];
!                 System.arraycopy(l_bytes, 0, l_bytes2, 0, l_bytesRead);
!                 setBytes(parameterIndex, l_bytes2);
              }
          }
          else
--- 1355,1368 ----
              //As the spec/javadoc for this method indicate this is to be used for
              //large binary values (i.e. LONGVARBINARY)    PG doesn't have a separate
              //long binary datatype, but with toast the bytea datatype is capable of
!             //handling very large values.
!             if (x == null)
              {
!                 setNull(parameterIndex, Types.VARBINARY);
              }
              else
              {
!                 bind(parameterIndex, new PGLOInputStream(connection, x, length), PG_BYTEA);
              }
          }
          else
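
The practical effect of the bind() change above: the '?' placeholder is no
longer filled with an inline bytea value built by setBytes().  Instead the
bound PGLOInputStream (attached below) contributes its toString() form when
the statement text is assembled, so the backend itself reads the bytes out of
the staging large object.  Roughly, the statement that goes over the wire
looks like the one printed below (the oid, length, table and values are
invented purely for illustration):

public class ExpansionIllustration
{
    public static void main(String[] args)
    {
        // Invented example: an LO with oid 16428 holding 1048576 staged bytes.
        // x'60000' is the lo_open mode INV_READ | INV_WRITE.
        String boundValue = "loread( lo_open(16428, x'60000'::INTEGER), 1048576 )";

        String sent = "INSERT INTO images (name, data) VALUES ('photo.png', "
                      + boundValue + ")";
        System.out.println(sent);
    }
}
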
/*-------------------------------------------------------------------------
 *
 * PGLOInputStream.java
 *     This class encapsulates InputStream and provides support for loading
 *     large files into bytea fields.
 *
 * Copyright (c) 2003, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *      $Header
 *
 *-------------------------------------------------------------------------
 */
package org.postgresql.util;

import java.io.*;
import org.postgresql.core.BaseConnection;
import org.postgresql.largeobject.*;

public class PGLOInputStream
{
    protected BaseConnection connection;        // The connection that created us
    private java.io.InputStream stream;
    private int LOlength;
    private int LOoid;
    private LargeObject lob;
    boolean isMaterialized;

    /*
     * Constructor
     * @param connection The connection used to create and load the staging LO
     * @param stream The InputStream with the data to load into the bytea
     * @param length The (maximum) number of bytes to read from the stream
     */
    public PGLOInputStream(BaseConnection connection, java.io.InputStream stream, int length)
    {
        this.connection = connection;
        this.stream = stream;
        this.LOlength = length;
        lob = null;
        LOoid = 0; /* Invalid Oid */
        isMaterialized = false;
    }

    /*
     * Creates a staging area (a PostgreSQL LO)
     * and reads the data into it.
     */
    public void materialize() throws java.sql.SQLException
    {
        LargeObjectManager lom = connection.getLargeObjectAPI();
        LOoid = lom.create();
System.out.println("LOoid=" + LOoid);
        lob = lom.open(LOoid);

        // Copy the file into the LO
        OutputStream los = lob.getOutputStream();
System.out.println("los=" + los);
        try
        {
            // The copy could be buffered, but the OutputStream returned by
            // LargeObject is already buffered internally, so an extra buffer
            // would gain nothing; if anything it would make things worse.
            // Copy at most LOlength bytes and stop without reading past the limit.
            int c;
            int p = 0;
            while (p < LOlength && (c = stream.read()) > -1)
            {
                los.write(c);
                p++;
            }
            los.close();
        }
        catch (IOException se)
        {
            throw new PSQLException("postgresql.unusual", se);
        }
        // lob is closed by the stream so don't call lob.close()
        // lob.close();

        isMaterialized = true;
    }

    /*
     * Deletes (unlinks) the staging area (a PostgreSQL LO).
     */
    public void dematerialize() throws java.sql.SQLException
    {
        LargeObjectManager lom = connection.getLargeObjectAPI();
        lom.delete(LOoid);
        LOoid = 0; /* Invalid Oid */
        isMaterialized = false;
    }

    /*
     * Overrides Object.toString().
     * The string form is the SQL expression that stands in for the bind
     * parameter: it reads back the contents of the staging LO so the backend
     * copies them into the bytea column (x'60000' is INV_READ | INV_WRITE).
     */
    public String toString()
    {
        return "loread( lo_open(" + LOoid + ", x'60000'::INTEGER), " + LOlength + " )";
    }
}
