+Tue Jan 30 22:24:00 GMT 2001 peter@retep.org.uk
+	- Fixed bug where Statement.setMaxRows() was a global setting. It is now
+	  limited to the Statement it was set on.
+ - Changed LargeObject.read(byte[],int,int) to return the actual number
+ of bytes read (used to be void).
+	- LargeObject now supports InputStreams!
+ - PreparedStatement.setBinaryStream() now works!
+ - ResultSet.getBinaryStream() now returns an InputStream that doesn't
+ copy the blob into memory first!
+	- Connection.isClosed() now tests whether the connection is actually still
+	  alive, rather than just reporting whether close() has been called.
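
A minimal client-side sketch of the per-statement setMaxRows() behaviour (not
part of the patch; the connection "db" and the table "t" with more than three
rows are assumptions):

    // Sketch only: "db" and "t" are assumed, not taken from the patch.
    Statement s1 = db.createStatement();
    Statement s2 = db.createStatement();

    s1.setMaxRows(3);                                   // limits s1 only
    ResultSet r1 = s1.executeQuery("select * from t");  // at most 3 rows
    ResultSet r2 = s2.executeQuery("select * from t");  // still unlimited

    // Before this fix, the setMaxRows(3) call above would have capped r2 as
    // well, because the limit was stored on the Connection.
    r1.close(); r2.close();
    s1.close(); s2.close();
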
Thu Jan 25 09:11:00 GMT 2001 peter@retep.org.uk
- Added an alternative constructor to PGSQLException so that debugging
   some more esoteric bugs is easier. If only 1 arg is supplied and it's
/**
*
- * $Id: basic.java,v 1.5 2000/06/06 11:05:57 peter Exp $
+ * $Id: basic.java,v 1.6 2001/01/31 08:26:01 peter Exp $
*
* This example tests the basic components of the JDBC driver, and shows
* how even the simplest of queries can be implemented.
{
Connection db; // The connection to the database
Statement st; // Our statement to run queries with
-
+
public basic(String args[]) throws ClassNotFoundException, FileNotFoundException, IOException, SQLException
{
String url = args[0];
String usr = args[1];
String pwd = args[2];
-
+
// Load the driver
Class.forName("org.postgresql.Driver");
-
+
// Connect to database
System.out.println("Connecting to Database URL = " + url);
db = DriverManager.getConnection(url, usr, pwd);
-
+
System.out.println("Connected...Now creating a statement");
st = db.createStatement();
-
+
// Clean up the database (in case we failed earlier) then initialise
cleanup();
-
+
// Now run tests using JDBC methods
doexample();
-
+
// Clean up the database
cleanup();
-
+
// Finally close the database
System.out.println("Now closing the connection");
st.close();
db.close();
-
+
//throw postgresql.Driver.notImplemented();
}
-
+
/**
* This drops the table (if it existed). No errors are reported.
*/
// We ignore any errors here
}
}
-
+
/**
* This performs the example
*/
public void doexample() throws SQLException
{
System.out.println("\nRunning tests:");
-
+
// First we need a table to store data in
st.executeUpdate("create table basic (a int2, b int2)");
-
+
// Now insert some data, using the Statement
st.executeUpdate("insert into basic values (1,1)");
st.executeUpdate("insert into basic values (2,1)");
st.executeUpdate("insert into basic values (3,1)");
-
+
// This shows how to get the oid of a just inserted row
+      // updated for 7.1: getInsertedOID() is now available directly on the Statement
st.executeUpdate("insert into basic values (4,1)");
- int insertedOID = ((org.postgresql.ResultSet)st.getResultSet()).getInsertedOID();
+ int insertedOID = ((org.postgresql.jdbc2.Statement)st).getInsertedOID();
System.out.println("Inserted row with oid "+insertedOID);
-
+
// Now change the value of b from 1 to 8
st.executeUpdate("update basic set b=8");
System.out.println("Updated "+st.getUpdateCount()+" rows");
-
+
// Now delete 2 rows
st.executeUpdate("delete from basic where a<3");
System.out.println("deleted "+st.getUpdateCount()+" rows");
-
+
     // For large inserts, a PreparedStatement is more efficient, because it
     // supports precompiling the SQL statement and storing a Java object
     // directly into any column. PostgreSQL doesn't support
ps.executeUpdate(); // executeUpdate because insert returns no data
}
ps.close(); // Always close when we are done with it
-
+
// Finally perform a query on the table
System.out.println("performing a query");
ResultSet rs = st.executeQuery("select a, b from basic");
}
rs.close(); // again, you must close the result when done
}
-
+
// Now run the query again, showing a more efficient way of getting the
// result if you don't know what column number a value is in
System.out.println("performing another query");
//
int col_a = rs.findColumn("a");
int col_b = rs.findColumn("b");
-
+
// Now we run through the result set, printing out the result.
// Again, we must call .next() before attempting to read any results
while(rs.next()) {
}
rs.close(); // again, you must close the result when done
}
-
+
+ // Now test maxrows by setting it to 3 rows
+ st.setMaxRows(3);
+ System.out.println("performing a query limited to "+st.getMaxRows());
+ rs = st.executeQuery("select a, b from basic");
+ while(rs.next()) {
+ int a = rs.getInt("a"); // This shows how to get the value by name
+ int b = rs.getInt(2); // This shows how to get the value by column
+ System.out.println(" a="+a+" b="+b);
+ }
+ rs.close(); // again, you must close the result when done
+
// The last thing to do is to drop the table. This is done in the
// cleanup() method.
}
-
+
/**
* Display some instructions on how to run the example
*/
     System.out.println("Usage:\n java example.basic jdbc:postgresql:database user password [debug]\n\nThe debug field can be anything. Its presence will enable DriverManager's\ndebug trace. Unless you want to see screens of items, don't put anything in\nhere.");
System.exit(1);
}
-
+
/**
* This little lot starts the test
*/
public static void main(String args[])
{
System.out.println("PostgreSQL basic test v6.3 rev 1\n");
-
+
if(args.length<3)
instructions();
-
+
// This line outputs debug information to stderr. To enable this, simply
// add an extra parameter to the command line
if(args.length>3)
DriverManager.setLogStream(System.err);
-
+
// Now run the tests
try {
basic test = new basic(args);
<property category="sys" name="CheckStable" value="1" />\r
<property category="sys" name="Company" value="" />\r
<property category="sys" name="Copyright" value="Copyright (c) 2001" />\r
- <property category="sys" name="DefaultPackage" value="org.postgresql.core" />\r
+ <property category="sys" name="DefaultPackage" value="org.postgresql.largeobject" />\r
<property category="sys" name="Description" value="" />\r
<property category="sys" name="DocPath" value="doc" />\r
<property category="sys" name="ExcludeClassEnabled" value="0" />\r
import org.postgresql.util.*;
/**
- * $Id: Connection.java,v 1.13 2001/01/18 17:37:12 peter Exp $
+ * $Id: Connection.java,v 1.14 2001/01/31 08:26:01 peter Exp $
*
* This abstract class is used by org.postgresql.Driver to open either the JDBC1 or
* JDBC2 versions of the Connection class.
public PG_Stream pg_stream;
// This is set by org.postgresql.Statement.setMaxRows()
- public int maxrows = 0; // maximum no. of rows; 0 = unlimited
+ //public int maxrows = 0; // maximum no. of rows; 0 = unlimited
private String PG_HOST;
private int PG_PORT;
*/
public java.sql.ResultSet ExecSQL(String sql,java.sql.Statement stat) throws SQLException
{
+ // added Jan 30 2001 to correct maxrows per statement
+ int maxrows=0;
+ if(stat!=null)
+ maxrows=stat.getMaxRows();
+
// added Oct 7 1998 to give us thread safety.
synchronized(pg_stream) {
// Deallocate all resources in the stream associated
SQLWarning warnings = null; // The warnings chain.
int timeout = 0; // The timeout for a query (not used)
boolean escapeProcessing = true;// escape processing flag
+	int maxrows=0;			// the maximum number of rows to return, 0=unlimited
/**
* Constructor for a Statement. It simply sets the connection
*/
public int getMaxRows() throws SQLException
{
- return connection.maxrows;
+ return maxrows;
}
/**
*/
public void setMaxRows(int max) throws SQLException
{
- connection.maxrows = max;
+ maxrows = max;
}
/**
import org.postgresql.util.*;
/**
- * $Id: Connection.java,v 1.5 2001/01/18 17:37:14 peter Exp $
+ * $Id: Connection.java,v 1.6 2001/01/31 08:26:02 peter Exp $
*
* A Connection represents a session with a specific database. Within the
* context of a Connection, SQL statements are executed and results are
*/
public boolean isClosed() throws SQLException
{
- return (pg_stream == null);
+ // If the stream is gone, then close() was called
+ if(pg_stream == null)
+ return true;
+
+ // ok, test the connection
+ try {
+ // by sending an empty query. If we are dead, then an SQLException should
+ // be thrown
+ java.sql.ResultSet rs = ExecSQL(" ");
+ if(rs!=null)
+ rs.close();
+
+ // By now, we must be alive
+ return false;
+ } catch(SQLException se) {
+      // An SQLException here almost certainly means the backend has gone away.
+      // isClosed() exists precisely so callers can find that out without
+      // having to handle an exception, so simply report the connection as
+      // closed.
+ return true;
+ }
}
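
Because isClosed() now probes the backend with an empty query, a client can use
it to validate a connection before reuse. A hedged sketch; the reconnect logic
and the url/usr/pwd variables are assumptions, not part of this patch:

    // Assumed: url, usr and pwd hold the original connection parameters.
    if(db.isClosed()) {
      // either close() was called or the backend has gone away, so reconnect
      db = DriverManager.getConnection(url, usr, pwd);
    }
    Statement st = db.createStatement();
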
/**
* parameter. For instance, if the IN parameter has SQL type Integer, then
* setInt should be used.
*
- * <p>If arbitrary parameter type conversions are required, then the setObject
+ * <p>If arbitrary parameter type conversions are required, then the setObject
* method should be used with a target SQL type.
*
* @see ResultSet
* @see java.sql.PreparedStatement
*/
-public class PreparedStatement extends Statement implements java.sql.PreparedStatement
+public class PreparedStatement extends Statement implements java.sql.PreparedStatement
{
String sql;
String[] templateStrings;
}
s.append(templateStrings[inStrings.length]);
return super.executeUpdate(s.toString()); // in Statement class
- }
+ }
/**
* Set a parameter to SQL NULL
else {
StringBuffer b = new StringBuffer();
int i;
-
+
b.append('\'');
for (i = 0 ; i < x.length() ; ++i)
{
//
//set(parameterIndex, df.format(new java.util.Date(x.getTime()+86400000)));
}
-
+
/**
* Set a parameter to a java.sql.Time value. The driver converts
* this to a SQL TIME value when it sends it to the database.
* When a very large binary value is input to a LONGVARBINARY parameter,
* it may be more practical to send it via a java.io.InputStream.
* JDBC will read the data from the stream as needed, until it reaches
- * end-of-file.
+ * end-of-file.
*
* <P><B>Note:</B> This stream object can either be a standard Java
* stream object or your own subclass that implements the standard
*/
public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException
{
- throw new PSQLException("postgresql.prep.is");
+ LargeObjectManager lom = connection.getLargeObjectAPI();
+ int oid = lom.create();
+ LargeObject lob = lom.open(oid);
+ OutputStream los = lob.getOutputStream();
+ try {
+      // This copy loop could be buffered, but the OutputStream returned by
+      // LargeObject is already buffered internally, so extra buffering here
+      // would gain nothing and could even make things worse.
+ int c=x.read();
+ while(c>-1) {
+ los.write(c);
+ c=x.read();
+ }
+ los.close();
+ } catch(IOException se) {
+ throw new PSQLException("postgresql.prep.is",se);
+ }
+ // lob is closed by the stream so don't call lob.close()
+ setInt(parameterIndex,oid);
}
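
With this implementation setBinaryStream() copies the stream into a new large
object and binds its OID, so the target column should be of type oid. A usage
sketch; the "images" table, the file name and the transaction handling are
assumptions (large objects are normally accessed inside a transaction):

    // Assumed schema: create table images (name text, data oid)
    File file = new File("photo.jpg");
    FileInputStream fis = new FileInputStream(file);
    db.setAutoCommit(false);              // large objects need a transaction
    PreparedStatement ps = db.prepareStatement("insert into images values (?,?)");
    ps.setString(1, file.getName());
    ps.setBinaryStream(2, fis, (int)file.length());
    ps.executeUpdate();
    ps.close();
    fis.close();
    db.commit();
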
/**
* @param x the object containing the input parameter value
* @param targetSqlType The SQL type to be send to the database
* @param scale For java.sql.Types.DECIMAL or java.sql.Types.NUMERIC
- * types this is the number of digits after the decimal. For
+ * types this is the number of digits after the decimal. For
* all other types this value will be ignored.
* @exception SQLException if a database access error occurs
*/
{
setObject(parameterIndex, x, targetSqlType, 0);
}
-
+
/**
* This stores an Object into a parameter.
* <p>New for 6.4, if the object is not recognised, but it is
/**
* Some prepared statements return multiple results; the execute method
- * handles these complex statements as well as the simpler form of
+ * handles these complex statements as well as the simpler form of
* statements handled by executeQuery and executeUpdate
*
* @return true if the next result is a ResultSet; false if it is an
s.append(templateStrings[inStrings.length]);
return s.toString();
}
-
+
// **************************************************************
- // END OF PUBLIC INTERFACE
+ // END OF PUBLIC INTERFACE
// **************************************************************
-
+
/**
* There are a lot of setXXX classes which all basically do
* the same thing. We need a method which actually does the
throw new PSQLException("postgresql.prep.range");
inStrings[paramIndex - 1] = s;
}
-
+
// ** JDBC 2 Extensions **
-
+
public void addBatch() throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public java.sql.ResultSetMetaData getMetaData() throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public void setArray(int i,Array x) throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public void setBlob(int i,Blob x) throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public void setCharacterStream(int i,java.io.Reader x,int length) throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public void setClob(int i,Clob x) throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public void setNull(int i,int t,String s) throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public void setRef(int i,Ref x) throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public void setDate(int i,java.sql.Date d,java.util.Calendar cal) throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public void setTime(int i,Time t,java.util.Calendar cal) throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
public void setTimestamp(int i,Timestamp t,java.util.Calendar cal) throws SQLException
{
throw org.postgresql.Driver.notImplemented();
}
-
+
}
*/
public InputStream getBinaryStream(int columnIndex) throws SQLException
{
+      // New in 7.1: handle columns of type oid (type OID 26) as BLOBs, so
+      // return a stream that reads directly from the large object
+      if(!wasNullFlag)
+	if( fields[columnIndex - 1].getOID() == 26) {
+ LargeObjectManager lom = connection.getLargeObjectAPI();
+ LargeObject lob = lom.open(getInt(columnIndex));
+ return lob.getInputStream();
+ }
+
+ // Not an OID so fake the stream
byte b[] = getBytes(columnIndex);
if (b != null)
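
Reading the blob back goes through ResultSet.getBinaryStream(), which for an
oid column now returns a stream over the large object instead of materialising
it in memory. A sketch; the table layout matches the previous sketch and the
output file name is an assumption:

    // Assumed schema: images(name text, data oid)
    db.setAutoCommit(false);              // large objects need a transaction
    Statement st = db.createStatement();
    ResultSet rs = st.executeQuery("select data from images where name='photo.jpg'");
    if(rs.next()) {
      InputStream in = rs.getBinaryStream(1);  // streams from the large object
      FileOutputStream out = new FileOutputStream("photo-copy.jpg");
      int c = in.read();
      while(c > -1) {
        out.write(c);
        c = in.read();
      }
      out.close();
      in.close();
    }
    rs.close();
    st.close();
    db.commit();
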
private Vector batch=null;
int resultsettype; // the resultset type to return
int concurrency; // is it updateable or not?
+ int maxrows=0; // the maximum number of rows to return 0=unlimited
/**
* Constructor for a Statement. It simply sets the connection
*/
public int getMaxRows() throws SQLException
{
- return connection.maxrows;
+ return maxrows;
}
/**
*/
public void setMaxRows(int max) throws SQLException
{
- connection.maxrows = max;
+ maxrows = max;
}
/**
if(escapeProcessing)
sql=connection.EscapeSQL(sql);
- result = connection.ExecSQL(sql);
+ // New in 7.1, pass Statement so that ExecSQL can customise to it
+ result = connection.ExecSQL(sql,this);
// New in 7.1, required for ResultSet.getStatement() to work
((org.postgresql.jdbc2.ResultSet)result).setStatement(this);
throw org.postgresql.Driver.notImplemented();
}
- //public int getKeysetSize() throws SQLException
- //{
-// throw org.postgresql.Driver.notImplemented();
- //}
-
public int getResultSetConcurrency() throws SQLException
{
// new in 7.1
throw org.postgresql.Driver.notImplemented();
}
- //public void setKeysetSize(int keys) throws SQLException
- //{
-// throw org.postgresql.Driver.notImplemented();
- //}
-
public void setResultSetConcurrency(int value) throws SQLException
{
concurrency=value;
resultsettype=value;
}
+ /**
+     * New in 7.1: Returns the OID of the last inserted row. This should be
+     * used rather than the old approach of going through getResultSet(),
+     * which returns null for executeUpdate().
+ * @return OID of last insert
+ */
+ public int getInsertedOID() throws SQLException
+ {
+ if(result!=null)
+ return ((org.postgresql.ResultSet)result).getInsertedOID();
+ return 0;
+ }
+
}
--- /dev/null
+package org.postgresql.largeobject;
+
+import java.io.InputStream;
+import java.io.IOException;
+import java.sql.SQLException;
+
+/**
+ * This is an initial implementation of an InputStream from a large object.
+ * For now, the bare minimum is implemented. Later (after 7.1) we will override
+ * the other read methods to optimise them.
+ */
+public class BlobInputStream extends InputStream {
+ /**
+ * The parent LargeObject
+ */
+ private LargeObject lo;
+
+ /**
+ * Buffer used to improve performance
+ */
+ private byte[] buffer;
+
+ /**
+ * Position within buffer
+ */
+ private int bpos;
+
+ /**
+ * The buffer size
+ */
+ private int bsize;
+
+ /**
+ * The mark position
+ */
+ private int mpos=0;
+
+ /**
+ * @param lo LargeObject to read from
+ */
+ public BlobInputStream(LargeObject lo) {
+ this(lo,1024);
+ }
+
+ /**
+ * @param lo LargeObject to read from
+ * @param bsize buffer size
+ */
+ public BlobInputStream(LargeObject lo,int bsize) {
+ this.lo=lo;
+ buffer=null;
+ bpos=0;
+ this.bsize=bsize;
+ }
+
+ /**
+ * The minimum required to implement input stream
+ */
+ public int read() throws java.io.IOException {
+ try {
+ if(buffer==null || bpos>=buffer.length) {
+ buffer=lo.read(bsize);
+ bpos=0;
+ }
+
+ // Handle EOF
+ if(bpos>=buffer.length)
+ return -1;
+
+      // mask to 0..255, as required by the InputStream.read() contract
+      return buffer[bpos++] & 0xff;
+ } catch(SQLException se) {
+ throw new IOException(se.toString());
+ }
+ }
+
+
+ /**
+ * Closes this input stream and releases any system resources associated
+ * with the stream.
+   *
+   * <p> This also closes the underlying LargeObject.
+   *
+ * @exception IOException if an I/O error occurs.
+ */
+ public void close() throws IOException {
+ try {
+ lo.close();
+ lo=null;
+ } catch(SQLException se) {
+ throw new IOException(se.toString());
+ }
+ }
+
+ /**
+ * Marks the current position in this input stream. A subsequent call to
+ * the <code>reset</code> method repositions this stream at the last marked
+ * position so that subsequent reads re-read the same bytes.
+ *
+   * <p> The <code>readlimit</code> argument tells this input stream to
+ * allow that many bytes to be read before the mark position gets
+ * invalidated.
+ *
+ * <p> The general contract of <code>mark</code> is that, if the method
+ * <code>markSupported</code> returns <code>true</code>, the stream somehow
+ * remembers all the bytes read after the call to <code>mark</code> and
+ * stands ready to supply those same bytes again if and whenever the method
+ * <code>reset</code> is called. However, the stream is not required to
+ * remember any data at all if more than <code>readlimit</code> bytes are
+ * read from the stream before <code>reset</code> is called.
+ *
+   * <p> This implementation records the current position within the
+   * underlying LargeObject.
+ *
+ * @param readlimit the maximum limit of bytes that can be read before
+ * the mark position becomes invalid.
+ * @see java.io.InputStream#reset()
+ */
+ public synchronized void mark(int readlimit) {
+ try {
+ mpos=lo.tell();
+    } catch(SQLException se) {
+      // mark() is not allowed to throw, so the error has to be swallowed here
+      //throw new IOException(se.toString());
+    }
+ }
+
+ /**
+ * Repositions this stream to the position at the time the
+ * <code>mark</code> method was last called on this input stream.
+   * NB: If mark() has not been called, this moves to the beginning.
+ * @see java.io.InputStream#mark(int)
+ * @see java.io.IOException
+ */
+ public synchronized void reset() throws IOException {
+ try {
+ lo.seek(mpos);
+ } catch(SQLException se) {
+ throw new IOException(se.toString());
+ }
+ }
+
+ /**
+   * Tests if this input stream supports the <code>mark</code> and
+   * <code>reset</code> methods. This stream does, so it returns
+   * <code>true</code>.
+   *
+   * @return <code>true</code> if this stream instance supports the mark and
+   * reset methods; <code>false</code> otherwise.
+ * @see java.io.InputStream#mark(int)
+ * @see java.io.InputStream#reset()
+ */
+ public boolean markSupported() {
+ return true;
+ }
+
+}
\ No newline at end of file
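
BlobInputStream is normally obtained via LargeObject.getInputStream(), but it
can also be constructed directly when a different buffer size is wanted. A
sketch; the oid value, the cast to org.postgresql.Connection and the 4k buffer
size are assumptions:

    // Assumed: "db" is a connection from this driver and "oid" names an
    // existing large object.
    db.setAutoCommit(false);
    LargeObjectManager lom = ((org.postgresql.Connection)db).getLargeObjectAPI();
    LargeObject lo = lom.open(oid);

    InputStream in = new BlobInputStream(lo, 4096);  // 4k buffer, not the 1k default
    int count = 0;
    int c = in.read();
    while(c > -1) {
      count++;
      c = in.read();
    }
    in.close();                           // also closes the LargeObject
    db.commit();
    System.out.println("large object " + oid + " holds " + count + " bytes");
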
--- /dev/null
+package org.postgresql.largeobject;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.sql.SQLException;
+
+/**
+ * This implements a basic output stream that writes to a LargeObject
+ */
+public class BlobOutputStream extends OutputStream {
+ /**
+ * The parent LargeObject
+ */
+ private LargeObject lo;
+
+ /**
+ * Buffer
+ */
+ private byte buf[];
+
+ /**
+ * Size of the buffer (default 1K)
+ */
+ private int bsize;
+
+ /**
+ * Position within the buffer
+ */
+ private int bpos;
+
+ /**
+ * Create an OutputStream to a large object
+ * @param lo LargeObject
+ */
+ public BlobOutputStream(LargeObject lo) {
+ this(lo,1024);
+ }
+
+ /**
+ * Create an OutputStream to a large object
+ * @param lo LargeObject
+ * @param bsize The size of the buffer used to improve performance
+ */
+ public BlobOutputStream(LargeObject lo,int bsize) {
+ this.lo=lo;
+ this.bsize=bsize;
+ buf=new byte[bsize];
+ bpos=0;
+ }
+
+ public void write(int b) throws java.io.IOException {
+ try {
+ if(bpos>=bsize) {
+ lo.write(buf);
+ bpos=0;
+ }
+ buf[bpos++]=(byte)b;
+ } catch(SQLException se) {
+ throw new IOException(se.toString());
+ }
+ }
+
+ /**
+ * Flushes this output stream and forces any buffered output bytes
+ * to be written out. The general contract of <code>flush</code> is
+ * that calling it is an indication that, if any bytes previously
+ * written have been buffered by the implementation of the output
+ * stream, such bytes should immediately be written to their
+ * intended destination.
+ *
+ * @exception IOException if an I/O error occurs.
+ */
+ public void flush() throws IOException {
+ try {
+ if(bpos>0)
+ lo.write(buf,0,bpos);
+ bpos=0;
+ } catch(SQLException se) {
+ throw new IOException(se.toString());
+ }
+ }
+
+ /**
+ * Closes this output stream and releases any system resources
+ * associated with this stream. The general contract of <code>close</code>
+ * is that it closes the output stream. A closed stream cannot perform
+ * output operations and cannot be reopened.
+ * <p>
+   * This implementation flushes any buffered data and closes the
+   * underlying LargeObject.
+ *
+ * @exception IOException if an I/O error occurs.
+ */
+  public void close() throws IOException {
+    try {
+      // write out any bytes still sitting in the buffer before closing
+      flush();
+      lo.close();
+      lo=null;
+    } catch(SQLException se) {
+      throw new IOException(se.toString());
+    }
+  }
+
+}
\ No newline at end of file
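
The matching write path goes through LargeObject.getOutputStream(), which wraps
the object in a BlobOutputStream. A sketch of creating a new large object and
writing to it; the data and the transaction handling are assumptions:

    db.setAutoCommit(false);
    LargeObjectManager lom = ((org.postgresql.Connection)db).getLargeObjectAPI();
    int oid = lom.create();               // make a new, empty large object
    LargeObject lo = lom.open(oid);

    OutputStream out = lo.getOutputStream();   // a BlobOutputStream underneath
    byte[] data = "hello large object".getBytes();
    for(int i = 0; i < data.length; i++)
      out.write(data[i]);
    out.flush();                          // push buffered bytes to the backend
    out.close();                          // also closes the LargeObject
    db.commit();
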
* for this object.
*
* <p>Normally, client code would use the getAsciiStream, getBinaryStream,
- * or getUnicodeStream methods in ResultSet, or setAsciiStream,
+ * or getUnicodeStream methods in ResultSet, or setAsciiStream,
* setBinaryStream, or setUnicodeStream methods in PreparedStatement to
* access Large Objects.
*
   * Indicates a seek from the beginning of a file
*/
public static final int SEEK_SET = 0;
-
+
/**
* Indicates a seek from the current position
*/
public static final int SEEK_CUR = 1;
-
+
/**
* Indicates a seek from the end of a file
*/
public static final int SEEK_END = 2;
-
+
private Fastpath fp; // Fastpath API to use
private int oid; // OID of this object
private int fd; // the descriptor of the open large object
-
+
/**
* This opens a large object.
*
{
this.fp = fp;
this.oid = oid;
-
+
FastpathArg args[] = new FastpathArg[2];
args[0] = new FastpathArg(oid);
args[1] = new FastpathArg(mode);
this.fd = fp.getInteger("lo_open",args);
}
-
+
/**
* @return the OID of this LargeObject
*/
{
return oid;
}
-
+
/**
* This method closes the object. You must not call methods in this
* object after this is called.
args[0] = new FastpathArg(fd);
    fp.fastpath("lo_close",false,args); // false here as we don't care about the result
}
-
+
/**
* Reads some data from the object, and return as a byte[] array
*
args[0] = new FastpathArg(fd);
args[1] = new FastpathArg(len);
return fp.getData("loread",args);
-
+
// This version allows us to break this down into 4k blocks
//if(len<=4048) {
//// handle as before, return the whole block in one go
//return buf;
//}
}
-
+
/**
* Reads some data from the object into an existing array
*
* @param buf destination array
* @param off offset within array
* @param len number of bytes to read
+ * @return the number of bytes actually read
* @exception SQLException if a database-access error occurs.
*/
- public void read(byte buf[],int off,int len) throws SQLException
+ public int read(byte buf[],int off,int len) throws SQLException
{
- System.arraycopy(read(len),0,buf,off,len);
+ byte b[] = read(len);
+ if(b.length<len)
+ len=b.length;
+ System.arraycopy(b,0,buf,off,len);
+ return len;
}
-
+
/**
* Writes an array to the object
*
args[1] = new FastpathArg(buf);
fp.fastpath("lowrite",false,args);
}
-
+
/**
* Writes some data from an array to the object
*
System.arraycopy(buf,off,data,0,len);
write(data);
}
-
+
/**
* Sets the current position within the object.
*
args[2] = new FastpathArg(ref);
fp.fastpath("lo_lseek",false,args);
}
-
+
/**
* Sets the current position within the object.
*
{
seek(pos,SEEK_SET);
}
-
+
/**
* @return the current position within the object
* @exception SQLException if a database-access error occurs.
args[0] = new FastpathArg(fd);
return fp.getInteger("lo_tell",args);
}
-
+
/**
* This method is inefficient, as the only way to find out the size of
* the object is to seek to the end, record the current position, then
seek(cp,SEEK_SET);
return sz;
}
-
+
/**
* Returns an InputStream from this object.
*
*/
public InputStream getInputStream() throws SQLException
{
- throw org.postgresql.Driver.notImplemented();
+ return new BlobInputStream(this);
}
-
+
/**
* Returns an OutputStream to this object
*
*/
public OutputStream getOutputStream() throws SQLException
{
- throw org.postgresql.Driver.notImplemented();
+ return new BlobOutputStream(this);
}
+
}
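
Because LargeObject.read(byte[],int,int) now reports how many bytes it actually
delivered, callers can loop until the object is exhausted. A short sketch,
assuming a LargeObject "lo" is already open inside a transaction:

    byte[] buf = new byte[8192];
    int total = 0;
    int n = lo.read(buf, 0, buf.length);
    while(n > 0) {
      total += n;                         // only the first n bytes of buf are valid
      n = lo.read(buf, 0, buf.length);
    }
    System.out.println("read " + total + " bytes in total");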