+ String acl = (String)acls.elementAt(i);
+ addACLPrivileges(acl,privileges);
+ }
+ return privileges;
+ }
+
/*
* Get a description of a table's optimal set of columns that
* uniquely identifies a row. They are ordered by SCOPE.
*/
// Implementation note: This is required for Borland's JBuilder to work
public java.sql.ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException
{
- // for now, this returns an empty result set.
Field f[] = new Field[8];
- ResultSet r; // ResultSet for the SQL query that we need to do
Vector v = new Vector(); // The new ResultSet tuple stuff
f[0] = new Field(connection, "SCOPE", iInt2Oid, 2);
- f[1] = new Field(connection, "COLUMN_NAME", iVarcharOid, NAME_SIZE);
+ f[1] = new Field(connection, "COLUMN_NAME", iVarcharOid, getMaxNameLength());
f[2] = new Field(connection, "DATA_TYPE", iInt2Oid, 2);
- f[3] = new Field(connection, "TYPE_NAME", iVarcharOid, NAME_SIZE);
+ f[3] = new Field(connection, "TYPE_NAME", iVarcharOid, getMaxNameLength());
f[4] = new Field(connection, "COLUMN_SIZE", iInt4Oid, 4);
f[5] = new Field(connection, "BUFFER_LENGTH", iInt4Oid, 4);
f[6] = new Field(connection, "DECIMAL_DIGITS", iInt2Oid, 2);
f[7] = new Field(connection, "PSEUDO_COLUMN", iInt2Oid, 2);
+ /* At the moment this simply returns a table's primary key,
+ * if there is one. I believe other unique indexes, ctid,
+ * and oid should also be considered. -KJ
+ */
+
+ String from;
+ String where = "";
+ if (connection.haveMinimumServerVersion("7.3")) {
+ from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, pg_catalog.pg_attribute a, pg_catalog.pg_index i ";
+ where = " AND ct.relnamespace = n.oid ";
+ if (schema != null && !"".equals(schema)) {
+ where += " AND n.nspname = '"+escapeQuotes(schema.toLowerCase())+"' ";
+ }
+ } else {
+ from = " FROM pg_class ct, pg_class ci, pg_attribute a, pg_index i ";
+ }
+ String sql = "SELECT a.attname, a.atttypid "+
+ from+
+ " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid "+
+ " AND a.attrelid=ci.oid AND i.indisprimary "+
+ " AND ct.relname = '"+escapeQuotes(table.toLowerCase())+"' "+
+ where+
+ " ORDER BY a.attnum ";
+
+ ResultSet rs = connection.createStatement().executeQuery(sql);
+ while (rs.next()) {
+ byte tuple[][] = new byte[8][];
+ int columnTypeOid = rs.getInt("atttypid");
+ tuple[0] = Integer.toString(scope).getBytes();
+ tuple[1] = rs.getBytes("attname");
+ tuple[2] = Integer.toString(connection.getSQLType(columnTypeOid)).getBytes();
+ tuple[3] = connection.getPGType(columnTypeOid).getBytes();
+ tuple[4] = null;
+ tuple[5] = null;
+ tuple[6] = null;
+ tuple[7] = Integer.toString(java.sql.DatabaseMetaData.bestRowNotPseudo).getBytes();
+ v.addElement(tuple);
+ }
return connection.getResultSet(null, f, v, "OK", 1);
}
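+
+ // Illustrative usage sketch only, not part of this patch: how a caller
+ // might read the row identifier built above. The connection "conn" and
+ // the table name "mytable" are hypothetical.
+ private static void printBestRowIdentifier(java.sql.Connection conn) throws SQLException
+ {
+ java.sql.DatabaseMetaData dbmd = conn.getMetaData();
+ java.sql.ResultSet rs = dbmd.getBestRowIdentifier(null, null, "mytable",
+ java.sql.DatabaseMetaData.bestRowSession, false);
+ while (rs.next())
+ {
+ // With this implementation these are the primary key columns, if any.
+ System.out.println(rs.getString("COLUMN_NAME") + " " + rs.getString("TYPE_NAME"));
+ }
+ rs.close();
+ }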
* updated when any value in a row is updated. They are
* unordered.
*
- * This method is currently unimplemented.
- *
*
* Each column description has the following columns:
*
* SCOPE short => is not used
*/
public java.sql.ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException
{
- throw org.postgresql.Driver.notImplemented();
+ Field f[] = new Field[8];
+ Vector v = new Vector(); // The new ResultSet tuple stuff
+
+ f[0] = new Field(connection, "SCOPE", iInt2Oid, 2);
+ f[1] = new Field(connection, "COLUMN_NAME", iVarcharOid, getMaxNameLength());
+ f[2] = new Field(connection, "DATA_TYPE", iInt2Oid, 2);
+ f[3] = new Field(connection, "TYPE_NAME", iVarcharOid, getMaxNameLength());
+ f[4] = new Field(connection, "COLUMN_SIZE", iInt4Oid, 4);
+ f[5] = new Field(connection, "BUFFER_LENGTH", iInt4Oid, 4);
+ f[6] = new Field(connection, "DECIMAL_DIGITS", iInt2Oid, 2);
+ f[7] = new Field(connection, "PSEUDO_COLUMN", iInt2Oid, 2);
+
+ byte tuple[][] = new byte[8][];
+
+ /* PostgreSQL does not have any column types that are
+ * automatically updated like some databases' timestamp type.
+ * We can't tell what rules or triggers might be doing, so we
+ * are left with the system columns that change on an update.
+ * An update may change all of the following system columns:
+ * ctid, xmax, xmin, cmax, and cmin. Depending on whether we
+ * are in a transaction and whether we roll it back, the only
+ * guaranteed change is to ctid. -KJ
+ */
+
+ tuple[0] = null;
+ tuple[1] = "ctid".getBytes();
+ tuple[2] = Integer.toString(connection.getSQLType("tid")).getBytes();
+ tuple[3] = "tid".getBytes();
+ tuple[4] = null;
+ tuple[5] = null;
+ tuple[6] = null;
+ tuple[7] = Integer.toString(java.sql.DatabaseMetaData.versionColumnPseudo).getBytes();
+ v.addElement(tuple);
+
+ /* Perhaps we should check that the given
+ * catalog.schema.table actually exists. -KJ
+ */
+ return connection.getResultSet(null, f, v, "OK", 1);
}
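+
+ // Illustrative usage sketch only, not part of this patch: reading the
+ // single ctid row returned above. "conn" and "mytable" are hypothetical.
+ private static void printVersionColumns(java.sql.Connection conn) throws SQLException
+ {
+ java.sql.ResultSet rs = conn.getMetaData().getVersionColumns(null, null, "mytable");
+ while (rs.next())
+ {
+ // Expect COLUMN_NAME "ctid", TYPE_NAME "tid", and PSEUDO_COLUMN
+ // equal to versionColumnPseudo.
+ System.out.println(rs.getString("COLUMN_NAME") + " " + rs.getInt("PSEUDO_COLUMN"));
+ }
+ rs.close();
+ }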
/*
*/
public java.sql.ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException
{
- return connection.createStatement().executeQuery("SELECT " +
- "'' as TABLE_CAT," +
- "'' AS TABLE_SCHEM," +
- "bc.relname AS TABLE_NAME," +
- "a.attname AS COLUMN_NAME," +
- "a.attnum as KEY_SEQ," +
- "ic.relname as PK_NAME " +
- " FROM pg_class bc, pg_class ic, pg_index i, pg_attribute a" +
- " WHERE bc.relkind = 'r' " + // -- not indices
- " and upper(bc.relname) = upper('" + table + "')" +
- " and i.indrelid = bc.oid" +
- " and i.indexrelid = ic.oid" +
- " and ic.oid = a.attrelid" +
- " and i.indisprimary='t' " +
- " ORDER BY table_name, pk_name, key_seq"
- );
+ String select;
+ String from;
+ String where = "";
+ if (connection.haveMinimumServerVersion("7.3")) {
+ select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, ";
+ from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, pg_catalog.pg_attribute a, pg_catalog.pg_index i ";
+ where = " AND ct.relnamespace = n.oid ";
+ if (schema != null && !"".equals(schema)) {
+ where += " AND n.nspname = '"+escapeQuotes(schema.toLowerCase())+"' ";
+ }
+ } else {
+ select = "SELECT NULL AS TABLE_CAT, NULL AS TABLE_SCHEM, ";
+ from = " FROM pg_class ct, pg_class ci, pg_attribute a, pg_index i ";
+ }
+ String sql = select+
+ " ct.relname AS TABLE_NAME, "+
+ " a.attname AS COLUMN_NAME, "+
+ " a.attnum AS KEY_SEQ, "+
+ " ci.relname AS PK_NAME "+
+ from+
+ " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid "+
+ " AND a.attrelid=ci.oid AND i.indisprimary "+
+ " AND ct.relname = '"+escapeQuotes(table.toLowerCase())+"' "+
+ where+
+ " ORDER BY table_name, pk_name, key_seq";
+ return connection.createStatement().executeQuery(sql);
}
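+
+ // Illustrative usage sketch only, not part of this patch: listing a
+ // table's primary key columns in KEY_SEQ order. The connection "conn",
+ // schema "public", and table "mytable" are hypothetical.
+ private static void printPrimaryKeys(java.sql.Connection conn) throws SQLException
+ {
+ java.sql.ResultSet rs = conn.getMetaData().getPrimaryKeys(null, "public", "mytable");
+ while (rs.next())
+ {
+ System.out.println(rs.getShort("KEY_SEQ") + ": " + rs.getString("COLUMN_NAME")
+ + " (" + rs.getString("PK_NAME") + ")");
+ }
+ rs.close();
+ }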
/*
* @throws SQLException
*/
- protected java.sql.ResultSet getImportedExportedKeys(String catalog, String schema, String primaryTable, String foreignTable) throws SQLException
+ protected java.sql.ResultSet getImportedExportedKeys(String primaryCatalog, String primarySchema, String primaryTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException
{
Field f[] = new Field[14];
- f[0] = new Field(connection, "PKTABLE_CAT", iVarcharOid, NAME_SIZE);
- f[1] = new Field(connection, "PKTABLE_SCHEM", iVarcharOid, NAME_SIZE);
- f[2] = new Field(connection, "PKTABLE_NAME", iVarcharOid, NAME_SIZE);
- f[3] = new Field(connection, "PKCOLUMN_NAME", iVarcharOid, NAME_SIZE);
- f[4] = new Field(connection, "FKTABLE_CAT", iVarcharOid, NAME_SIZE);
- f[5] = new Field(connection, "FKTABLE_SCHEM", iVarcharOid, NAME_SIZE);
- f[6] = new Field(connection, "FKTABLE_NAME", iVarcharOid, NAME_SIZE);
- f[7] = new Field(connection, "FKCOLUMN_NAME", iVarcharOid, NAME_SIZE);
+ f[0] = new Field(connection, "PKTABLE_CAT", iVarcharOid, getMaxNameLength());
+ f[1] = new Field(connection, "PKTABLE_SCHEM", iVarcharOid, getMaxNameLength());
+ f[2] = new Field(connection, "PKTABLE_NAME", iVarcharOid, getMaxNameLength());
+ f[3] = new Field(connection, "PKCOLUMN_NAME", iVarcharOid, getMaxNameLength());
+ f[4] = new Field(connection, "FKTABLE_CAT", iVarcharOid, getMaxNameLength());
+ f[5] = new Field(connection, "FKTABLE_SCHEM", iVarcharOid, getMaxNameLength());
+ f[6] = new Field(connection, "FKTABLE_NAME", iVarcharOid, getMaxNameLength());
+ f[7] = new Field(connection, "FKCOLUMN_NAME", iVarcharOid, getMaxNameLength());
f[8] = new Field(connection, "KEY_SEQ", iInt2Oid, 2);
f[9] = new Field(connection, "UPDATE_RULE", iInt2Oid, 2);
f[10] = new Field(connection, "DELETE_RULE", iInt2Oid, 2);
- f[11] = new Field(connection, "FK_NAME", iVarcharOid, NAME_SIZE);
- f[12] = new Field(connection, "PK_NAME", iVarcharOid, NAME_SIZE);
+ f[11] = new Field(connection, "FK_NAME", iVarcharOid, getMaxNameLength());
+ f[12] = new Field(connection, "PK_NAME", iVarcharOid, getMaxNameLength());
f[13] = new Field(connection, "DEFERRABILITY", iInt2Oid, 2);
- java.sql.ResultSet rs = connection.ExecSQL(
- "SELECT distinct "
- + "c.relname as prelname, "
- + "c2.relname as frelname, "
- + "t.tgconstrname, "
- + "a.attnum as keyseq, "
- + "ic.relname as fkeyname, "
- + "t.tgdeferrable, "
- + "t.tginitdeferred, "
- + "t.tgnargs,t.tgargs, "
- + "p1.proname as updaterule, "
- + "p2.proname as deleterule "
- + "FROM "
- + "pg_trigger t, "
- + "pg_trigger t1, "
- + "pg_class c, "
- + "pg_class c2, "
- + "pg_class ic, "
- + "pg_proc p1, "
- + "pg_proc p2, "
- + "pg_index i, "
- + "pg_attribute a "
- + "WHERE "
- // isolate the update rule
- + "(t.tgrelid=c.oid "
- + "AND t.tgisconstraint "
- + "AND t.tgconstrrelid=c2.oid "
- + "AND t.tgfoid=p1.oid "
- + "and p1.proname like '%%upd') "
-
- + "and "
- // isolate the delete rule
- + "(t1.tgrelid=c.oid "
- + "and t1.tgisconstraint "
- + "and t1.tgconstrrelid=c2.oid "
- + "AND t1.tgfoid=p2.oid "
- + "and p2.proname like '%%del') "
-
- // if we are looking for exported keys then primary table will be used
- + ((primaryTable != null) ? "AND c.relname='" + primaryTable + "' " : "")
-
- // if we are looking for imported keys then the foreign table will be used
- + ((foreignTable != null) ? "AND c2.relname='" + foreignTable + "' " : "")
- + "AND i.indrelid=c.oid "
- + "AND i.indexrelid=ic.oid "
- + "AND ic.oid=a.attrelid "
- + "AND i.indisprimary "
- + "ORDER BY "
-
- // orderby is as follows getExported, orders by FKTABLE,
- // getImported orders by PKTABLE
- // getCrossReference orders by FKTABLE, so this should work for both,
- // since when getting crossreference, primaryTable will be defined
-
- + (primaryTable != null ? "frelname" : "prelname") + ",keyseq");
+
+ String select;
+ String from;
+ String where = "";
+
+ /*
+ * The addition of the pg_constraint table in 7.3 should have really
+ * helped us out here, but it comes up just a bit short.
+ * - The conkey and confkey columns aren't really useful without
+ * contrib/array unless we want to issue separate queries.
+ * - Unique indexes that can support foreign keys are not necessarily
+ * added to pg_constraint. Also, multiple unique indexes covering
+ * the same keys can be created, which makes it difficult to determine
+ * the PK_NAME field.
+ */
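+
+ /* For illustration only, a hedged sketch of what a pg_constraint-based
+ * probe might look like on 7.3; conkey/confkey are arrays, which is why
+ * this approach is not taken here:
+ * SELECT conname, conkey, confkey FROM pg_catalog.pg_constraint
+ * WHERE contype = 'f' AND conrelid = <fk table oid>
+ */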
+
+ if (connection.haveMinimumServerVersion("7.3")) {
+ select = "SELECT DISTINCT n.nspname as pnspname,n2.nspname as fnspname, ";
+ from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_namespace n2, pg_catalog.pg_trigger t, pg_catalog.pg_trigger t1, pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_class ic, pg_catalog.pg_proc p1, pg_catalog.pg_proc p2, pg_catalog.pg_index i, pg_catalog.pg_attribute a ";
+ where = " AND c.relnamespace = n.oid AND c2.relnamespace=n2.oid ";
+ if (primarySchema != null && !"".equals(primarySchema)) {
+ where += " AND n.nspname = '"+escapeQuotes(primarySchema.toLowerCase())+"' ";
+ }
+ if (foreignSchema != null && !"".equals(foreignSchema)) {
+ where += " AND n2.nspname = '"+escapeQuotes(foreignSchema.toLowerCase())+"' ";
+ }
+ } else {
+ select = "SELECT DISTINCT NULL::text as pnspname, NULL::text as fnspname, ";
+ from = " FROM pg_trigger t, pg_trigger t1, pg_class c, pg_class c2, pg_class ic, pg_proc p1, pg_proc p2, pg_index i, pg_attribute a ";
+ }
+
+ String sql = select
+ + "c.relname as prelname, "
+ + "c2.relname as frelname, "
+ + "t.tgconstrname, "
+ + "a.attnum as keyseq, "
+ + "ic.relname as fkeyname, "
+ + "t.tgdeferrable, "
+ + "t.tginitdeferred, "
+ + "t.tgnargs,t.tgargs, "
+ + "p1.proname as updaterule, "
+ + "p2.proname as deleterule "
+ + from
+ + "WHERE "
+ // isolate the update rule
+ + "(t.tgrelid=c.oid "
+ + "AND t.tgisconstraint "
+ + "AND t.tgconstrrelid=c2.oid "
+ + "AND t.tgfoid=p1.oid "
+ + "and p1.proname like 'RI\\\\_FKey\\\\_%\\\\_upd') "
+
+ + "and "
+ // isolate the delete rule
+ + "(t1.tgrelid=c.oid "
+ + "and t1.tgisconstraint "
+ + "and t1.tgconstrrelid=c2.oid "
+ + "AND t1.tgfoid=p2.oid "
+ + "and p2.proname like 'RI\\\\_FKey\\\\_%\\\\_del') "
+ + "AND i.indrelid=c.oid "
+ + "AND i.indexrelid=ic.oid "
+ + "AND ic.oid=a.attrelid "
+ + "AND i.indisprimary "
+ + where;
+
+ if (primaryTable != null) {
+ sql += "AND c.relname='" + escapeQuotes(primaryTable.toLowerCase()) + "' ";
+ }
+ if (foreignTable != null) {
+ sql += "AND c2.relname='" + escapeQuotes(foreignTable.toLowerCase()) + "' ";
+ }
+
+ sql += "ORDER BY ";
+
+ // The ordering is as follows: getExportedKeys orders by FKTABLE and
+ // getImportedKeys orders by PKTABLE. getCrossReference orders by
+ // FKTABLE, and since primaryTable is defined when getting a cross
+ // reference, ordering on frelname covers both of those cases.
+
+ if (primaryTable != null) {
+ sql += "frelname";
+ } else {
+ sql += "prelname";
+ }
+
+ sql += ",keyseq";
+
+ ResultSet rs = connection.createStatement().executeQuery(sql);
// returns the following columns
// and some example data with a table defined as follows
Vector tuples = new Vector();
-
while ( rs.next() )
{
byte tuple[][] = new byte[14][];
- tuple[2] = rs.getBytes(1); //PKTABLE_NAME
- tuple[6] = rs.getBytes(2); //FKTABLE_NAME
- String fKeyName = rs.getString(3);
- String updateRule = rs.getString(10);
+ tuple[1] = rs.getBytes(1); //PKTABLE_SCHEM
+ tuple[5] = rs.getBytes(2); //FKTABLE_SCHEM
+ tuple[2] = rs.getBytes(3); //PKTABLE_NAME
+ tuple[6] = rs.getBytes(4); //FKTABLE_NAME
+ String fKeyName = rs.getString(5);
+ String updateRule = rs.getString(12);
if (updateRule != null )
{
}
- String deleteRule = rs.getString(11);
+ String deleteRule = rs.getString(13);
if ( deleteRule != null )
{
}
+ int keySequence = rs.getInt(6); //KEY_SEQ
+
// Parse the tgargs data
String fkeyColumn = "";
String pkeyColumn = "";
-
-
// Note: I am guessing at most of this, but it should be close;
// if not, please correct.
// The keys are in pairs and start after the first four arguments;
// the arguments are separated by \000.
- int keySequence = rs.getInt(4); //KEY_SEQ
-
- // get the args
- String targs = rs.getString(9);
+ String targs = rs.getString(11);
// args look like this
//\000ww\000vv\000UNSPECIFIED\000m\000a\000n\000b\000
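+ // A plausible decoding given the layout described above (a sketch to
+ // match the comment, not a verified part of this patch): skip the four
+ // leading arguments plus any earlier key pairs, leaving the
+ // (fk column, pk column) pair for this KEY_SEQ.
+ if (targs != null) {
+ StringTokenizer st = new StringTokenizer(targs, "\\000");
+ int advance = 4 + (keySequence - 1) * 2;
+ for (int j = 0; st.hasMoreTokens() && j < advance; j++) {
+ st.nextToken(); // skip tokens before the pair of interest
+ }
+ if (st.hasMoreTokens()) {
+ fkeyColumn = st.nextToken();
+ }
+ if (st.hasMoreTokens()) {
+ pkeyColumn = st.nextToken();
+ }
+ }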
tuple[3] = pkeyColumn.getBytes(); //PKCOLUMN_NAME
tuple[7] = fkeyColumn.getBytes(); //FKCOLUMN_NAME
- tuple[8] = rs.getBytes(4); //KEY_SEQ
+ tuple[8] = rs.getBytes(6); //KEY_SEQ
tuple[11] = targs.getBytes(); //FK_NAME this will give us a unique name for the foreign key
- tuple[12] = rs.getBytes(5); //PK_NAME
+ tuple[12] = rs.getBytes(7); //PK_NAME
// DEFERRABILITY
int deferrability = java.sql.DatabaseMetaData.importedKeyNotDeferrable;
- boolean deferrable = rs.getBoolean(6);
- boolean initiallyDeferred = rs.getBoolean(7);
+ boolean deferrable = rs.getBoolean(8);
+ boolean initiallyDeferred = rs.getBoolean(9);
if (deferrable)
{
if (initiallyDeferred)
*/
public java.sql.ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException
{
- return getImportedExportedKeys(catalog, schema, null, table);
+ return getImportedExportedKeys(null, null, null, catalog, schema, table);
}
/*
*/
public java.sql.ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException
{
- return getImportedExportedKeys(catalog, schema, table, null);
+ return getImportedExportedKeys(catalog, schema, table, null, null, null);
}
/*
*/
public java.sql.ResultSet getCrossReference(String primaryCatalog, String primarySchema, String primaryTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException
{
- return getImportedExportedKeys(primaryCatalog, primarySchema, primaryTable, foreignTable);
+ return getImportedExportedKeys(primaryCatalog, primarySchema, primaryTable, foreignCatalog, foreignSchema, foreignTable);
}
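+
+ // Illustrative usage sketch only, not part of this patch: the three
+ // public entry points above differ only in which side of the
+ // relationship is fixed. Table names "parent" and "child" are
+ // hypothetical.
+ private static void printForeignKeys(java.sql.Connection conn) throws SQLException
+ {
+ java.sql.DatabaseMetaData dbmd = conn.getMetaData();
+ // keys imported by (referencing from) "child"
+ java.sql.ResultSet imported = dbmd.getImportedKeys(null, null, "child");
+ // keys exported by (referenced from) "parent"
+ java.sql.ResultSet exported = dbmd.getExportedKeys(null, null, "parent");
+ // the specific parent -> child relationship
+ java.sql.ResultSet cross = dbmd.getCrossReference(null, null, "parent", null, null, "child");
+ imported.close();
+ exported.close();
+ cross.close();
+ }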
/*
*/
public java.sql.ResultSet getTypeInfo() throws SQLException
{
- java.sql.ResultSet rs = connection.ExecSQL("select typname from pg_type");
- if (rs != null)
- {
- Field f[] = new Field[18];
- ResultSet r; // ResultSet for the SQL query that we need to do
- Vector v = new Vector(); // The new ResultSet tuple stuff
-
- f[0] = new Field(connection, "TYPE_NAME", iVarcharOid, NAME_SIZE);
- f[1] = new Field(connection, "DATA_TYPE", iInt2Oid, 2);
- f[2] = new Field(connection, "PRECISION", iInt4Oid, 4);
- f[3] = new Field(connection, "LITERAL_PREFIX", iVarcharOid, NAME_SIZE);
- f[4] = new Field(connection, "LITERAL_SUFFIX", iVarcharOid, NAME_SIZE);
- f[5] = new Field(connection, "CREATE_PARAMS", iVarcharOid, NAME_SIZE);
- f[6] = new Field(connection, "NULLABLE", iInt2Oid, 2);
- f[7] = new Field(connection, "CASE_SENSITIVE", iBoolOid, 1);
- f[8] = new Field(connection, "SEARCHABLE", iInt2Oid, 2);
- f[9] = new Field(connection, "UNSIGNED_ATTRIBUTE", iBoolOid, 1);
- f[10] = new Field(connection, "FIXED_PREC_SCALE", iBoolOid, 1);
- f[11] = new Field(connection, "AUTO_INCREMENT", iBoolOid, 1);
- f[12] = new Field(connection, "LOCAL_TYPE_NAME", iVarcharOid, NAME_SIZE);
- f[13] = new Field(connection, "MINIMUM_SCALE", iInt2Oid, 2);
- f[14] = new Field(connection, "MAXIMUM_SCALE", iInt2Oid, 2);
- f[15] = new Field(connection, "SQL_DATA_TYPE", iInt4Oid, 4);
- f[16] = new Field(connection, "SQL_DATETIME_SUB", iInt4Oid, 4);
- f[17] = new Field(connection, "NUM_PREC_RADIX", iInt4Oid, 4);
-
- // cache some results, this will keep memory useage down, and speed
- // things up a little.
- byte b9[] = "9".getBytes();
- byte b10[] = "10".getBytes();
- byte bf[] = "f".getBytes();
- byte bnn[] = Integer.toString(java.sql.DatabaseMetaData.typeNoNulls).getBytes();
- byte bts[] = Integer.toString(java.sql.DatabaseMetaData.typeSearchable).getBytes();
-
- while (rs.next())
- {
- byte[][] tuple = new byte[18][];
- String typname = rs.getString(1);
- tuple[0] = typname.getBytes();
- tuple[1] = Integer.toString(connection.getSQLType(typname)).getBytes();
- tuple[2] = b9; // for now
- tuple[6] = bnn; // for now
- tuple[7] = bf; // false for now - not case sensitive
- tuple[8] = bts;
- tuple[9] = bf; // false for now - it's signed
- tuple[10] = bf; // false for now - must handle money
- tuple[11] = bf; // false for now - handle autoincrement
- // 12 - LOCAL_TYPE_NAME is null
- // 13 & 14 ?
- // 15 & 16 are unused so we return null
- tuple[17] = b10; // everything is base 10
- v.addElement(tuple);
- }
- rs.close();
- return connection.getResultSet(null, f, v, "OK", 1);
+
+ Field f[] = new Field[18];
+ Vector v = new Vector(); // The new ResultSet tuple stuff
+
+ f[0] = new Field(connection, "TYPE_NAME", iVarcharOid, getMaxNameLength());
+ f[1] = new Field(connection, "DATA_TYPE", iInt2Oid, 2);
+ f[2] = new Field(connection, "PRECISION", iInt4Oid, 4);
+ f[3] = new Field(connection, "LITERAL_PREFIX", iVarcharOid, getMaxNameLength());
+ f[4] = new Field(connection, "LITERAL_SUFFIX", iVarcharOid, getMaxNameLength());
+ f[5] = new Field(connection, "CREATE_PARAMS", iVarcharOid, getMaxNameLength());
+ f[6] = new Field(connection, "NULLABLE", iInt2Oid, 2);
+ f[7] = new Field(connection, "CASE_SENSITIVE", iBoolOid, 1);
+ f[8] = new Field(connection, "SEARCHABLE", iInt2Oid, 2);
+ f[9] = new Field(connection, "UNSIGNED_ATTRIBUTE", iBoolOid, 1);
+ f[10] = new Field(connection, "FIXED_PREC_SCALE", iBoolOid, 1);
+ f[11] = new Field(connection, "AUTO_INCREMENT", iBoolOid, 1);
+ f[12] = new Field(connection, "LOCAL_TYPE_NAME", iVarcharOid, getMaxNameLength());
+ f[13] = new Field(connection, "MINIMUM_SCALE", iInt2Oid, 2);
+ f[14] = new Field(connection, "MAXIMUM_SCALE", iInt2Oid, 2);
+ f[15] = new Field(connection, "SQL_DATA_TYPE", iInt4Oid, 4);
+ f[16] = new Field(connection, "SQL_DATETIME_SUB", iInt4Oid, 4);
+ f[17] = new Field(connection, "NUM_PREC_RADIX", iInt4Oid, 4);
+
+ String sql;
+ if (connection.haveMinimumServerVersion("7.3")) {
+ sql = "SELECT typname FROM pg_catalog.pg_type";
+ } else {
+ sql = "SELECT typname FROM pg_type";
}
- throw new PSQLException("postgresql.metadata.unavailable");
+ ResultSet rs = connection.createStatement().executeQuery(sql);
+ // Cache some results; this will keep memory usage down and speed
+ // things up a little.
+ byte b9[] = "9".getBytes();
+ byte b10[] = "10".getBytes();
+ byte bf[] = "f".getBytes();
+ byte bnn[] = Integer.toString(java.sql.DatabaseMetaData.typeNoNulls).getBytes();
+ byte bts[] = Integer.toString(java.sql.DatabaseMetaData.typeSearchable).getBytes();
+
+ while (rs.next())
+ {
+ byte[][] tuple = new byte[18][];
+ String typname = rs.getString(1);
+ tuple[0] = typname.getBytes();
+ tuple[1] = Integer.toString(connection.getSQLType(typname)).getBytes();
+ tuple[2] = b9; // for now
+ tuple[6] = bnn; // for now
+ tuple[7] = bf; // false for now - not case sensitive
+ tuple[8] = bts;
+ tuple[9] = bf; // false for now - it's signed
+ tuple[10] = bf; // false for now - must handle money
+ tuple[11] = bf; // false for now - handle autoincrement
+ // 12 - LOCAL_TYPE_NAME is null
+ // 13 & 14 ?
+ // 15 & 16 are unused so we return null
+ tuple[17] = b10; // everything is base 10
+ v.addElement(tuple);
+ }
+ rs.close();
+ return connection.getResultSet(null, f, v, "OK", 1);
}
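+
+ // Illustrative usage sketch only, not part of this patch: mapping each
+ // backend type name to its java.sql.Types value via the metadata above.
+ // "conn" is a hypothetical connection.
+ private static void printTypeInfo(java.sql.Connection conn) throws SQLException
+ {
+ java.sql.ResultSet rs = conn.getMetaData().getTypeInfo();
+ while (rs.next())
+ {
+ System.out.println(rs.getString("TYPE_NAME") + " -> " + rs.getInt("DATA_TYPE"));
+ }
+ rs.close();
+ }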
/*
*/
// Implementation note: This is required for Borland's JBuilder to work
public java.sql.ResultSet getIndexInfo(String catalog, String schema, String tableName, boolean unique, boolean approximate) throws SQLException
{
- Field f[] = new Field[13];
- java.sql.ResultSet r; // ResultSet for the SQL query that we need to do
- Vector v = new Vector(); // The new ResultSet tuple stuff
-
- f[0] = new Field(connection, "TABLE_CAT", iVarcharOid, NAME_SIZE);
- f[1] = new Field(connection, "TABLE_SCHEM", iVarcharOid, NAME_SIZE);
- f[2] = new Field(connection, "TABLE_NAME", iVarcharOid, NAME_SIZE);
- f[3] = new Field(connection, "NON_UNIQUE", iBoolOid, 1);
- f[4] = new Field(connection, "INDEX_QUALIFIER", iVarcharOid, NAME_SIZE);
- f[5] = new Field(connection, "INDEX_NAME", iVarcharOid, NAME_SIZE);
- f[6] = new Field(connection, "TYPE", iInt2Oid, 2);
- f[7] = new Field(connection, "ORDINAL_POSITION", iInt2Oid, 2);
- f[8] = new Field(connection, "COLUMN_NAME", iVarcharOid, NAME_SIZE);
- f[9] = new Field(connection, "ASC_OR_DESC", iVarcharOid, NAME_SIZE);
- f[10] = new Field(connection, "CARDINALITY", iInt4Oid, 4);
- f[11] = new Field(connection, "PAGES", iInt4Oid, 4);
- f[12] = new Field(connection, "FILTER_CONDITION", iVarcharOid, NAME_SIZE);
-
- r = connection.ExecSQL("select " +
- "c.relname, " +
- "x.indisunique, " +
- "i.relname, " +
- "x.indisclustered, " +
- "a.amname, " +
- "x.indkey, " +
- "c.reltuples, " +
- "c.relpages, " +
- "x.indexrelid " +
- "FROM pg_index x, pg_class c, pg_class i, pg_am a " +
- "WHERE ((c.relname = '" + tableName.toLowerCase() + "') " +
- " AND (c.oid = x.indrelid) " +
- " AND (i.oid = x.indexrelid) " +
- " AND (i.relam = a.oid)) " +
- "ORDER BY x.indisunique DESC, " +
- " x.indisclustered, a.amname, i.relname");
- while (r.next())
- {
- // indkey is an array of column ordinals (integers). In the JDBC
- // interface, this has to be separated out into a separate
- // tuple for each indexed column. Also, getArray() is not yet
- // implemented for Postgres JDBC, so we parse by hand.
- String columnOrdinalString = r.getString(6);
- StringTokenizer stok = new StringTokenizer(columnOrdinalString);
- int [] columnOrdinals = new int[stok.countTokens()];
- int o = 0;
- while (stok.hasMoreTokens())
- {
- columnOrdinals[o++] = Integer.parseInt(stok.nextToken());
- }
- java.sql.ResultSet columnNameRS = connection.ExecSQL("select a.attname FROM pg_attribute a WHERE a.attrelid = " + r.getInt(9));
- for (int i = 0; i < columnOrdinals.length; i++)
- {
- byte [] [] tuple = new byte [13] [];
- tuple[0] = "".getBytes();
- tuple[1] = "".getBytes();
- tuple[2] = r.getBytes(1);
- tuple[3] = r.getBoolean(2) ? "false".getBytes() : "true".getBytes();
- tuple[4] = null;
- tuple[5] = r.getBytes(3);
- tuple[6] = r.getBoolean(4) ?
- Integer.toString(java.sql.DatabaseMetaData.tableIndexClustered).getBytes() :
- r.getString(5).equals("hash") ?
- Integer.toString(java.sql.DatabaseMetaData.tableIndexHashed).getBytes() :
- Integer.toString(java.sql.DatabaseMetaData.tableIndexOther).getBytes();
- tuple[7] = Integer.toString(i + 1).getBytes();
- if (columnNameRS.next())
- {
- tuple[8] = columnNameRS.getBytes(1);
- }
- else
- {
- tuple[8] = "".getBytes();
- }
- tuple[9] = null; // sort sequence ???
- tuple[10] = r.getBytes(7); // inexact
- tuple[11] = r.getBytes(8);
- tuple[12] = null;
- v.addElement(tuple);
+ String select;
+ String from;
+ String where = "";
+ if (connection.haveMinimumServerVersion("7.3")) {
+ select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, ";
+ from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, pg_catalog.pg_index i, pg_catalog.pg_attribute a, pg_catalog.pg_am am ";
+ where = " AND n.oid = ct.relnamespace ";
+ if (schema != null && !"".equals(schema)) {
+ where += " AND n.nspname = '"+escapeQuotes(schema.toLowerCase())+"' ";
}
+ } else {
+ select = "SELECT NULL AS TABLE_CAT, NULL AS TABLE_SCHEM, ";
+ from = " FROM pg_class ct, pg_class ci, pg_index i, pg_attribute a, pg_am am ";
}
- return connection.getResultSet(null, f, v, "OK", 1);
+ String sql = select+
+ " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, "+
+ " CASE i.indisclustered "+
+ " WHEN true THEN "+java.sql.DatabaseMetaData.tableIndexClustered+
+ " ELSE CASE am.amname "+
+ " WHEN 'hash' THEN "+java.sql.DatabaseMetaData.tableIndexHashed+
+ " ELSE "+java.sql.DatabaseMetaData.tableIndexOther+
+ " END "+
+ " END AS TYPE, "+
+ " a.attnum AS ORDINAL_POSITION, "+
+ " a.attname AS COLUMN_NAME, "+
+ " NULL AS ASC_OR_DESC, "+
+ " ci.reltuples AS CARDINALITY, "+
+ " ci.relpages AS PAGES, "+
+ " NULL AS FILTER_CONDITION "+
+ from+
+ " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid AND a.attrelid=ci.oid AND ci.relam=am.oid "+
+ where+
+ " AND ct.relname = '"+escapeQuotes(tableName.toLowerCase())+"' ";
+
+ if (unique) {
+ sql += " AND i.indisunique ";
+ }
+ sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME ";
+ return connection.createStatement().executeQuery(sql);
}
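+
+ // Illustrative usage sketch only, not part of this patch: listing the
+ // unique indexes on a table. "conn" and "mytable" are hypothetical.
+ private static void printIndexInfo(java.sql.Connection conn) throws SQLException
+ {
+ java.sql.ResultSet rs = conn.getMetaData().getIndexInfo(null, null, "mytable", true, false);
+ while (rs.next())
+ {
+ System.out.println(rs.getString("INDEX_NAME") + "." + rs.getString("COLUMN_NAME"));
+ }
+ rs.close();
+ }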
}