diff --git a/sql/connect/client/jdbc/src/main/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDatabaseMetaData.scala b/sql/connect/client/jdbc/src/main/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDatabaseMetaData.scala
index 4efbd2b8f917f..097bd3a0dc6c2 100644
--- a/sql/connect/client/jdbc/src/main/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDatabaseMetaData.scala
+++ b/sql/connect/client/jdbc/src/main/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDatabaseMetaData.scala
@@ -24,11 +24,9 @@ import org.apache.spark.util.VersionUtils
 
 class SparkConnectDatabaseMetaData(conn: SparkConnectConnection) extends DatabaseMetaData {
 
-  override def allProceduresAreCallable: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def allProceduresAreCallable: Boolean = false
 
-  override def allTablesAreSelectable: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def allTablesAreSelectable: Boolean = false
 
   override def getURL: String = conn.url
 
@@ -36,17 +34,13 @@ class SparkConnectDatabaseMetaData(conn: SparkConnectConnection) extends Databas
 
   override def isReadOnly: Boolean = false
 
-  override def nullsAreSortedHigh: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def nullsAreSortedHigh: Boolean = false
 
-  override def nullsAreSortedLow: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def nullsAreSortedLow: Boolean = false
 
-  override def nullsAreSortedAtStart: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def nullsAreSortedAtStart: Boolean = false
 
-  override def nullsAreSortedAtEnd: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def nullsAreSortedAtEnd: Boolean = false
 
   override def getDatabaseProductName: String = "Apache Spark Connect Server"
 
@@ -60,35 +54,25 @@ class SparkConnectDatabaseMetaData(conn: SparkConnectConnection) extends Databas
 
   override def getDriverMinorVersion: Int = VersionUtils.minorVersion(SPARK_VERSION)
 
-  override def usesLocalFiles: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def usesLocalFiles: Boolean = false
 
-  override def usesLocalFilePerTable: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def usesLocalFilePerTable: Boolean = false
 
-  override def supportsMixedCaseIdentifiers: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsMixedCaseIdentifiers: Boolean = false
 
-  override def storesUpperCaseIdentifiers: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def storesUpperCaseIdentifiers: Boolean = false
 
-  override def storesLowerCaseIdentifiers: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def storesLowerCaseIdentifiers: Boolean = false
 
-  override def storesMixedCaseIdentifiers: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def storesMixedCaseIdentifiers: Boolean = false
 
-  override def supportsMixedCaseQuotedIdentifiers: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsMixedCaseQuotedIdentifiers: Boolean = false
 
-  override def storesUpperCaseQuotedIdentifiers: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def storesUpperCaseQuotedIdentifiers: Boolean = false
 
-  override def storesLowerCaseQuotedIdentifiers: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def storesLowerCaseQuotedIdentifiers: Boolean = false
 
-  override def storesMixedCaseQuotedIdentifiers: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def storesMixedCaseQuotedIdentifiers: Boolean = false
 
   override def getIdentifierQuoteString: String = "`"
 
@@ -112,17 +96,13 @@ class SparkConnectDatabaseMetaData(conn: SparkConnectConnection) extends Databas
 
   override def getExtraNameCharacters: String = ""
 
-  override def supportsAlterTableWithAddColumn: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsAlterTableWithAddColumn: Boolean = true
 
-  override def supportsAlterTableWithDropColumn: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsAlterTableWithDropColumn: Boolean = true
 
-  override def supportsColumnAliasing: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsColumnAliasing: Boolean = true
 
-  override def nullPlusNonNullIsNull: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def nullPlusNonNullIsNull: Boolean = true
 
   override def supportsConvert: Boolean =
     throw new SQLFeatureNotSupportedException
@@ -130,58 +110,41 @@ class SparkConnectDatabaseMetaData(conn: SparkConnectConnection) extends Databas
   override def supportsConvert(fromType: Int, toType: Int): Boolean =
     throw new SQLFeatureNotSupportedException
 
-  override def supportsTableCorrelationNames: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsTableCorrelationNames: Boolean = true
 
-  override def supportsDifferentTableCorrelationNames: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsDifferentTableCorrelationNames: Boolean = false
 
-  override def supportsExpressionsInOrderBy: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsExpressionsInOrderBy: Boolean = true
 
-  override def supportsOrderByUnrelated: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsOrderByUnrelated: Boolean = true
 
   override def supportsGroupBy: Boolean = true
 
-  override def supportsGroupByUnrelated: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsGroupByUnrelated: Boolean = true
 
-  override def supportsGroupByBeyondSelect: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsGroupByBeyondSelect: Boolean = true
 
-  override def supportsLikeEscapeClause: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsLikeEscapeClause: Boolean = true
 
-  override def supportsMultipleResultSets: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsMultipleResultSets: Boolean = false
 
-  override def supportsMultipleTransactions: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsMultipleTransactions: Boolean = false
 
-  override def supportsNonNullableColumns: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsNonNullableColumns: Boolean = true
 
-  override def supportsMinimumSQLGrammar: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsMinimumSQLGrammar: Boolean = true
 
-  override def supportsCoreSQLGrammar: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsCoreSQLGrammar: Boolean = true
 
-  override def supportsExtendedSQLGrammar: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsExtendedSQLGrammar: Boolean = false
 
-  override def supportsANSI92EntryLevelSQL: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsANSI92EntryLevelSQL: Boolean = true
 
-  override def supportsANSI92IntermediateSQL: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsANSI92IntermediateSQL: Boolean = false
 
-  override def supportsANSI92FullSQL: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsANSI92FullSQL: Boolean = false
 
-  override def supportsIntegrityEnhancementFacility: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsIntegrityEnhancementFacility: Boolean = false
 
   override def supportsOuterJoins: Boolean = true
 
@@ -199,164 +162,114 @@ class SparkConnectDatabaseMetaData(conn: SparkConnectConnection) extends Databas
 
   override def getCatalogSeparator: String = "."
 
-  override def supportsSchemasInDataManipulation: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSchemasInDataManipulation: Boolean = true
 
-  override def supportsSchemasInProcedureCalls: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSchemasInProcedureCalls: Boolean = true
 
-  override def supportsSchemasInTableDefinitions: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSchemasInTableDefinitions: Boolean = true
 
-  override def supportsSchemasInIndexDefinitions: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSchemasInIndexDefinitions: Boolean = true
 
   override def supportsSchemasInPrivilegeDefinitions: Boolean =
     throw new SQLFeatureNotSupportedException
 
-  override def supportsCatalogsInDataManipulation: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsCatalogsInDataManipulation: Boolean = true
 
-  override def supportsCatalogsInProcedureCalls: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsCatalogsInProcedureCalls: Boolean = true
 
-  override def supportsCatalogsInTableDefinitions: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsCatalogsInTableDefinitions: Boolean = true
 
-  override def supportsCatalogsInIndexDefinitions: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsCatalogsInIndexDefinitions: Boolean = true
 
   override def supportsCatalogsInPrivilegeDefinitions: Boolean =
     throw new SQLFeatureNotSupportedException
 
-  override def supportsPositionedDelete: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsPositionedDelete: Boolean = false
 
-  override def supportsPositionedUpdate: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsPositionedUpdate: Boolean = false
 
-  override def supportsSelectForUpdate: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSelectForUpdate: Boolean = false
 
-  override def supportsStoredProcedures: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsStoredProcedures: Boolean = true
 
-  override def supportsSubqueriesInComparisons: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSubqueriesInComparisons: Boolean = true
 
-  override def supportsSubqueriesInExists: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSubqueriesInExists: Boolean = true
 
-  override def supportsSubqueriesInIns: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSubqueriesInIns: Boolean = true
 
-  override def supportsSubqueriesInQuantifieds: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSubqueriesInQuantifieds: Boolean = true
 
-  override def supportsCorrelatedSubqueries: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsCorrelatedSubqueries: Boolean = true
 
-  override def supportsUnion: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsUnion: Boolean = true
 
-  override def supportsUnionAll: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsUnionAll: Boolean = true
 
-  override def supportsOpenCursorsAcrossCommit: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsOpenCursorsAcrossCommit: Boolean = false
 
-  override def supportsOpenCursorsAcrossRollback: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsOpenCursorsAcrossRollback: Boolean = false
 
-  override def supportsOpenStatementsAcrossCommit: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsOpenStatementsAcrossCommit: Boolean = false
 
-  override def supportsOpenStatementsAcrossRollback: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsOpenStatementsAcrossRollback: Boolean = false
 
-  override def getMaxBinaryLiteralLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxBinaryLiteralLength: Int = 0
 
-  override def getMaxCharLiteralLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxCharLiteralLength: Int = 0
 
-  override def getMaxColumnNameLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxColumnNameLength: Int = 0
 
-  override def getMaxColumnsInGroupBy: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxColumnsInGroupBy: Int = 0
 
-  override def getMaxColumnsInIndex: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxColumnsInIndex: Int = 0
 
-  override def getMaxColumnsInOrderBy: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxColumnsInOrderBy: Int = 0
 
-  override def getMaxColumnsInSelect: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxColumnsInSelect: Int = 0
 
-  override def getMaxColumnsInTable: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxColumnsInTable: Int = 0
 
-  override def getMaxConnections: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxConnections: Int = 0
 
-  override def getMaxCursorNameLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxCursorNameLength: Int = 0
 
-  override def getMaxIndexLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxIndexLength: Int = 0
 
-  override def getMaxSchemaNameLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxSchemaNameLength: Int = 0
 
-  override def getMaxProcedureNameLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxProcedureNameLength: Int = 0
 
-  override def getMaxCatalogNameLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxCatalogNameLength: Int = 0
 
-  override def getMaxRowSize: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxRowSize: Int = 0
 
-  override def doesMaxRowSizeIncludeBlobs: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def doesMaxRowSizeIncludeBlobs: Boolean = false
 
-  override def getMaxStatementLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxStatementLength: Int = 0
 
-  override def getMaxStatements: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxStatements: Int = 0
 
-  override def getMaxTableNameLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxTableNameLength: Int = 0
 
-  override def getMaxTablesInSelect: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxTablesInSelect: Int = 0
 
-  override def getMaxUserNameLength: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getMaxUserNameLength: Int = 0
 
-  override def getDefaultTransactionIsolation: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getDefaultTransactionIsolation: Int = Connection.TRANSACTION_NONE
 
-  override def supportsTransactions: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsTransactions: Boolean = false
 
   override def supportsTransactionIsolationLevel(level: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+    level == Connection.TRANSACTION_NONE
 
-  override def supportsDataDefinitionAndDataManipulationTransactions: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsDataDefinitionAndDataManipulationTransactions: Boolean = false
 
-  override def supportsDataManipulationTransactionsOnly: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsDataManipulationTransactionsOnly: Boolean = false
 
-  override def dataDefinitionCausesTransactionCommit: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def dataDefinitionCausesTransactionCommit: Boolean = false
 
-  override def dataDefinitionIgnoredInTransactions: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def dataDefinitionIgnoredInTransactions: Boolean = false
 
   override def getProcedures(
       catalog: String,
@@ -452,40 +365,30 @@ class SparkConnectDatabaseMetaData(conn: SparkConnectConnection) extends Databas
     throw new SQLFeatureNotSupportedException
 
   override def supportsResultSetType(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+    `type` == ResultSet.TYPE_FORWARD_ONLY
 
   override def supportsResultSetConcurrency(`type`: Int, concurrency: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+    `type` == ResultSet.TYPE_FORWARD_ONLY && concurrency == ResultSet.CONCUR_READ_ONLY
 
-  override def ownUpdatesAreVisible(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def ownUpdatesAreVisible(`type`: Int): Boolean = false
 
-  override def ownDeletesAreVisible(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def ownDeletesAreVisible(`type`: Int): Boolean = false
 
-  override def ownInsertsAreVisible(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def ownInsertsAreVisible(`type`: Int): Boolean = false
 
-  override def othersUpdatesAreVisible(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def othersUpdatesAreVisible(`type`: Int): Boolean = false
 
-  override def othersDeletesAreVisible(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def othersDeletesAreVisible(`type`: Int): Boolean = false
 
-  override def othersInsertsAreVisible(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def othersInsertsAreVisible(`type`: Int): Boolean = false
 
-  override def updatesAreDetected(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def updatesAreDetected(`type`: Int): Boolean = false
 
-  override def deletesAreDetected(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def deletesAreDetected(`type`: Int): Boolean = false
 
-  override def insertsAreDetected(`type`: Int): Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def insertsAreDetected(`type`: Int): Boolean = false
 
-  override def supportsBatchUpdates: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsBatchUpdates: Boolean = false
 
   override def getUDTs(
       catalog: String,
@@ -496,17 +399,14 @@ class SparkConnectDatabaseMetaData(conn: SparkConnectConnection) extends Databas
 
   override def getConnection: Connection = conn
 
-  override def supportsSavepoints: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsSavepoints: Boolean = false
 
   override def supportsNamedParameters: Boolean =
     throw new SQLFeatureNotSupportedException
 
-  override def supportsMultipleOpenResults: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsMultipleOpenResults: Boolean = false
 
-  override def supportsGetGeneratedKeys: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsGetGeneratedKeys: Boolean = false
 
   override def getSuperTypes(
       catalog: String,
@@ -543,21 +443,17 @@ class SparkConnectDatabaseMetaData(conn: SparkConnectConnection) extends Databas
 
   override def getJDBCMinorVersion: Int = 3
 
-  override def getSQLStateType: Int =
-    throw new SQLFeatureNotSupportedException
+  override def getSQLStateType: Int = DatabaseMetaData.sqlStateSQL
 
-  override def locatorsUpdateCopy: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def locatorsUpdateCopy: Boolean = false
 
   override def supportsStatementPooling: Boolean = false
 
   override def getRowIdLifetime: RowIdLifetime = RowIdLifetime.ROWID_UNSUPPORTED
 
-  override def supportsStoredFunctionsUsingCallSyntax: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def supportsStoredFunctionsUsingCallSyntax: Boolean = false
 
-  override def autoCommitFailureClosesAllResultSets: Boolean =
-    throw new SQLFeatureNotSupportedException
+  override def autoCommitFailureClosesAllResultSets: Boolean = false
 
   override def getClientInfoProperties: ResultSet =
     throw new SQLFeatureNotSupportedException
diff --git a/sql/connect/client/jdbc/src/test/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDatabaseMetaDataSuite.scala b/sql/connect/client/jdbc/src/test/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDatabaseMetaDataSuite.scala
new file mode 100644
index 0000000000000..ee212a0c4be57
--- /dev/null
+++ b/sql/connect/client/jdbc/src/test/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDatabaseMetaDataSuite.scala
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.connect.client.jdbc
+
+import java.sql.{Array => _, _}
+
+import org.apache.spark.SparkBuildInfo.{spark_version => SPARK_VERSION}
+import org.apache.spark.sql.connect.client.jdbc.test.JdbcHelper
+import org.apache.spark.sql.connect.test.{ConnectFunSuite, RemoteSparkSession}
+import org.apache.spark.util.VersionUtils
+
+class SparkConnectDatabaseMetaDataSuite extends ConnectFunSuite with RemoteSparkSession
+  with JdbcHelper {
+
+  def jdbcUrl: String = s"jdbc:sc://localhost:$serverPort"
+
+  test("SparkConnectDatabaseMetaData simple methods") {
+    withConnection { conn =>
+      val spark = conn.asInstanceOf[SparkConnectConnection].spark
+      val metadata = conn.getMetaData
+      assert(metadata.allProceduresAreCallable === false)
+      assert(metadata.allTablesAreSelectable === false)
+      assert(metadata.getURL === jdbcUrl)
+      assert(metadata.isReadOnly === false)
+      assert(metadata.nullsAreSortedHigh === false)
+      assert(metadata.nullsAreSortedLow === false)
+      assert(metadata.nullsAreSortedAtStart === false)
+      assert(metadata.nullsAreSortedAtEnd === false)
+      assert(metadata.getUserName === spark.client.configuration.userName)
+      assert(metadata.getDatabaseProductName === "Apache Spark Connect Server")
+      assert(metadata.getDatabaseProductVersion === spark.version)
+      assert(metadata.getDriverVersion === SPARK_VERSION)
+      assert(metadata.getDriverMajorVersion === VersionUtils.majorVersion(SPARK_VERSION))
+      assert(metadata.getDriverMinorVersion === VersionUtils.minorVersion(SPARK_VERSION))
+      assert(metadata.usesLocalFiles === false)
+      assert(metadata.usesLocalFilePerTable === false)
+      assert(metadata.supportsMixedCaseIdentifiers === false)
+      assert(metadata.storesUpperCaseIdentifiers === false)
+      assert(metadata.storesLowerCaseIdentifiers === false)
+      assert(metadata.storesMixedCaseIdentifiers === false)
+      assert(metadata.supportsMixedCaseQuotedIdentifiers === false)
+      assert(metadata.storesUpperCaseQuotedIdentifiers === false)
+      assert(metadata.storesLowerCaseQuotedIdentifiers === false)
+      assert(metadata.storesMixedCaseQuotedIdentifiers === false)
+      assert(metadata.getIdentifierQuoteString === "`")
+      assert(metadata.getExtraNameCharacters === "")
+      assert(metadata.supportsAlterTableWithAddColumn === true)
+      assert(metadata.supportsAlterTableWithDropColumn === true)
+      assert(metadata.supportsColumnAliasing === true)
+      assert(metadata.nullPlusNonNullIsNull === true)
+      assert(metadata.supportsTableCorrelationNames === true)
+      assert(metadata.supportsDifferentTableCorrelationNames === false)
+      assert(metadata.supportsExpressionsInOrderBy === true)
+      assert(metadata.supportsOrderByUnrelated === true)
+      assert(metadata.supportsGroupBy === true)
+      assert(metadata.supportsGroupByUnrelated === true)
+      assert(metadata.supportsGroupByBeyondSelect === true)
+      assert(metadata.supportsLikeEscapeClause === true)
+      assert(metadata.supportsMultipleResultSets === false)
+      assert(metadata.supportsMultipleTransactions === false)
+      assert(metadata.supportsNonNullableColumns === true)
+      assert(metadata.supportsMinimumSQLGrammar === true)
+      assert(metadata.supportsCoreSQLGrammar === true)
+      assert(metadata.supportsExtendedSQLGrammar === false)
+      assert(metadata.supportsANSI92EntryLevelSQL === true)
+      assert(metadata.supportsANSI92IntermediateSQL === false)
+      assert(metadata.supportsANSI92FullSQL === false)
+      assert(metadata.supportsIntegrityEnhancementFacility === false)
+      assert(metadata.supportsOuterJoins === true)
+      assert(metadata.supportsFullOuterJoins === true)
+      assert(metadata.supportsLimitedOuterJoins === true)
+      assert(metadata.getSchemaTerm === "schema")
+      assert(metadata.getProcedureTerm === "procedure")
+      assert(metadata.getCatalogTerm === "catalog")
+      assert(metadata.isCatalogAtStart === true)
+      assert(metadata.getCatalogSeparator === ".")
+      assert(metadata.supportsSchemasInDataManipulation === true)
+      assert(metadata.supportsSchemasInProcedureCalls === true)
+      assert(metadata.supportsSchemasInTableDefinitions === true)
+      assert(metadata.supportsSchemasInIndexDefinitions === true)
+      assert(metadata.supportsCatalogsInDataManipulation === true)
+      assert(metadata.supportsCatalogsInProcedureCalls === true)
+      assert(metadata.supportsCatalogsInTableDefinitions === true)
+      assert(metadata.supportsCatalogsInIndexDefinitions === true)
+      assert(metadata.supportsPositionedDelete === false)
+      assert(metadata.supportsPositionedUpdate === false)
+      assert(metadata.supportsSelectForUpdate === false)
+      assert(metadata.supportsStoredProcedures === true)
+      assert(metadata.supportsSubqueriesInComparisons === true)
+      assert(metadata.supportsSubqueriesInExists === true)
+      assert(metadata.supportsSubqueriesInIns === true)
+      assert(metadata.supportsSubqueriesInQuantifieds === true)
+      assert(metadata.supportsCorrelatedSubqueries === true)
+      assert(metadata.supportsUnion === true)
+      assert(metadata.supportsUnionAll === true)
+      assert(metadata.supportsOpenCursorsAcrossCommit === false)
+      assert(metadata.supportsOpenCursorsAcrossRollback === false)
+      assert(metadata.supportsOpenStatementsAcrossCommit === false)
+      assert(metadata.supportsOpenStatementsAcrossRollback === false)
+      assert(metadata.getMaxBinaryLiteralLength === 0)
+      assert(metadata.getMaxCharLiteralLength === 0)
+      assert(metadata.getMaxColumnNameLength === 0)
+      assert(metadata.getMaxColumnsInGroupBy === 0)
+      assert(metadata.getMaxColumnsInIndex === 0)
+      assert(metadata.getMaxColumnsInOrderBy === 0)
+      assert(metadata.getMaxColumnsInSelect === 0)
+      assert(metadata.getMaxColumnsInTable === 0)
+      assert(metadata.getMaxConnections === 0)
+      assert(metadata.getMaxCursorNameLength === 0)
+      assert(metadata.getMaxIndexLength === 0)
+      assert(metadata.getMaxSchemaNameLength === 0)
+      assert(metadata.getMaxProcedureNameLength === 0)
+      assert(metadata.getMaxCatalogNameLength === 0)
+      assert(metadata.getMaxRowSize === 0)
+      assert(metadata.doesMaxRowSizeIncludeBlobs === false)
+      assert(metadata.getMaxStatementLength === 0)
+      assert(metadata.getMaxStatements === 0)
+      assert(metadata.getMaxTableNameLength === 0)
+      assert(metadata.getMaxTablesInSelect === 0)
+      assert(metadata.getMaxUserNameLength === 0)
+      assert(metadata.getDefaultTransactionIsolation === Connection.TRANSACTION_NONE)
+      assert(metadata.supportsTransactions === false)
+      Seq(Connection.TRANSACTION_NONE, Connection.TRANSACTION_READ_UNCOMMITTED,
+        Connection.TRANSACTION_READ_COMMITTED, Connection.TRANSACTION_REPEATABLE_READ,
+        Connection.TRANSACTION_SERIALIZABLE).foreach { level =>
+        val actual = metadata.supportsTransactionIsolationLevel(level)
+        val expected = level == Connection.TRANSACTION_NONE
+        assert(actual === expected)
+      }
+      assert(metadata.supportsDataDefinitionAndDataManipulationTransactions === false)
+      assert(metadata.supportsDataManipulationTransactionsOnly === false)
+      assert(metadata.dataDefinitionCausesTransactionCommit === false)
+      assert(metadata.dataDefinitionIgnoredInTransactions === false)
+      Seq(ResultSet.TYPE_FORWARD_ONLY, ResultSet.TYPE_SCROLL_INSENSITIVE,
+        ResultSet.TYPE_SCROLL_SENSITIVE).foreach { typ =>
+        var actual = metadata.supportsResultSetType(typ)
+        var expected = typ == ResultSet.TYPE_FORWARD_ONLY
+        assert(actual === expected)
+        Seq(ResultSet.CONCUR_READ_ONLY, ResultSet.CONCUR_UPDATABLE).foreach { concur =>
+          actual = metadata.supportsResultSetConcurrency(typ, concur)
+          expected = typ == ResultSet.TYPE_FORWARD_ONLY && concur == ResultSet.CONCUR_READ_ONLY
+          assert(actual === expected)
+        }
+        assert(metadata.ownUpdatesAreVisible(typ) === false)
+        assert(metadata.ownDeletesAreVisible(typ) === false)
+        assert(metadata.ownInsertsAreVisible(typ) === false)
+        assert(metadata.othersUpdatesAreVisible(typ) === false)
+        assert(metadata.othersDeletesAreVisible(typ) === false)
+        assert(metadata.othersInsertsAreVisible(typ) === false)
+        assert(metadata.updatesAreDetected(typ) === false)
+        assert(metadata.deletesAreDetected(typ) === false)
+        assert(metadata.insertsAreDetected(typ) === false)
+      }
+      assert(metadata.supportsBatchUpdates === false)
+      assert(metadata.getConnection === conn)
+      assert(metadata.supportsSavepoints === false)
+      assert(metadata.supportsMultipleOpenResults === false)
+      assert(metadata.supportsGetGeneratedKeys === false)
+      assert(metadata.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT) === false)
+      assert(metadata.supportsResultSetHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT) === true)
+      assert(metadata.getResultSetHoldability === ResultSet.CLOSE_CURSORS_AT_COMMIT)
+      assert(metadata.getDatabaseMajorVersion === VersionUtils.majorVersion(spark.version))
+      assert(metadata.getDatabaseMinorVersion === VersionUtils.minorVersion(spark.version))
+      assert(metadata.getJDBCMajorVersion === 4)
+      assert(metadata.getJDBCMinorVersion === 3)
+      assert(metadata.getSQLStateType === DatabaseMetaData.sqlStateSQL)
+      assert(metadata.locatorsUpdateCopy === false)
+      assert(metadata.supportsStoredFunctionsUsingCallSyntax === false)
+      assert(metadata.autoCommitFailureClosesAllResultSets === false)
+      assert(metadata.supportsStatementPooling === false)
+      assert(metadata.getRowIdLifetime === RowIdLifetime.ROWID_UNSUPPORTED)
+      assert(metadata.generatedKeyAlwaysReturned === false)
+      assert(metadata.getMaxLogicalLobSize === 0)
+      assert(metadata.supportsRefCursors === false)
+      assert(metadata.supportsSharding === false)
+    }
+  }
+}
diff --git a/sql/connect/client/jdbc/src/test/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDriverSuite.scala b/sql/connect/client/jdbc/src/test/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDriverSuite.scala
index 09ba786297579..853d68b2d8513 100644
--- a/sql/connect/client/jdbc/src/test/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDriverSuite.scala
+++ b/sql/connect/client/jdbc/src/test/scala/org/apache/spark/sql/connect/client/jdbc/SparkConnectDriverSuite.scala
@@ -20,10 +20,8 @@ package org.apache.spark.sql.connect.client.jdbc
 import java.sql.{Array => _, _}
 import java.util.Properties
 
-import org.apache.spark.SparkBuildInfo.{spark_version => SPARK_VERSION}
 import org.apache.spark.sql.connect.client.jdbc.test.JdbcHelper
 import org.apache.spark.sql.connect.test.{ConnectFunSuite, RemoteSparkSession}
-import org.apache.spark.util.VersionUtils
 
 class SparkConnectDriverSuite extends ConnectFunSuite with RemoteSparkSession
   with JdbcHelper {
@@ -42,44 +40,4 @@ class SparkConnectDriverSuite extends ConnectFunSuite with RemoteSparkSession
       assert(conn.isInstanceOf[SparkConnectConnection])
     }
   }
-
-  test("get DatabaseMetaData from SparkConnectConnection") {
-    withConnection { conn =>
-      val spark = conn.asInstanceOf[SparkConnectConnection].spark
-      val metadata = conn.getMetaData
-      assert(metadata.getURL === jdbcUrl)
-      assert(metadata.isReadOnly === false)
-      assert(metadata.getUserName === spark.client.configuration.userName)
-      assert(metadata.getDatabaseProductName === "Apache Spark Connect Server")
-      assert(metadata.getDatabaseProductVersion === spark.version)
-      assert(metadata.getDriverVersion === SPARK_VERSION)
-      assert(metadata.getDriverMajorVersion === VersionUtils.majorVersion(SPARK_VERSION))
-      assert(metadata.getDriverMinorVersion === VersionUtils.minorVersion(SPARK_VERSION))
-      assert(metadata.getIdentifierQuoteString === "`")
-      assert(metadata.getExtraNameCharacters === "")
-      assert(metadata.supportsGroupBy === true)
-      assert(metadata.supportsOuterJoins === true)
-      assert(metadata.supportsFullOuterJoins === true)
-      assert(metadata.supportsLimitedOuterJoins === true)
-      assert(metadata.getSchemaTerm === "schema")
-      assert(metadata.getProcedureTerm === "procedure")
-      assert(metadata.getCatalogTerm === "catalog")
-      assert(metadata.isCatalogAtStart === true)
-      assert(metadata.getCatalogSeparator === ".")
-      assert(metadata.getConnection === conn)
-      assert(metadata.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT) === false)
-      assert(metadata.supportsResultSetHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT) === true)
-      assert(metadata.getResultSetHoldability === ResultSet.CLOSE_CURSORS_AT_COMMIT)
-      assert(metadata.getDatabaseMajorVersion === VersionUtils.majorVersion(spark.version))
-      assert(metadata.getDatabaseMinorVersion === VersionUtils.minorVersion(spark.version))
-      assert(metadata.getJDBCMajorVersion === 4)
-      assert(metadata.getJDBCMinorVersion === 3)
-      assert(metadata.supportsStatementPooling === false)
-      assert(metadata.getRowIdLifetime === RowIdLifetime.ROWID_UNSUPPORTED)
-      assert(metadata.generatedKeyAlwaysReturned === false)
-      assert(metadata.getMaxLogicalLobSize === 0)
-      assert(metadata.supportsRefCursors === false)
-      assert(metadata.supportsSharding === false)
-    }
-  }
 }